diff --git a/.travis.yml b/.travis.yml index 9200d9603c..cedbf506c7 100644 --- a/.travis.yml +++ b/.travis.yml @@ -45,7 +45,7 @@ before_install: - wget https://archive.apache.org/dist/thrift/0.11.0/thrift-0.11.0.tar.gz && tar -xf thrift-0.11.0.tar.gz && cd thrift-0.11.0/ && ./configure --prefix=/usr --with-rs=no --with-ruby=no --with-python=no --with-java=no --with-go=no --with-perl=no --with-php=no --with-csharp=no --with-erlang=no --with-lua=no --with-nodejs=no CXXFLAGS="-Wno-unused-variable" && make -j4 && sudo make install && cd - install: -- sudo apt-get install -qq realpath libgflags-dev libprotobuf-dev libprotoc-dev protobuf-compiler libleveldb-dev libgoogle-perftools-dev libboost-dev libssl-dev libevent-dev libboost-test-dev libgoogle-glog-dev +- sudo apt-get install -qq realpath libgflags-dev libprotobuf-dev libprotoc-dev protobuf-compiler libleveldb-dev libgoogle-perftools-dev libboost-dev libssl-dev libevent-dev libboost-test-dev libgoogle-glog-dev libbson-dev - sudo apt-get install libgtest-dev && cd /usr/src/gtest && sudo env "PATH=$PATH" cmake . && sudo make && sudo mv libgtest* /usr/lib/ && cd - - sudo apt-get install -y gdb # install gdb diff --git a/CMakeLists.txt b/CMakeLists.txt index 0c45649f80..8f4304503a 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -146,6 +146,7 @@ endif() find_package(Protobuf REQUIRED) find_package(Threads REQUIRED) +find_package (bson-1.0 1.7 REQUIRED) find_path(LEVELDB_INCLUDE_PATH NAMES leveldb/db.h) find_library(LEVELDB_LIB NAMES leveldb) @@ -195,11 +196,16 @@ endif() find_package(OpenSSL) +find_package(bson-1.0 REQUIRED) + +get_target_property(BSON_INCLUDE_DIRS mongo::bson_shared INTERFACE_INCLUDE_DIRECTORIES) + include_directories( ${GFLAGS_INCLUDE_PATH} ${PROTOBUF_INCLUDE_DIRS} ${LEVELDB_INCLUDE_PATH} ${OPENSSL_INCLUDE_DIR} + ${BSON_INCLUDE_DIRS} ) set(DYNAMIC_LIB @@ -274,6 +280,7 @@ set(BUTIL_SOURCES ${PROJECT_SOURCE_DIR}/src/butil/atomicops_internals_x86_gcc.cc ${PROJECT_SOURCE_DIR}/src/butil/base64.cc ${PROJECT_SOURCE_DIR}/src/butil/big_endian.cc + ${PROJECT_SOURCE_DIR}/src/butil/bson_util.cc ${PROJECT_SOURCE_DIR}/src/butil/cpu.cc ${PROJECT_SOURCE_DIR}/src/butil/debug/alias.cc ${PROJECT_SOURCE_DIR}/src/butil/debug/asan_invalid_access.cc diff --git a/example/mongo_c++/CMakeLists.txt b/example/mongo_c++/CMakeLists.txt new file mode 100644 index 0000000000..bd614cad68 --- /dev/null +++ b/example/mongo_c++/CMakeLists.txt @@ -0,0 +1,137 @@ +# Licensed to the Apache Software Foundation (ASF) under one +# or more contributor license agreements. See the NOTICE file +# distributed with this work for additional information +# regarding copyright ownership. The ASF licenses this file +# to you under the Apache License, Version 2.0 (the +# "License"); you may not use this file except in compliance +# with the License. You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, +# software distributed under the License is distributed on an +# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, either express or implied. See the License for the +# specific language governing permissions and limitations +# under the License. + +cmake_minimum_required(VERSION 2.8.10) +project(mongo_c++ C CXX) + +option(LINK_SO "Whether examples are linked dynamically" OFF) + +execute_process( + COMMAND bash -c "find ${PROJECT_SOURCE_DIR}/../.. 
-type d -regex \".*output/include$\" | head -n1 | xargs dirname | tr -d '\n'" + OUTPUT_VARIABLE OUTPUT_PATH +) + +set(CMAKE_PREFIX_PATH ${OUTPUT_PATH}) + +find_package(bson-1.0 1.7 REQUIRED) + +include(FindThreads) +include(FindProtobuf) +# protobuf_generate_cpp(PROTO_SRC PROTO_HEADER echo.proto) +# include PROTO_HEADER +include_directories(${CMAKE_CURRENT_BINARY_DIR}) + +# Search for libthrift* by best effort. If it is not found and brpc is +# compiled with thrift protocol enabled, a link error would be reported. +find_path(BRPC_INCLUDE_PATH NAMES brpc/server.h) +if(LINK_SO) + find_library(BRPC_LIB NAMES brpc) +else() + find_library(BRPC_LIB NAMES libbrpc.a brpc) +endif() +if((NOT BRPC_INCLUDE_PATH) OR (NOT BRPC_LIB)) + message(FATAL_ERROR "Fail to find brpc") +endif() +include_directories(${BRPC_INCLUDE_PATH}) + +find_path(GFLAGS_INCLUDE_PATH gflags/gflags.h) +find_library(GFLAGS_LIBRARY NAMES gflags libgflags) +if((NOT GFLAGS_INCLUDE_PATH) OR (NOT GFLAGS_LIBRARY)) + message(FATAL_ERROR "Fail to find gflags") +endif() +include_directories(${GFLAGS_INCLUDE_PATH}) + +execute_process( + COMMAND bash -c "grep \"namespace [_A-Za-z0-9]\\+ {\" ${GFLAGS_INCLUDE_PATH}/gflags/gflags_declare.h | head -1 | awk '{print $2}' | tr -d '\n'" + OUTPUT_VARIABLE GFLAGS_NS +) +if(${GFLAGS_NS} STREQUAL "GFLAGS_NAMESPACE") + execute_process( + COMMAND bash -c "grep \"#define GFLAGS_NAMESPACE [_A-Za-z0-9]\\+\" ${GFLAGS_INCLUDE_PATH}/gflags/gflags_declare.h | head -1 | awk '{print $3}' | tr -d '\n'" + OUTPUT_VARIABLE GFLAGS_NS + ) +endif() +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + include(CheckFunctionExists) + CHECK_FUNCTION_EXISTS(clock_gettime HAVE_CLOCK_GETTIME) + if(NOT HAVE_CLOCK_GETTIME) + set(DEFINE_CLOCK_GETTIME "-DNO_CLOCK_GETTIME_IN_MAC") + endif() +endif() + +set(CMAKE_CPP_FLAGS "${DEFINE_CLOCK_GETTIME} -DGFLAGS_NS=${GFLAGS_NS}") +set(CMAKE_CXX_FLAGS "${CMAKE_CPP_FLAGS} -g -D__const__= -pipe -W -Wall -Wno-unused-parameter -fPIC -fno-omit-frame-pointer") + +if(CMAKE_VERSION VERSION_LESS "3.1.3") + if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + endif() + if(CMAKE_CXX_COMPILER_ID STREQUAL "Clang") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11") + endif() +else() + set(CMAKE_CXX_STANDARD 11) + set(CMAKE_CXX_STANDARD_REQUIRED ON) +endif() + +find_path(LEVELDB_INCLUDE_PATH NAMES leveldb/db.h) +find_library(LEVELDB_LIB NAMES leveldb) +if ((NOT LEVELDB_INCLUDE_PATH) OR (NOT LEVELDB_LIB)) + message(FATAL_ERROR "Fail to find leveldb") +endif() +include_directories(${LEVELDB_INCLUDE_PATH}) + +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + set(OPENSSL_ROOT_DIR + "/usr/local/opt/openssl" # Homebrew installed OpenSSL + ) +endif() + +find_package(OpenSSL) +include_directories(${OPENSSL_INCLUDE_DIR}) + +set(DYNAMIC_LIB + ${CMAKE_THREAD_LIBS_INIT} + ${GFLAGS_LIBRARY} + ${PROTOBUF_LIBRARIES} + ${LEVELDB_LIB} + ${OPENSSL_CRYPTO_LIBRARY} + ${OPENSSL_SSL_LIBRARY} + ${THRIFT_LIB} + ${THRIFTNB_LIB} + dl + ) + +if(CMAKE_SYSTEM_NAME STREQUAL "Darwin") + set(DYNAMIC_LIB ${DYNAMIC_LIB} + pthread + "-framework CoreFoundation" + "-framework CoreGraphics" + "-framework CoreData" + "-framework CoreText" + "-framework Security" + "-framework Foundation" + "-Wl,-U,_MallocExtension_ReleaseFreeMemory" + "-Wl,-U,_ProfilerStart" + "-Wl,-U,_ProfilerStop") +endif() + +add_executable(mongo_press mongo_press.cpp) + +MESSAGE( STATUS "link library = ${BRPC_LIB} ${DYNAMIC_LIB}.") +target_link_libraries(mongo_press ${BRPC_LIB} ${DYNAMIC_LIB} mongo::bson_static) +# 
target_link_libraries(mongo_press ${BRPC_LIB} ${DYNAMIC_LIB})
diff --git a/example/mongo_c++/mongo_press.cpp b/example/mongo_c++/mongo_press.cpp
new file mode 100644
index 0000000000..2fc77a70e4
--- /dev/null
+++ b/example/mongo_c++/mongo_press.cpp
@@ -0,0 +1,543 @@
+// Copyright (c) 2014 Baidu, Inc.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//     http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+// A brpc-based test that talks with a MongoDB server.
+
+#include <gflags/gflags.h>
+#include <butil/logging.h>
+#include <butil/time.h>
+
+#include <brpc/channel.h>
+// #include
+#include <brpc/server.h>
+#include <bthread/bthread.h>
+#include <bvar/bvar.h>
+#include <cassert>
+#include <sstream>
+#include <vector>
+
+#include "brpc/mongo.h"
+
+DEFINE_string(connection_type, "pooled",
+              "Connection type. Available values: pooled, short");
+DEFINE_string(server, "127.0.0.1", "IP Address of server");
+DEFINE_int32(port, 27017, "Port of server");
+DEFINE_string(user, "brpcuser", "user name");
+DEFINE_string(password, "12345678", "password");
+DEFINE_string(database, "test", "database");
+DEFINE_string(collection, "people", "collection");
+// DEFINE_string(data, "ABCDEF", "data");
+DEFINE_int32(timeout_ms, 5000, "RPC timeout in milliseconds");
+DEFINE_int32(connect_timeout_ms, 5000, "Connect timeout in milliseconds");
+DEFINE_int32(max_retry, 3, "Max retries (not including the first RPC)");
+DEFINE_int32(thread_num, 1, "Number of threads to send requests");
+DEFINE_bool(use_bthread, true, "Use bthread to send requests");
+DEFINE_int32(dummy_port, -1, "Port of dummy server (for monitoring)");
+DEFINE_int32(op_type, 1,
+             "CRUD operation, 0:INSERT, 1:SELECT, 2:UPDATE, 3:COUNT, 4:DELETE, "
+             "5:findAndModify");
+DEFINE_bool(dont_fail, false, "Print fatal when a call fails");
+
+bvar::LatencyRecorder g_latency_recorder("client");
+bvar::Adder<int> g_error_count("client_error_count");
+
+struct SenderArgs {
+  int base_index;
+  brpc::Channel* mongo_channel;
+};
+
+std::pair<bool, brpc::MongoQueryResponse> get_more(brpc::Channel* channel,
+                                                   int64_t cursorid) {
+  brpc::MongoGetMoreRequest getMore_request;
+  getMore_request.set_database(FLAGS_database);
+  getMore_request.set_collection(FLAGS_collection);
+  getMore_request.set_cursorid(cursorid);
+  getMore_request.set_batch_size(100);
+  brpc::Controller cntl;
+  brpc::MongoQueryResponse query_response;
+  channel->CallMethod(NULL, &cntl, &getMore_request, &query_response, NULL);
+  if (!cntl.Failed()) {
+    return std::make_pair(true, query_response);
+  } else {
+    LOG(ERROR) << "error=" << cntl.ErrorText();
+    return std::make_pair(false, query_response);
+  }
+}
+
+static void* delete_test(void* void_args) {
+  SenderArgs* args = (SenderArgs*)void_args;
+  brpc::Channel* mongo_channel = args->mongo_channel;
+
+  // insert then delete
+  brpc::MongoInsertRequest insert_request;
+  brpc::MongoDeleteRequest delete_request;
+  while (!brpc::IsAskedToQuit()) {
+    // insert a random number of records
+    insert_request.Clear();
+    insert_request.set_database(FLAGS_database);
+    insert_request.set_collection(FLAGS_collection);
+    size_t number = random() % 10 + 1;
+    for (size_t i = 0; i < number; ++i) {
+      butil::bson::BsonPtr doc = butil::bson::new_bson();
+      std::string name = "test_" + std::to_string(i);
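+      // Any scalar field can be attached with the corresponding libbson
+      // BSON_APPEND_* convenience macro, e.g. a per-document sequence number
+      // (the "index" key here is purely illustrative):
+      BSON_APPEND_INT32(doc.get(), "index", static_cast<int32_t>(i));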
BSON_APPEND_UTF8(doc.get(), "name", name.c_str()); + BSON_APPEND_UTF8(doc.get(), "comment", "delete_test"); + insert_request.add_documents(doc); + } + brpc::Controller insert_cntl; + brpc::MongoInsertResponse insert_response; + mongo_channel->CallMethod(nullptr, &insert_cntl, &insert_request, + &insert_response, nullptr); + if (!insert_cntl.Failed()) { + if (insert_response.number() != number) { + std::stringstream ss; + for (size_t i = 0; i < insert_response.write_errors_size(); ++i) { + ss << insert_response.write_errors(i).errmsg; + } + LOG(INFO) << "insert failed, errmsg:" << ss.str(); + break; + } else { + LOG(INFO) << "insert succ, number:" << number; + } + } else { + LOG(INFO) << "insert failed, error=" << insert_cntl.ErrorText(); + break; + } + // delete records + delete_request.Clear(); + delete_request.set_database(FLAGS_database); + delete_request.set_collection(FLAGS_collection); + delete_request.set_delete_many(true); + butil::bson::BsonPtr delete_filter = butil::bson::new_bson(); + BSON_APPEND_UTF8(delete_filter.get(), "comment", "delete_test"); + delete_request.set_query(delete_filter); + brpc::Controller delete_cntl; + brpc::MongoDeleteResponse delete_response; + mongo_channel->CallMethod(nullptr, &delete_cntl, &delete_request, + &delete_response, nullptr); + if (!delete_cntl.Failed()) { + if (delete_response.number() != number) { + LOG(INFO) << "delete failed, expect:" << number + << " actual:" << delete_response.number(); + break; + } else { + LOG(INFO) << "delete succ, number:" << number; + } + } else { + LOG(INFO) << "delete failed, error=" << delete_cntl.ErrorText(); + break; + } + } +} + +static void* update_test(void* void_args) { + SenderArgs* args = (SenderArgs*)void_args; + brpc::Channel* mongo_channel = args->mongo_channel; + + // insert then update + brpc::MongoInsertRequest insert_request; + insert_request.set_database(FLAGS_database); + insert_request.set_collection(FLAGS_collection); + size_t number = random() % 10 + 1; + for (size_t i = 0; i < number; ++i) { + butil::bson::BsonPtr doc = butil::bson::new_bson(); + std::string name = "test_" + std::to_string(i); + BSON_APPEND_UTF8(doc.get(), "name", name.c_str()); + BSON_APPEND_UTF8(doc.get(), "comment", "update_test"); + insert_request.add_documents(doc); + } + brpc::Controller insert_cntl; + brpc::MongoInsertResponse insert_response; + mongo_channel->CallMethod(nullptr, &insert_cntl, &insert_request, + &insert_response, nullptr); + if (!insert_cntl.Failed()) { + if (insert_response.number() != number) { + std::stringstream ss; + for (size_t i = 0; i < insert_response.write_errors_size(); ++i) { + ss << insert_response.write_errors(i).errmsg; + } + LOG(INFO) << "insert failed, errmsg:" << ss.str(); + return nullptr; + } else { + LOG(INFO) << "insert succ, number:" << number; + } + } else { + LOG(INFO) << "insert failed, error=" << insert_cntl.ErrorText(); + return nullptr; + } + // test upsert + brpc::MongoUpdateRequest upsert_request; + upsert_request.set_database(FLAGS_database); + upsert_request.set_collection(FLAGS_collection); + upsert_request.set_upsert(true); + butil::bson::BsonPtr selector = butil::bson::new_bson(); + BSON_APPEND_UTF8(selector.get(), "comment", "upsert_test"); + upsert_request.set_selector(selector); + butil::bson::BsonPtr update_doc = butil::bson::new_bson(); + BSON_APPEND_INT32(update_doc.get(), "time", time(nullptr)); + butil::bson::BsonPtr update = butil::bson::new_bson(); + BSON_APPEND_DOCUMENT(update.get(), "$set", update_doc.get()); + upsert_request.set_update(update); + 
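+  // The update document built above has the shape {"$set": {"time": <now>}};
+  // with upsert=true the server inserts a new document when the selector
+  // matches nothing. For debugging, libbson can render it as extended JSON
+  // (the returned buffer is heap-allocated and must be released):
+  char* update_json = bson_as_canonical_extended_json(update.get(), nullptr);
+  LOG(INFO) << "upsert update doc:" << update_json;
+  bson_free(update_json);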
brpc::Controller update_cntl; + brpc::MongoUpdateResponse update_response; + mongo_channel->CallMethod(nullptr, &update_cntl, &upsert_request, + &update_response, nullptr); + if (!update_cntl.Failed()) { + std::stringstream ss; + if (update_response.upserted_docs_size()) { + for (size_t i = 0; i < update_response.upserted_docs_size(); ++i) { + const brpc::UpsertedDoc doc = update_response.upserted_docs(i); + char oid_str[25]; + bson_oid_to_string(&doc._id, oid_str); + ss << "index:" << doc.index << " _id:" << oid_str << "\n"; + } + } + LOG(INFO) << "upsert match num:" << update_response.matched_number() + << ", modify num:" << update_response.modified_number() + << ss.str(); + } else { + LOG(INFO) << "update failed, error=" << update_cntl.ErrorText(); + return nullptr; + } + brpc::MongoUpdateRequest update_request; + while (!brpc::IsAskedToQuit()) { + // update records + update_request.Clear(); + update_request.set_database(FLAGS_database); + update_request.set_collection(FLAGS_collection); + update_request.set_ordered(true); + update_request.set_multi(true); + butil::bson::BsonPtr selector = butil::bson::new_bson(); + BSON_APPEND_UTF8(selector.get(), "comment", "update_test"); + update_request.set_selector(selector); + butil::bson::BsonPtr update_doc = butil::bson::new_bson(); + BSON_APPEND_INT32(update_doc.get(), "time", time(nullptr)); + butil::bson::BsonPtr update = butil::bson::new_bson(); + BSON_APPEND_DOCUMENT(update.get(), "$set", update_doc.get()); + update_request.set_update(update); + brpc::Controller update_cntl; + brpc::MongoUpdateResponse update_response; + mongo_channel->CallMethod(nullptr, &update_cntl, &update_request, + &update_response, nullptr); + if (!update_cntl.Failed()) { + if (update_response.modified_number() != + update_response.matched_number()) { + LOG(INFO) << "update failed, matched number:" + << update_response.matched_number() + << ", modify number:" << update_response.modified_number(); + break; + } else { + LOG(INFO) << "update succ, match_number:" + << update_response.matched_number() + << ", modify number:" << update_response.modified_number(); + } + } else { + LOG(INFO) << "update failed, error=" << update_cntl.ErrorText(); + break; + } + bthread_usleep(2 * 1000 * 1000); + } +} + +static void* find_and_modify_test(void* void_args) { + SenderArgs* args = (SenderArgs*)void_args; + brpc::Channel* mongo_channel = args->mongo_channel; + + // insert then delete + brpc::MongoFindAndModifyRequest find_and_modify_request; + while (!brpc::IsAskedToQuit()) { + find_and_modify_request.Clear(); + find_and_modify_request.set_database(FLAGS_database); + find_and_modify_request.set_collection(FLAGS_collection); + // query + brpc::BsonPtr query = butil::bson::new_bson(); + BSON_APPEND_UTF8(query.get(), "counter", "id"); + find_and_modify_request.set_query(query); + // update + brpc::BsonPtr update = butil::bson::new_bson(); + // inc value + brpc::BsonPtr inc_value = butil::bson::new_bson(); + BSON_APPEND_INT32(inc_value.get(), "value", 1); + BSON_APPEND_DOCUMENT(update.get(), "$inc", inc_value.get()); + find_and_modify_request.set_update(update); + + find_and_modify_request.set_upsert(true); + find_and_modify_request.set_return_new(true); + brpc::Controller cntl; + brpc::MongoFindAndModifyResponse find_and_modify_response; + mongo_channel->CallMethod(nullptr, &cntl, &find_and_modify_request, + &find_and_modify_response, nullptr); + if (!cntl.Failed()) { + g_latency_recorder << cntl.latency_us(); + if (find_and_modify_response.has_upserted()) { + char oid_str[25]; + 
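+        // bson_oid_to_string() writes the 12-byte ObjectId as 24 hex
+        // characters plus a trailing NUL, hence the 25-byte buffer.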
bson_oid_to_string(&(find_and_modify_response.upserted()), oid_str); + LOG(INFO) << "upserted oid:" << oid_str; + } + if (find_and_modify_response.has_value()) { + const char* str = bson_as_canonical_extended_json( + find_and_modify_response.value().get(), nullptr); + LOG(INFO) << "value:" << str; + } + } else { + LOG(INFO) << "find_and_modify failed, error=" << cntl.ErrorText(); + break; + } + } + // 通过find_and_modify remove + find_and_modify_request.Clear(); + find_and_modify_request.set_database(FLAGS_database); + find_and_modify_request.set_collection(FLAGS_collection); + // query + brpc::BsonPtr query = butil::bson::new_bson(); + BSON_APPEND_UTF8(query.get(), "counter", "id"); + find_and_modify_request.set_query(query); + // remove + find_and_modify_request.set_remove(true); + brpc::Controller cntl; + brpc::MongoFindAndModifyResponse find_and_modify_response; + mongo_channel->CallMethod(nullptr, &cntl, &find_and_modify_request, + &find_and_modify_response, nullptr); + if (!cntl.Failed()) { + if (find_and_modify_response.has_upserted()) { + char oid_str[25]; + bson_oid_to_string(&(find_and_modify_response.upserted()), oid_str); + LOG(INFO) << "remove upserted oid:" << oid_str; + } + if (find_and_modify_response.has_value()) { + const char* str = bson_as_canonical_extended_json( + find_and_modify_response.value().get(), nullptr); + LOG(INFO) << "remove value:" << str; + } + } else { + LOG(INFO) << "find_and_modify failed, error=" << cntl.ErrorText(); + } +} + +// Send `command' to mongo-server via `channel' +static void* sender(void* void_args) { + SenderArgs* args = (SenderArgs*)void_args; + + google::protobuf::Message* request = nullptr; + if (FLAGS_op_type == 0) { + // insert + // brpc::MongoInsertRequest *insert_request = new + // brpc::MongoInsertRequest(); insert_request->set_database(FLAGS_database); + // insert_request->set_collection(FLAGS_collection); + // butil::bson::BsonPtr doc1 = butil::bson::new_bson(); + // BSON_APPEND_UTF8(doc1.get(), "name", "test2"); + // BSON_APPEND_UTF8(doc1.get(), "comment", "insert2"); + // insert_request->add_documents(doc1); + // request = insert_request; + } else if (FLAGS_op_type == 1) { + // query + brpc::MongoQueryRequest* query_request = new brpc::MongoQueryRequest(); + query_request->set_database(FLAGS_database); + query_request->set_collection(FLAGS_collection); + brpc::BsonPtr sort_doc = butil::bson::new_bson(); + BSON_APPEND_INT32(sort_doc.get(), "name", -1); + query_request->set_sort(sort_doc); + query_request->set_limit(1); + request = query_request; + } else if (FLAGS_op_type == 2) { + // update + + } else if (FLAGS_op_type == 3) { + // count + brpc::MongoCountRequest* count_request = new brpc::MongoCountRequest(); + count_request->set_database(FLAGS_database); + count_request->set_collection(FLAGS_collection); + request = count_request; + } + + while (!brpc::IsAskedToQuit()) { + google::protobuf::Message* response = nullptr; + brpc::Controller cntl; + if (FLAGS_op_type == 0) { + brpc::MongoInsertRequest* insert_request = new brpc::MongoInsertRequest(); + insert_request->set_database(FLAGS_database); + insert_request->set_collection(FLAGS_collection); + butil::bson::BsonPtr doc1 = butil::bson::new_bson(); + BSON_APPEND_UTF8(doc1.get(), "name", "test1"); + BSON_APPEND_UTF8(doc1.get(), "comment", "insert1"); + butil::bson::BsonPtr doc2 = butil::bson::new_bson(); + BSON_APPEND_UTF8(doc2.get(), "name", "test2"); + BSON_APPEND_UTF8(doc2.get(), "comment", "insert2"); + insert_request->add_documents(doc1); + 
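+      // add_documents() only queues the shared BsonPtr; when the request is
+      // serialized (MongoInsertRequest::SerializeTo in src/brpc/mongo.cpp) the
+      // queued documents are emitted as one OP_MSG "documents" sequence and a
+      // generated ObjectId _id is appended to any document lacking one.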
insert_request->add_documents(doc2);
+      request = insert_request;
+      response = new brpc::MongoInsertResponse();
+    } else if (FLAGS_op_type == 1) {
+      response = new brpc::MongoQueryResponse();
+    } else if (FLAGS_op_type == 2) {
+    } else if (FLAGS_op_type == 3) {
+      response = new brpc::MongoCountResponse();
+    }
+
+    args->mongo_channel->CallMethod(NULL, &cntl, request, response, NULL);
+    const int64_t elp = cntl.latency_us();
+    if (!cntl.Failed()) {
+      g_latency_recorder << elp;
+      if (FLAGS_op_type == 0) {
+        brpc::MongoInsertResponse* insert_response =
+            dynamic_cast<brpc::MongoInsertResponse*>(response);
+        // LOG(INFO) << "insert return num:" << insert_response->number()
+        //           << " write_errors num:"
+        //           << insert_response->write_errors().size();
+        for (size_t i = 0; i < insert_response->write_errors().size(); ++i) {
+          brpc::WriteError write_error = insert_response->write_errors(i);
+          LOG(INFO) << "index:" << write_error.index
+                    << " code:" << write_error.code
+                    << " errmsg:" << write_error.errmsg;
+        }
+      } else if (FLAGS_op_type == 1) {
+        brpc::MongoQueryResponse* query_response =
+            dynamic_cast<brpc::MongoQueryResponse*>(response);
+        assert(query_response);
+        LOG(INFO) << "query return num:" << query_response->number_returned();
+        LOG(INFO) << "query return document num:"
+                  << query_response->documents().size();
+        LOG_IF(INFO, query_response->has_cursorid())
+            << "cursorid:" << query_response->cursorid();
+        int64_t cursor_id = 0;
+        if (query_response->has_cursorid()) {
+          cursor_id = query_response->cursorid();
+        }
+        while (cursor_id) {
+          std::pair<bool, brpc::MongoQueryResponse> getMore_result =
+              get_more(args->mongo_channel, cursor_id);
+          if (getMore_result.first) {
+            auto& getMore_response = getMore_result.second;
+            // getMore succeeded
+            LOG(INFO) << "query return num:"
+                      << getMore_response.number_returned();
+            LOG(INFO) << "query return document num:"
+                      << getMore_response.documents().size();
+            LOG_IF(INFO, getMore_response.has_cursorid())
+                << "cursorid:" << getMore_response.cursorid();
+            if (getMore_response.has_cursorid()) {
+              cursor_id = getMore_response.cursorid();
+            } else {
+              cursor_id = 0;
+            }
+          } else {
+            cursor_id = 0;
+          }
+        }
+      } else if (FLAGS_op_type == 2) {
+      } else if (FLAGS_op_type == 3) {
+        brpc::MongoCountResponse* count_response =
+            dynamic_cast<brpc::MongoCountResponse*>(response);
+        assert(count_response);
+        LOG(INFO) << "count return num:" << count_response->number();
+      }
+    } else {
+      g_error_count << 1;
+      CHECK(brpc::IsAskedToQuit() || !FLAGS_dont_fail)
+          << "error=" << cntl.ErrorText() << " latency=" << elp;
+      // We can't connect to the server. Sleep a while. Notice that this
+      // sleep is only here to prevent the thread from spinning too fast;
+      // a production server should continue its business logic instead.
+    }
+    // bthread_usleep(2 * 1000 * 1000);
+  }
+  return NULL;
+}
+
+int main(int argc, char* argv[]) {
+  // Parse gflags. We recommend you use gflags as well.
+  GFLAGS_NS::ParseCommandLineFlags(&argc, &argv, true);
+
+  // A Channel represents a communication line to a Server. Notice that
+  // Channel is thread-safe and can be shared by all threads in your program.
+  brpc::Channel channel;
+
+  // Initialize the channel. NULL means using default options.
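+  // PROTOCOL_MONGO is registered in global.cpp with CONNECTION_TYPE_POOLED,
+  // which matches the default --connection_type=pooled above.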
+ brpc::ChannelOptions options; + options.protocol = brpc::PROTOCOL_MONGO; + options.connection_type = FLAGS_connection_type; + options.timeout_ms = FLAGS_timeout_ms /*milliseconds*/; + options.connect_timeout_ms = FLAGS_connect_timeout_ms; + options.max_retry = FLAGS_max_retry; + LOG(INFO) << "passwd:" << FLAGS_password; + // options.auth = new brpc::policy::MysqlAuthenticator( + // FLAGS_user, FLAGS_password, FLAGS_schema, FLAGS_params, + // FLAGS_collation); + if (FLAGS_server.find("mongo://") != std::string::npos) { + if (channel.Init(FLAGS_server.c_str(), "random", &options) != 0) { + LOG(ERROR) << "Fail to initialize mongo channel"; + return -1; + } + } else { + if (channel.Init(FLAGS_server.c_str(), FLAGS_port, &options) != 0) { + LOG(ERROR) << "Fail to initialize channel"; + return -1; + } + } + + if (FLAGS_dummy_port >= 0) { + brpc::StartDummyServerAt(FLAGS_dummy_port); + } + + // test CRUD operations + std::vector bids; + std::vector pids; + bids.resize(FLAGS_thread_num); + pids.resize(FLAGS_thread_num); + std::vector args; + args.resize(FLAGS_thread_num); + decltype(sender)* test_func = sender; + if (FLAGS_op_type == 4) { + test_func = delete_test; + } else if (FLAGS_op_type == 2) { + test_func = update_test; + } else if (FLAGS_op_type == 5) { + test_func = find_and_modify_test; + } + for (int i = 0; i < FLAGS_thread_num; ++i) { + args[i].base_index = i; + args[i].mongo_channel = &channel; + if (!FLAGS_use_bthread) { + if (pthread_create(&pids[i], NULL, test_func, &args[i]) != 0) { + LOG(ERROR) << "Fail to create pthread"; + return -1; + } + } else { + if (bthread_start_background(&bids[i], NULL, test_func, &args[i]) != 0) { + LOG(ERROR) << "Fail to create bthread"; + return -1; + } + } + } + + while (!brpc::IsAskedToQuit()) { + sleep(1); + + LOG(INFO) << "Accessing mongo-server at qps=" << g_latency_recorder.qps(1) + << " latency=" << g_latency_recorder.latency(1); + } + + LOG(INFO) << "mongo_client is going to quit"; + for (int i = 0; i < FLAGS_thread_num; ++i) { + if (!FLAGS_use_bthread) { + pthread_join(pids[i], NULL); + } else { + bthread_join(bids[i], NULL); + } + } + + return 0; +} diff --git a/message_code_generate.py b/message_code_generate.py new file mode 100644 index 0000000000..b17a9318f2 --- /dev/null +++ b/message_code_generate.py @@ -0,0 +1,374 @@ +""" +message MongoGetMoreRequest { + required string database = 1; + required string collection = 2; + optional BsonPtr query = 3; + optional int64_t skip = 4; + optional int64_t limit = 5; +} +""" + +import os + +MESSAGE_NAME = "MongoCountRequest" +MESSAGE_FIELDS = (("required", "std::string", "database"), + ("required", "std::string", "collection"), + ("optional", "BsonPtr", "query"), + ("optional", "int64_t", "skip"), + ("optional", "int64_t", "limit")) + +TRIVAL_TYPE = ("int64_t", "uint64_t", "int32_t", "uint32_t") + +def is_trival_type(field_type): + if field_type in set(TRIVAL_TYPE): + return True + else: + return False + +def message_declare_generate(message_name, fields): + field_code = "" + """ + 0 -> MessageName + 1 -> field code + """ + message_declare = """ +class {0} : public ::google::protobuf::Message {{ + public: + {0}(); + virtual ~{0}(); + {0}(const {0}& from); + {0}& operator=(const {0}& from); + void Swap({0}* other); + bool SerializeTo(butil::IOBuf* buf) const; + {0}* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const {0}& from); + void MergeFrom(const {0}& from); + void Clear(); + bool 
IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const {{ return _cached_size_; }} + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + {1} + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}}; +""" + for i in range(len(MESSAGE_FIELDS)): + field_code = field_code + field_generate(MESSAGE_NAME, MESSAGE_FIELDS[i], i + 1) + return message_declare.format(message_name, field_code) + +def sharedctor_generate(fields): + result = "_cached_size_ = 0;\n" + for field in fields: + message_field_type = field[0] + field_type = field[1] + field_name = field[2] + if not message_field_type == "repeated" and is_trival_type(field_type): + result = result + "{0}_ = 0;\n".format(field_name) + return result + +def mergefrom_generate(fields): + result = "" + for field in fields: + message_field_type = field[0] + field_type = field[1] + field_name = field[2] + if not message_field_type == "repeated": + result = result + """ + if (from.has_{0}()) {{ + set_{0}(from.{0}()); + }} + """.format(field_name) + else: + # repated + result = result + """ + {0}_.insert({0}_.end(), from.{0}.cbegin(), from.{0}.cend()); + """.format(field_name) + return result + +def clear_generate(fields): + result = "" + for field in fields: + field_name = field[2] + result += "clear_{0}();\n".format(field_name) + return result + +def isinit_generate(fields): + result = "" + for field in fields: + message_field_type = field[0] + field_name = field[2] + if message_field_type == "required": + if len(result) == 0: + result += "return has_{0}()".format(field_name) + else: + result += " && has_{0}()".format(field_name) + if len(result) == 0: + result = "return true;" + else: + result += ";" + return result + +def message_define_generate(message_name, fields): + """ + 0 -> MessageName + 1 -> SharedCtorBody + 2 -> MergeFromBody + 3 -> ClearBody + 4 -> IsInitBody + """ + message_define = """ +{0}::{0}() : ::google::protobuf::Message() {{ + SharedCtor(); +}} + +{0}::~{0}() {{ SharedDtor(); }} + +{0}::{0}(const {0}& from) + : ::google::protobuf::Message() {{ + SharedCtor(); + MergeFrom(from); +}} + +{0}& {0}::operator=(const {0}& from) {{ + CopyFrom(from); + return *this; +}} + +void {0}::SharedCtor() {{ + {1} +}} + +void {0}::SharedDtor() {{}} + +bool {0}::SerializeTo(butil::IOBuf* buf) const {{ + // TODO custom definetion +}} + +void {0}::Swap({0}* other) {{}} + +{0}* {0}::New() const {{ + return new {0}(); +}} + +void {0}::CopyFrom(const ::google::protobuf::Message& from) {{ + if (&from == this) return; + Clear(); + MergeFrom(from); +}} + +void {0}::MergeFrom(const ::google::protobuf::Message& from) {{ + GOOGLE_CHECK_NE(&from, this); + const {0}* source = + dynamic_cast(&from); + if (source == NULL) {{ + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + }} else {{ + MergeFrom(*source); + }} +}} + +void {0}::CopyFrom(const {0}& from) {{ + if (&from == this) return; + Clear(); + MergeFrom(from); +}} + +void {0}::MergeFrom(const {0}& from) {{ + GOOGLE_CHECK_NE(&from, this); + {2} +}} + +void {0}::Clear() {{ + {3} +}} + +bool 
{0}::IsInitialized() const {{ + {4} +}} + +bool {0}::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) {{ + LOG(WARNING) << "You're not supposed to parse a {0}"; + return true; +}} + +void {0}::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const {{ + LOG(WARNING) << "You're not supposed to serialize a {0}"; +}} + +::google::protobuf::uint8* {0}::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const {{ + return output; +}} + +const ::google::protobuf::Descriptor* {0}::descriptor() {{ + return {0}Base::descriptor(); +}} + +::google::protobuf::Metadata {0}::GetMetadata() const {{ + ::google::protobuf::Metadata metadata; + metadata.descriptor = descriptor(); + metadata.reflection = NULL; + return metadata; +}} + +void {0}::SetCachedSize(int size) const {{ + _cached_size_ = size; +}} +""" + return message_define.format(message_name, sharedctor_generate(fields), + mergefrom_generate(fields), clear_generate(fields), isinit_generate(fields)) + + +""" +INPUT: tuple: ("required", "string", "database") + int: filed_number +OUTPUT: tuple: (func_declare_1, func_declare_2, field_define, func_define) +""" +def field_generate(message_name, field_info, filed_number): + message_field_type = field_info[0] # optional, required, repeated + field_type = field_info[1] # string, BsonPtr, int32_t + field_name = field_info[2] # query, limit, skip + """ + 0 -> field_name + 1 -> type + 2 -> field_number + 3 -> has_bit(0x00000001u) + """ + trival_declare_1 = """ + // {0} + public: + static const int k{0}FieldNumber = {2}; + {1} {0}() const {{ + return {0}_; + }} + bool has_{0}() const {{ + return _has_bits_[0] & {3}; + }} + void clear_{0}() {{ + clear_has_{0}(); + {0}_ = 0; + }} + void set_{0}({1} value) {{ + {0}_ = value; + set_has_{0}(); + }} + private: + void set_has_{0}() {{ + _has_bits_[0] |= {3}; + }} + void clear_has_{0}() {{ + _has_bits_[0] &= ~{3}; + }} + + {1} {0}_; + """ + + """ + 0 -> field_name + 1 -> type + 2 -> field_number + 3 -> has_bit(0x00000001u) + """ + notrival_declare_1 = """ + // {0} + public: + static const int k{0}FieldNumber = {2}; + const {1}& {0}() const {{ + return {0}_; + }} + bool has_{0}() const {{ + return _has_bits_[0] & {3}; + }} + void clear_{0}() {{ + clear_has_{0}(); + {0}_.clear(); + }} + void set_{0}({1} value) {{ + {0}_ = value; + set_has_{0}(); + }} + private: + void set_has_{0}() {{ + _has_bits_[0] |= {3}; + }} + void clear_has_{0}() {{ + _has_bits_[0] &= ~{3}; + }} + + {1} {0}_; + """ + + """ + 0 -> field_name + 1 -> type + 2 -> field_number + """ + repeated_declare_1 = """ + // {0} + public: + static const int k{0}FieldNumber = {2}; + const std::vector<{1}>& {0}() const {{ + return {0}_; + }} + int {0}_size() const {{ + return {0}_.size(); + }} + void clear_{0}() {{ + {0}_.clear(); + }} + const {1}& {0}(int index) const {{ + return {0}_[index]; + }} + {1}* mutable_{0}(int index) {{ + return &{0}_[index]; + }} + void add_{0}({1} value) {{ + {0}_.push_back(std::move(value)); + }} + + private: + std::vector<{1}> {0}_; + """ + is_trival = is_trival_type(field_type) + bit_str = hex(pow(2, filed_number - 1)) + "u" + if message_field_type == "repeated": + return repeated_declare_1.format(field_name, field_type, filed_number) + elif is_trival: + return trival_declare_1.format(field_name, field_type, filed_number, bit_str) + else: + return notrival_declare_1.format(field_name, field_type, filed_number, bit_str) + +if __name__ == "__main__": + with open(MESSAGE_NAME + ".h", 'w') as head_file: + 
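+        # NOTE: opening with mode 'w' already truncates, so the following
+        # seek(0)/truncate() pair is a defensive no-op.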
head_file.seek(0)
+        head_file.truncate()
+        head_file.write(message_declare_generate(MESSAGE_NAME, MESSAGE_FIELDS))
+    with open(MESSAGE_NAME + ".cpp", 'w') as source_file:
+        source_file.seek(0)
+        source_file.truncate()
+        source_file.write(message_define_generate(MESSAGE_NAME, MESSAGE_FIELDS))
+    os.system("clang-format -style=Google -i %s.*" % (MESSAGE_NAME,))
+    # source_file = open(MESSAGE_NAME + ".cpp")
+    # print(message_define_generate(MESSAGE_NAME, MESSAGE_FIELDS))
+    # print(message_declare_generate(MESSAGE_NAME, MESSAGE_FIELDS))
diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 209c0e2870..c6dfd216a2 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -23,6 +23,7 @@ endif()
 include_directories(${CMAKE_CURRENT_BINARY_DIR})
 include_directories(${PROJECT_SOURCE_DIR}/src)
+
 add_library(BUTIL_LIB OBJECT ${BUTIL_SOURCES})
 add_library(SOURCES_LIB OBJECT ${SOURCES})
 add_dependencies(SOURCES_LIB PROTO_LIB)
@@ -35,6 +36,8 @@ add_library(brpc-static STATIC $<TARGET_OBJECTS:BUTIL_LIB>
                                $<TARGET_OBJECTS:SOURCES_LIB>
                                $<TARGET_OBJECTS:PROTO_LIB>)
+target_link_libraries(brpc-static mongo::bson_static)
+
 if(BRPC_WITH_THRIFT)
     target_link_libraries(brpc-static thrift)
 endif()
@@ -55,6 +58,7 @@ if(BUILD_SHARED_LIBS)
                             $<TARGET_OBJECTS:SOURCES_LIB>
                             $<TARGET_OBJECTS:PROTO_LIB>)
     target_link_libraries(brpc-shared ${DYNAMIC_LIB})
+    target_link_libraries(brpc-shared mongo::bson_shared)
     if(BRPC_WITH_GLOG)
         target_link_libraries(brpc-shared ${GLOG_LIB})
     endif()
diff --git a/src/brpc/global.cpp b/src/brpc/global.cpp
index ced8a11cbc..cc9db8c876 100755
--- a/src/brpc/global.cpp
+++ b/src/brpc/global.cpp
@@ -38,6 +38,7 @@
 #include "brpc/policy/remote_file_naming_service.h"
 #include "brpc/policy/consul_naming_service.h"
 #include "brpc/policy/discovery_naming_service.h"
+#include "brpc/policy/mongo_naming_service.h"
 
 // Load Balancers
 #include "brpc/policy/round_robin_load_balancer.h"
@@ -135,6 +136,7 @@ struct GlobalExtensions {
     RemoteFileNamingService rfns;
     ConsulNamingService cns;
     DiscoveryNamingService dcns;
+    MongoNamingService mns;
 
     RoundRobinLoadBalancer rr_lb;
     WeightedRoundRobinLoadBalancer wrr_lb;
@@ -358,6 +360,7 @@ static void GlobalInitializeOrDieImpl() {
     NamingServiceExtension()->RegisterOrDie("remotefile", &g_ext->rfns);
     NamingServiceExtension()->RegisterOrDie("consul", &g_ext->cns);
     NamingServiceExtension()->RegisterOrDie("discovery", &g_ext->dcns);
+    NamingServiceExtension()->RegisterOrDie("mongo", &g_ext->mns);
 
     // Load Balancers
     LoadBalancerExtension()->RegisterOrDie("rr", &g_ext->rr_lb);
@@ -504,8 +507,10 @@ static void GlobalInitializeOrDieImpl() {
     }
     Protocol mongo_protocol = { ParseMongoMessage,
-                                NULL, NULL,
-                                ProcessMongoRequest, NULL,
+                                SerializeMongoRequest,
+                                PackMongoRequest,
+                                ProcessMongoRequest,
+                                ProcessMongoResponse,
                                 NULL, NULL, NULL,
                                 CONNECTION_TYPE_POOLED, "mongo" };
     if (RegisterProtocol(PROTOCOL_MONGO, mongo_protocol) != 0) {
diff --git a/src/brpc/mongo.cpp b/src/brpc/mongo.cpp
new file mode 100644
index 0000000000..5823c54acc
--- /dev/null
+++ b/src/brpc/mongo.cpp
@@ -0,0 +1,2216 @@
+#include "brpc/mongo.h"
+
+#include <google/protobuf/reflection_ops.h>  // ReflectionOps::Merge
+
+#include "butil/bson_util.h"
+
+namespace brpc {
+
+bool DocumentSequence::SerializeTo(butil::IOBuf* buf) const {
+  if (identifier.empty()) {
+    return false;
+  }
+  // compute the total size
+  int32_t total_size = 4;  // int32_t size
+  total_size += (identifier.size() + 1);
+  for (auto& document : documents) {
+    if (!document) {
+      return false;
+    }
+    total_size += document.get()->len;
+  }
+  size = total_size;
+  buf->append(static_cast<const void*>(&size), 4);
+  buf->append(identifier);
+  buf->push_back(0);
+  for (auto& document : documents) {
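+    // bson_get_data() returns a pointer to the document's raw BSON bytes and
+    // ->len is its total encoded size, so each document is appended verbatim.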
buf->append(static_cast(bson_get_data(document.get())), + document.get()->len); + } + assert(buf->length() == size); + return true; +} + +bool Section::SeralizeTo(butil::IOBuf* buf) const { + if (type == 0) { + // Body + if (!body_document) { + return false; + } + uint8_t kind = 0; + buf->append(static_cast(&kind), 1); + buf->append(static_cast(bson_get_data(body_document.get())), + body_document.get()->len); + return true; + } else if (type == 1) { + // Document Sequence + if (!document_sequence) { + return false; + } + uint8_t kind = 1; + butil::IOBuf buf2; + bool ret = document_sequence->SerializeTo(&buf2); + if (!ret) { + return false; + } + buf->append(static_cast(&kind), 1); + buf->append(buf2); + return true; + } else { + return false; + } +} + +MongoQueryRequest::MongoQueryRequest() : ::google::protobuf::Message() { + SharedCtor(); +} + +MongoQueryRequest::~MongoQueryRequest() { SharedDtor(); } + +MongoQueryRequest::MongoQueryRequest(const MongoQueryRequest& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); +} + +MongoQueryRequest& MongoQueryRequest::operator=(const MongoQueryRequest& from) { + CopyFrom(from); + return *this; +} + +void MongoQueryRequest::SharedCtor() { + _cached_size_ = 0; + skip_ = 0; + limit_ = 0; +} + +void MongoQueryRequest::SharedDtor() {} + +bool MongoQueryRequest::SerializeTo(butil::IOBuf* buf) const { + if (!IsInitialized()) { + LOG(WARNING) << "MongoQueryRequest not initialize"; + return false; + } + BsonPtr query_element_ptr = butil::bson::new_bson(); + bson_t* query_element = query_element_ptr.get(); + // collection + BSON_APPEND_UTF8(query_element, "find", collection().c_str()); + // query_filter + auto query_filter = query(); + if (!query_filter) { + query_filter.reset(bson_new(), bson_free); + } + BSON_APPEND_DOCUMENT(query_element, "filter", query_filter.get()); + // query sort + if (sort()) { + BSON_APPEND_DOCUMENT(query_element, "sort", sort().get()); + } + if (!fields().empty()) { + // 是否需要bson_free + BsonPtr field_doc_ptr = butil::bson::new_bson(); + for (auto& field : fields()) { + BSON_APPEND_INT32(field_doc_ptr.get(), field.c_str(), 1); + } + BSON_APPEND_DOCUMENT(query_element, "projection", field_doc_ptr.get()); + } + if (has_skip()) { + BSON_APPEND_INT64(query_element, "skip", skip()); + } + if (has_limit()) { + BSON_APPEND_INT64(query_element, "limit", limit()); + } + // database + BSON_APPEND_UTF8(query_element, "$db", database().c_str()); + // Message Flags 4bytes + // Section[] Kind(1byte): Body(0); BodyDocument(Bson) + uint32_t flag_bits = 0; + buf->append(static_cast(&flag_bits), 4); + uint8_t kind = 0; // Body kind + buf->append(static_cast(&kind), 1); + buf->append(static_cast(bson_get_data(query_element)), + query_element->len); + return true; +} + +void MongoQueryRequest::Swap(MongoQueryRequest* other) {} + +MongoQueryRequest* MongoQueryRequest::New() const { + return new MongoQueryRequest(); +} + +void MongoQueryRequest::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoQueryRequest::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const MongoQueryRequest* source = + dynamic_cast(&from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void MongoQueryRequest::CopyFrom(const MongoQueryRequest& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoQueryRequest::MergeFrom(const 
MongoQueryRequest& from) { + GOOGLE_CHECK_NE(&from, this); + + if (from.has_database()) { + set_database(from.database()); + } + + if (from.has_collection()) { + set_collection(from.collection()); + } + + if (from.has_query()) { + set_query(from.query()); + } + + if (from.has_sort()) { + set_sort(from.sort()); + } + + if (from.has_skip()) { + set_skip(from.skip()); + } + + if (from.has_limit()) { + set_limit(from.limit()); + } + + fields_.insert(fields_.end(), from.fields().cbegin(), from.fields().cend()); +} + +void MongoQueryRequest::Clear() { + clear_database(); + clear_collection(); + clear_query(); + clear_sort(); + clear_skip(); + clear_limit(); + clear_fields(); +} + +bool MongoQueryRequest::IsInitialized() const { + return has_database() && has_collection(); +} + +bool MongoQueryRequest::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { + LOG(WARNING) << "You're not supposed to parse a MongoQueryRequest"; + return true; +} + +void MongoQueryRequest::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + LOG(WARNING) << "You're not supposed to serialize a MongoQueryRequest"; +} + +::google::protobuf::uint8* MongoQueryRequest::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const { + return output; +} + +const ::google::protobuf::Descriptor* MongoQueryRequest::descriptor() { + return MongoQueryRequestBase::descriptor(); +} + +::google::protobuf::Metadata MongoQueryRequest::GetMetadata() const { + ::google::protobuf::Metadata metadata; + metadata.descriptor = descriptor(); + metadata.reflection = NULL; + return metadata; +} + +void MongoQueryRequest::SetCachedSize(int size) const { _cached_size_ = size; } + +MongoQueryResponse::MongoQueryResponse() : ::google::protobuf::Message() { + SharedCtor(); +} + +MongoQueryResponse::MongoQueryResponse(const MongoQueryResponse& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); +} + +void MongoQueryResponse::SharedCtor() { + cursorid_ = 0; + starting_from_ = 0; + number_returned_ = 0; + _cached_size_ = 0; +} + +MongoQueryResponse::~MongoQueryResponse() { SharedDtor(); } + +void MongoQueryResponse::SharedDtor() {} + +void MongoQueryResponse::Swap(MongoQueryResponse* other) {} + +MongoQueryResponse* MongoQueryResponse::New() const { + return new MongoQueryResponse; +} + +void MongoQueryResponse::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoQueryResponse::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const MongoQueryResponse* source = + dynamic_cast(&from); + if (source == nullptr) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void MongoQueryResponse::CopyFrom(const MongoQueryResponse& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoQueryResponse::MergeFrom(const MongoQueryResponse& from) { + GOOGLE_CHECK_NE(&from, this); + if (from.has_cursorid()) { + set_cursorid(from.cursorid()); + } + if (from.has_starting_from()) { + set_starting_from(from.starting_from()); + } + if (from.has_number_returned()) { + set_number_returned(from.number_returned()); + } + documents_.insert(documents_.end(), from.documents_.cbegin(), + from.documents_.cend()); + if (from.has_ns()) { + set_ns(from.ns()); + } +} + +void MongoQueryResponse::Clear() { + clear_cursorid(); + clear_starting_from(); + clear_number_returned(); + 
clear_documents(); + clear_ns(); +} + +bool MongoQueryResponse::IsInitialized() const { return true; } + +// int ByteSize() const; +bool MongoQueryResponse::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { + LOG(WARNING) << "You're not supposed to parse a MongoQueryResponse"; + return true; +} +void MongoQueryResponse::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + LOG(WARNING) << "You're not supposed to serialize a MongoQueryResponse"; +} +::google::protobuf::uint8* MongoQueryResponse::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const { + return output; +} + +const ::google::protobuf::Descriptor* MongoQueryResponse::descriptor() { + return MongoQueryResponseBase::descriptor(); +} + +::google::protobuf::Metadata MongoQueryResponse::GetMetadata() const { + ::google::protobuf::Metadata metadata; + metadata.descriptor = descriptor(); + metadata.reflection = NULL; + return metadata; +} + +MongoGetMoreRequest::MongoGetMoreRequest() : ::google::protobuf::Message() { + SharedCtor(); +} + +MongoGetMoreRequest::~MongoGetMoreRequest() { SharedDtor(); } + +MongoGetMoreRequest::MongoGetMoreRequest(const MongoGetMoreRequest& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); +} + +void MongoGetMoreRequest::Swap(MongoGetMoreRequest* other) {} + +bool MongoGetMoreRequest::SerializeTo(butil::IOBuf* buf) const { + if (!IsInitialized()) { + LOG(WARNING) << "MongoGetMoreRequest not initialize"; + return false; + } + BsonPtr get_more_element_ptr = butil::bson::new_bson(); + bson_t* get_more_element = get_more_element_ptr.get(); + // getMore + BSON_APPEND_INT64(get_more_element, "getMore", cursorid()); + // collection + BSON_APPEND_UTF8(get_more_element, "collection", collection().c_str()); + // batch_size + if (has_batch_size()) { + BSON_APPEND_DOUBLE(get_more_element, "batchSize", + static_cast(batch_size())); + } + // $db + BSON_APPEND_UTF8(get_more_element, "$db", database().c_str()); + // Message Flags 4bytes + // Section[] Kind(1byte): Body(0); BodyDocument(Bson) + uint32_t flag_bits = 0; + buf->append(static_cast(&flag_bits), 4); + uint8_t kind = 0; // Body kind + buf->append(static_cast(&kind), 1); + buf->append(static_cast(bson_get_data(get_more_element)), + get_more_element->len); + return true; +} + +MongoGetMoreRequest* MongoGetMoreRequest::New() const { + return new MongoGetMoreRequest; +} + +void MongoGetMoreRequest::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoGetMoreRequest::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const MongoGetMoreRequest* source = + dynamic_cast(&from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void MongoGetMoreRequest::CopyFrom(const MongoGetMoreRequest& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoGetMoreRequest::MergeFrom(const MongoGetMoreRequest& from) { + GOOGLE_CHECK_NE(&from, this); + if (from.has_database()) { + set_database(from.database()); + } + if (from.has_collection()) { + set_collection(from.collection()); + } + if (from.has_cursorid()) { + set_cursorid(from.cursorid()); + } + if (from.has_batch_size()) { + set_batch_size(from.batch_size()); + } + if (from.has_max_time_ms()) { + set_max_time_ms(from.max_time_ms()); + } + if (from.has_comment()) { + set_comment(from.comment()); + } 
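+  // As in the other MergeFrom() overloads, each optional field is copied only
+  // when the source has-bit is set; these hand-written messages track
+  // presence in their own ::google::protobuf::internal::HasBits<1> bitmap.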
+} + +void MongoGetMoreRequest::Clear() { + clear_database(); + clear_collection(); + clear_cursorid(); + clear_batch_size(); + clear_max_time_ms(); + clear_comment(); +} + +bool MongoGetMoreRequest::IsInitialized() const { + return has_database() && has_collection() && has_cursorid(); +} + +bool MongoGetMoreRequest::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { + LOG(WARNING) << "You're not supposed to parse a MongoGetMoreRequest"; + return true; +} + +void MongoGetMoreRequest::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + LOG(WARNING) << "You're not supposed to serialize a MongoGetMoreRequest"; +} + +::google::protobuf::uint8* MongoGetMoreRequest::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const { + return output; +} + +const ::google::protobuf::Descriptor* MongoGetMoreRequest::descriptor() { + return MongoGetMoreRequestBase::descriptor(); +} + +void MongoGetMoreRequest::SharedCtor() { + cursorid_ = 0; + batch_size_ = 0; + max_time_ms_ = 0; + _cached_size_ = 0; +} + +void MongoGetMoreRequest::SharedDtor() {} + +void MongoGetMoreRequest::SetCachedSize(int size) const { + _cached_size_ = size; +} + +::google::protobuf::Metadata MongoGetMoreRequest::GetMetadata() const { + ::google::protobuf::Metadata metadata; + metadata.descriptor = descriptor(); + metadata.reflection = NULL; + return metadata; +} + +MongoCountRequest::MongoCountRequest() : ::google::protobuf::Message() { + SharedCtor(); +} + +MongoCountRequest::~MongoCountRequest() { SharedDtor(); } + +MongoCountRequest::MongoCountRequest(const MongoCountRequest& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); +} + +MongoCountRequest& MongoCountRequest::operator=(const MongoCountRequest& from) { + CopyFrom(from); + return *this; +} + +void MongoCountRequest::SharedCtor() { + _cached_size_ = 0; + skip_ = 0; + limit_ = 0; +} + +void MongoCountRequest::SharedDtor() {} + +bool MongoCountRequest::SerializeTo(butil::IOBuf* buf) const { + if (!IsInitialized()) { + LOG(WARNING) << "MongoCountRequest not initialize"; + return false; + } + BsonPtr count_element_ptr = butil::bson::new_bson(); + bson_t* count_element = count_element_ptr.get(); + // count + BSON_APPEND_UTF8(count_element, "count", collection().c_str()); + // query + auto query_filter = query(); + if (!query_filter) { + query_filter.reset(bson_new(), bson_free); + } + BSON_APPEND_DOCUMENT(count_element, "query", query_filter.get()); + // limit + if (has_limit()) { + BSON_APPEND_INT64(count_element, "limit", limit()); + } + // skip + if (has_skip()) { + BSON_APPEND_INT64(count_element, "skip", skip()); + } + // $db + BSON_APPEND_UTF8(count_element, "$db", database().c_str()); + // Message Flags 4bytes + // Section[] Kind(1byte): Body(0); BodyDocument(Bson) + uint32_t flag_bits = 0; + buf->append(static_cast(&flag_bits), 4); + uint8_t kind = 0; // Body kind + buf->append(static_cast(&kind), 1); + buf->append(static_cast(bson_get_data(count_element)), + count_element->len); + return true; +} + +void MongoCountRequest::Swap(MongoCountRequest* other) {} + +MongoCountRequest* MongoCountRequest::New() const { + return new MongoCountRequest(); +} + +void MongoCountRequest::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoCountRequest::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const MongoCountRequest* source = + dynamic_cast(&from); + if 
(source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void MongoCountRequest::CopyFrom(const MongoCountRequest& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoCountRequest::MergeFrom(const MongoCountRequest& from) { + GOOGLE_CHECK_NE(&from, this); + + if (from.has_database()) { + set_database(from.database()); + } + + if (from.has_collection()) { + set_collection(from.collection()); + } + + if (from.has_query()) { + set_query(from.query()); + } + + if (from.has_skip()) { + set_skip(from.skip()); + } + + if (from.has_limit()) { + set_limit(from.limit()); + } +} + +void MongoCountRequest::Clear() { + clear_database(); + clear_collection(); + clear_query(); + clear_skip(); + clear_limit(); +} + +bool MongoCountRequest::IsInitialized() const { + return has_database() && has_collection(); +} + +bool MongoCountRequest::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { + LOG(WARNING) << "You're not supposed to parse a MongoCountRequest"; + return true; +} + +void MongoCountRequest::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + LOG(WARNING) << "You're not supposed to serialize a MongoCountRequest"; +} + +::google::protobuf::uint8* MongoCountRequest::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const { + return output; +} + +const ::google::protobuf::Descriptor* MongoCountRequest::descriptor() { + return MongoCountRequestBase::descriptor(); +} + +::google::protobuf::Metadata MongoCountRequest::GetMetadata() const { + ::google::protobuf::Metadata metadata; + metadata.descriptor = descriptor(); + metadata.reflection = NULL; + return metadata; +} + +void MongoCountRequest::SetCachedSize(int size) const { _cached_size_ = size; } + +MongoCountResponse::MongoCountResponse() : ::google::protobuf::Message() { + SharedCtor(); +} + +MongoCountResponse::~MongoCountResponse() { SharedDtor(); } + +MongoCountResponse::MongoCountResponse(const MongoCountResponse& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); +} + +MongoCountResponse& MongoCountResponse::operator=( + const MongoCountResponse& from) { + CopyFrom(from); + return *this; +} + +void MongoCountResponse::SharedCtor() { + _cached_size_ = 0; + number_ = 0; +} + +void MongoCountResponse::SharedDtor() {} + +bool MongoCountResponse::SerializeTo(butil::IOBuf* buf) const { + // TODO custom definetion + return true; +} + +void MongoCountResponse::Swap(MongoCountResponse* other) {} + +MongoCountResponse* MongoCountResponse::New() const { + return new MongoCountResponse(); +} + +void MongoCountResponse::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoCountResponse::MergeFrom(const ::google::protobuf::Message& from) { + GOOGLE_CHECK_NE(&from, this); + const MongoCountResponse* source = + dynamic_cast(&from); + if (source == NULL) { + ::google::protobuf::internal::ReflectionOps::Merge(from, this); + } else { + MergeFrom(*source); + } +} + +void MongoCountResponse::CopyFrom(const MongoCountResponse& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoCountResponse::MergeFrom(const MongoCountResponse& from) { + GOOGLE_CHECK_NE(&from, this); + + if (from.has_number()) { + set_number(from.number()); + } +} + +void MongoCountResponse::Clear() { clear_number(); } + +bool MongoCountResponse::IsInitialized() const { return true; 
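+  // Responses define no `required` fields, so they are always initialized;
+  // the request types, by contrast, demand at least database and collection.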
} + +bool MongoCountResponse::MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input) { + LOG(WARNING) << "You're not supposed to parse a MongoCountResponse"; + return true; +} + +void MongoCountResponse::SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const { + LOG(WARNING) << "You're not supposed to serialize a MongoCountResponse"; +} + +::google::protobuf::uint8* MongoCountResponse::SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const { + return output; +} + +const ::google::protobuf::Descriptor* MongoCountResponse::descriptor() { + return MongoCountResponseBase::descriptor(); +} + +::google::protobuf::Metadata MongoCountResponse::GetMetadata() const { + ::google::protobuf::Metadata metadata; + metadata.descriptor = descriptor(); + metadata.reflection = NULL; + return metadata; +} + +void MongoCountResponse::SetCachedSize(int size) const { _cached_size_ = size; } + +MongoInsertRequest::MongoInsertRequest() : ::google::protobuf::Message() { + SharedCtor(); +} + +MongoInsertRequest::~MongoInsertRequest() { SharedDtor(); } + +MongoInsertRequest::MongoInsertRequest(const MongoInsertRequest& from) + : ::google::protobuf::Message() { + SharedCtor(); + MergeFrom(from); +} + +MongoInsertRequest& MongoInsertRequest::operator=( + const MongoInsertRequest& from) { + CopyFrom(from); + return *this; +} + +void MongoInsertRequest::SharedCtor() { + _cached_size_ = 0; + ordered_ = true; +} + +void MongoInsertRequest::SharedDtor() {} + +bool MongoInsertRequest::SerializeTo(butil::IOBuf* buf) const { + if (!IsInitialized()) { + LOG(WARNING) << "MongoInsertRequest not initialize"; + return false; + } + if (documents().size() == 0) { + LOG(WARNING) << "To insert document null"; + return false; + } + // Message Flags 4bytes + uint32_t flag_bits = 0; + buf->append(static_cast(&flag_bits), 4); + + BsonPtr insert_body_element_ptr = butil::bson::new_bson(); + bson_t* insert_body_element = insert_body_element_ptr.get(); + // insert + BSON_APPEND_UTF8(insert_body_element, "insert", collection().c_str()); + // ordered + BSON_APPEND_BOOL(insert_body_element, "ordered", ordered()); + // $db + BSON_APPEND_UTF8(insert_body_element, "$db", database().c_str()); + + // Section[] Kind(1byte): Body(0); BodyDocument(Bson) + Section section1; + section1.type = 0; + section1.body_document = insert_body_element_ptr; + butil::IOBuf buf1; + bool ret = section1.SeralizeTo(&buf1); + if (!ret) { + return false; + } + buf->append(buf1); + // Section Kind(1byte): Document Sequence(1); SeqID: documents + // 添加object_id + for (auto document : documents()) { + bson_t* doc = document.get(); + if (!butil::bson::bson_has_oid(document)) { + bson_oid_t oid; + bson_oid_init(&oid, nullptr); + BSON_APPEND_OID(doc, "_id", &oid); + } + } + Section section2; + section2.type = 1; + DocumentSequencePtr document_sequence = std::make_shared(); + document_sequence->identifier = "documents"; + document_sequence->documents = documents(); + section2.document_sequence = document_sequence; + butil::IOBuf buf2; + ret = section2.SeralizeTo(&buf2); + if (!ret) { + return false; + } + buf->append(buf2); + return true; +} + +void MongoInsertRequest::Swap(MongoInsertRequest* other) {} + +MongoInsertRequest* MongoInsertRequest::New() const { + return new MongoInsertRequest(); +} + +void MongoInsertRequest::CopyFrom(const ::google::protobuf::Message& from) { + if (&from == this) return; + Clear(); + MergeFrom(from); +} + +void MongoInsertRequest::MergeFrom(const 
+
+void MongoInsertRequest::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoInsertRequest* source =
+      dynamic_cast<const MongoInsertRequest*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoInsertRequest::CopyFrom(const MongoInsertRequest& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoInsertRequest::MergeFrom(const MongoInsertRequest& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_database()) {
+    set_database(from.database());
+  }
+
+  if (from.has_collection()) {
+    set_collection(from.collection());
+  }
+
+  if (from.has_ordered()) {
+    set_ordered(from.ordered());
+  }
+
+  documents_.insert(documents_.end(), from.documents().cbegin(),
+                    from.documents().cend());
+}
+
+void MongoInsertRequest::Clear() {
+  clear_database();
+  clear_collection();
+  clear_ordered();
+  clear_documents();
+}
+
+bool MongoInsertRequest::IsInitialized() const {
+  return has_database() && has_collection();
+}
+
+bool MongoInsertRequest::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoInsertRequest";
+  return true;
+}
+
+void MongoInsertRequest::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING) << "You're not supposed to serialize a MongoInsertRequest";
+}
+
+::google::protobuf::uint8* MongoInsertRequest::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoInsertRequest::descriptor() {
+  return MongoInsertRequestBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoInsertRequest::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoInsertRequest::SetCachedSize(int size) const { _cached_size_ = size; }
+
+MongoInsertResponse::MongoInsertResponse() : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoInsertResponse::~MongoInsertResponse() { SharedDtor(); }
+
+MongoInsertResponse::MongoInsertResponse(const MongoInsertResponse& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoInsertResponse& MongoInsertResponse::operator=(
+    const MongoInsertResponse& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoInsertResponse::SharedCtor() {
+  _cached_size_ = 0;
+  number_ = 0;
+}
+
+void MongoInsertResponse::SharedDtor() {}
+
+bool MongoInsertResponse::SerializeTo(butil::IOBuf* buf) const {
+  // TODO: custom definition
+  return true;
+}
+
+void MongoInsertResponse::Swap(MongoInsertResponse* other) {}
+
+MongoInsertResponse* MongoInsertResponse::New() const {
+  return new MongoInsertResponse();
+}
+
+void MongoInsertResponse::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoInsertResponse::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoInsertResponse* source =
+      dynamic_cast<const MongoInsertResponse*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoInsertResponse::CopyFrom(const MongoInsertResponse& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoInsertResponse::MergeFrom(const MongoInsertResponse& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_number()) {
+    set_number(from.number());
+  }
+
+  write_errors_.insert(write_errors_.end(), from.write_errors().cbegin(),
+                       from.write_errors().cend());
+}
+
+void MongoInsertResponse::Clear() {
+  clear_number();
+  clear_write_errors();
+}
+
+bool MongoInsertResponse::IsInitialized() const { return true; }
+
+bool MongoInsertResponse::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoInsertResponse";
+  return true;
+}
+
+void MongoInsertResponse::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING) << "You're not supposed to serialize a MongoInsertResponse";
+}
+
+::google::protobuf::uint8* MongoInsertResponse::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoInsertResponse::descriptor() {
+  return MongoInsertResponseBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoInsertResponse::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoInsertResponse::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+MongoDeleteRequest::MongoDeleteRequest() : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoDeleteRequest::~MongoDeleteRequest() { SharedDtor(); }
+
+MongoDeleteRequest::MongoDeleteRequest(const MongoDeleteRequest& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoDeleteRequest& MongoDeleteRequest::operator=(
+    const MongoDeleteRequest& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoDeleteRequest::SharedCtor() {
+  _cached_size_ = 0;
+  ordered_ = false;
+  delete_many_ = false;
+}
+
+void MongoDeleteRequest::SharedDtor() {}
+
+bool MongoDeleteRequest::SerializeTo(butil::IOBuf* buf) const {
+  if (!IsInitialized()) {
+    LOG(WARNING) << "MongoDeleteRequest not initialized";
+    return false;
+  }
+  // Message Flags 4bytes
+  uint32_t flag_bits = 0;
+  buf->append(static_cast<const void*>(&flag_bits), 4);
+
+  BsonPtr delete_body_element_ptr = butil::bson::new_bson();
+  bson_t* delete_body_element = delete_body_element_ptr.get();
+  // delete
+  BSON_APPEND_UTF8(delete_body_element, "delete", collection().c_str());
+  // ordered
+  BSON_APPEND_BOOL(delete_body_element, "ordered", ordered());
+  // $db
+  BSON_APPEND_UTF8(delete_body_element, "$db", database().c_str());
+
+  // Section[] Kind(1byte): Body(0); BodyDocument(Bson)
+  Section section1;
+  section1.type = 0;
+  section1.body_document = delete_body_element_ptr;
+  butil::IOBuf buf1;
+  bool ret = section1.SeralizeTo(&buf1);
+  if (!ret) {
+    return false;
+  }
+  buf->append(buf1);
+  // Section Kind(1byte): Document Sequence(1); SeqID: deletes
+  Section section2;
+  section2.type = 1;
+  DocumentSequencePtr document_sequence = std::make_shared<DocumentSequence>();
+  document_sequence->identifier = "deletes";
+  // query filter for the records to delete
+  BsonPtr delete_filter_element_ptr = butil::bson::new_bson();
+  BsonPtr empty_query_ptr;
+  if (query()) {
+    BSON_APPEND_DOCUMENT(delete_filter_element_ptr.get(), "q", query().get());
+  } else {
+    empty_query_ptr = butil::bson::new_bson();
+    BSON_APPEND_DOCUMENT(delete_filter_element_ptr.get(), "q",
+                         empty_query_ptr.get());
+  }
+  document_sequence->documents.push_back(delete_filter_element_ptr);
+  // cap the number of deletions: 0 = no limit, 1 = delete only one record
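+  // (each entry of the "deletes" sequence thus follows the delete command's
+  // { q: <filter>, limit: <int> } document shape)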
+  BSON_APPEND_INT32(delete_filter_element_ptr.get(), "limit",
+                    (delete_many() ? 0 : 1));
+  section2.document_sequence = document_sequence;
+  butil::IOBuf buf2;
+  ret = section2.SeralizeTo(&buf2);
+  if (!ret) {
+    return false;
+  }
+  buf->append(buf2);
+  return true;
+}
+
+void MongoDeleteRequest::Swap(MongoDeleteRequest* other) {}
+
+MongoDeleteRequest* MongoDeleteRequest::New() const {
+  return new MongoDeleteRequest();
+}
+
+void MongoDeleteRequest::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoDeleteRequest::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoDeleteRequest* source =
+      dynamic_cast<const MongoDeleteRequest*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoDeleteRequest::CopyFrom(const MongoDeleteRequest& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoDeleteRequest::MergeFrom(const MongoDeleteRequest& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_database()) {
+    set_database(from.database());
+  }
+
+  if (from.has_collection()) {
+    set_collection(from.collection());
+  }
+
+  if (from.has_ordered()) {
+    set_ordered(from.ordered());
+  }
+
+  if (from.has_query()) {
+    set_query(from.query());
+  }
+
+  if (from.has_delete_many()) {
+    set_delete_many(from.delete_many());
+  }
+}
+
+void MongoDeleteRequest::Clear() {
+  clear_database();
+  clear_collection();
+  clear_ordered();
+  clear_query();
+  clear_delete_many();
+}
+
+bool MongoDeleteRequest::IsInitialized() const {
+  return has_database() && has_collection();
+}
+
+bool MongoDeleteRequest::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoDeleteRequest";
+  return true;
+}
+
+void MongoDeleteRequest::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING) << "You're not supposed to serialize a MongoDeleteRequest";
+}
+
+::google::protobuf::uint8* MongoDeleteRequest::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoDeleteRequest::descriptor() {
+  return MongoDeleteRequestBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoDeleteRequest::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoDeleteRequest::SetCachedSize(int size) const { _cached_size_ = size; }
+
+MongoDeleteResponse::MongoDeleteResponse() : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoDeleteResponse::~MongoDeleteResponse() { SharedDtor(); }
+
+MongoDeleteResponse::MongoDeleteResponse(const MongoDeleteResponse& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoDeleteResponse& MongoDeleteResponse::operator=(
+    const MongoDeleteResponse& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoDeleteResponse::SharedCtor() {
+  _cached_size_ = 0;
+  number_ = 0;
+}
+
+void MongoDeleteResponse::SharedDtor() {}
+
+bool MongoDeleteResponse::SerializeTo(butil::IOBuf* buf) const {
+  // TODO: custom definition
+  return true;
+}
+
+void MongoDeleteResponse::Swap(MongoDeleteResponse* other) {}
+
+MongoDeleteResponse* MongoDeleteResponse::New() const {
+  return new MongoDeleteResponse();
+}
+
+void MongoDeleteResponse::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoDeleteResponse::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoDeleteResponse* source =
+      dynamic_cast<const MongoDeleteResponse*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoDeleteResponse::CopyFrom(const MongoDeleteResponse& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoDeleteResponse::MergeFrom(const MongoDeleteResponse& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_number()) {
+    set_number(from.number());
+  }
+}
+
+void MongoDeleteResponse::Clear() { clear_number(); }
+
+bool MongoDeleteResponse::IsInitialized() const { return true; }
+
+bool MongoDeleteResponse::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoDeleteResponse";
+  return true;
+}
+
+void MongoDeleteResponse::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING) << "You're not supposed to serialize a MongoDeleteResponse";
+}
+
+::google::protobuf::uint8* MongoDeleteResponse::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoDeleteResponse::descriptor() {
+  return MongoDeleteResponseBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoDeleteResponse::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoDeleteResponse::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+MongoUpdateRequest::MongoUpdateRequest() : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoUpdateRequest::~MongoUpdateRequest() { SharedDtor(); }
+
+MongoUpdateRequest::MongoUpdateRequest(const MongoUpdateRequest& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoUpdateRequest& MongoUpdateRequest::operator=(
+    const MongoUpdateRequest& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoUpdateRequest::SharedCtor() {
+  _cached_size_ = 0;
+  ordered_ = true;
+  upsert_ = false;
+  multi_ = false;
+}
+
+void MongoUpdateRequest::SharedDtor() {}
+
+bool MongoUpdateRequest::SerializeTo(butil::IOBuf* buf) const {
+  if (!IsInitialized()) {
+    LOG(WARNING) << "MongoUpdateRequest not initialized";
+    return false;
+  }
+  // update() must only contain $ operators
+  bson_iter_t iter;
+  if (!bson_iter_init(&iter, update().get())) {
+    LOG(ERROR) << "update document is corrupt";
+    return false;
+  }
+  while (bson_iter_next(&iter)) {
+    const char* key = bson_iter_key(&iter);
+    if (key[0] != '$') {
+      LOG(ERROR) << "update only works with $ operators";
+      return false;
+    }
+  }
+
+  // Message Flags 4bytes
+  uint32_t flag_bits = 0;
+  buf->append(static_cast<const void*>(&flag_bits), 4);
+
+  BsonPtr update_body_element_ptr = butil::bson::new_bson();
+  bson_t* update_body_element = update_body_element_ptr.get();
+  // update
+  BSON_APPEND_UTF8(update_body_element, "update", collection().c_str());
+  // ordered
+  BSON_APPEND_BOOL(update_body_element, "ordered", ordered());
+  // $db
+  BSON_APPEND_UTF8(update_body_element, "$db", database().c_str());
+
+  // Section[] Kind(1byte): Body(0); BodyDocument(Bson)
+  Section section1;
+  section1.type = 0;
+  section1.body_document = update_body_element_ptr;
+  butil::IOBuf buf1;
+  bool ret = section1.SeralizeTo(&buf1);
+  if (!ret) {
+    return false;
+  }
+  buf->append(buf1);
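+  // The single entry pushed into the "updates" sequence below follows the
+  // update command's { q: <selector>, u: <update>, upsert: <bool>,
+  // multi: <bool> } document shape.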
+  // Section Kind(1byte): Document Sequence(1); SeqID: updates
+  Section section2;
+  section2.type = 1;
+  DocumentSequencePtr document_sequence = std::make_shared<DocumentSequence>();
+  document_sequence->identifier = "updates";
+  // query filter for the records to update
+  BsonPtr update_selector_element_ptr = butil::bson::new_bson();
+  BSON_APPEND_DOCUMENT(update_selector_element_ptr.get(), "q",
+                       selector().get());
+  BSON_APPEND_DOCUMENT(update_selector_element_ptr.get(), "u", update().get());
+  BSON_APPEND_BOOL(update_selector_element_ptr.get(), "upsert", upsert());
+  BSON_APPEND_BOOL(update_selector_element_ptr.get(), "multi", multi());
+  document_sequence->documents.push_back(update_selector_element_ptr);
+  section2.document_sequence = document_sequence;
+  butil::IOBuf buf2;
+  ret = section2.SeralizeTo(&buf2);
+  if (!ret) {
+    return false;
+  }
+  buf->append(buf2);
+  return true;
+}
+
+void MongoUpdateRequest::Swap(MongoUpdateRequest* other) {}
+
+MongoUpdateRequest* MongoUpdateRequest::New() const {
+  return new MongoUpdateRequest();
+}
+
+void MongoUpdateRequest::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoUpdateRequest::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoUpdateRequest* source =
+      dynamic_cast<const MongoUpdateRequest*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoUpdateRequest::CopyFrom(const MongoUpdateRequest& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoUpdateRequest::MergeFrom(const MongoUpdateRequest& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_database()) {
+    set_database(from.database());
+  }
+
+  if (from.has_collection()) {
+    set_collection(from.collection());
+  }
+
+  if (from.has_selector()) {
+    set_selector(from.selector());
+  }
+
+  if (from.has_update()) {
+    set_update(from.update());
+  }
+
+  if (from.has_ordered()) {
+    set_ordered(from.ordered());
+  }
+
+  if (from.has_upsert()) {
+    set_upsert(from.upsert());
+  }
+
+  if (from.has_multi()) {
+    set_multi(from.multi());
+  }
+}
+
+void MongoUpdateRequest::Clear() {
+  clear_database();
+  clear_collection();
+  clear_selector();
+  clear_update();
+  clear_ordered();
+  clear_upsert();
+  clear_multi();
+}
+
+bool MongoUpdateRequest::IsInitialized() const {
+  return has_database() && has_collection() && has_selector() && has_update();
+}
+
+bool MongoUpdateRequest::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoUpdateRequest";
+  return true;
+}
+
+void MongoUpdateRequest::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING) << "You're not supposed to serialize a MongoUpdateRequest";
+}
+
+::google::protobuf::uint8* MongoUpdateRequest::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoUpdateRequest::descriptor() {
+  return MongoUpdateRequestBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoUpdateRequest::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoUpdateRequest::SetCachedSize(int size) const { _cached_size_ = size; }
+
+MongoUpdateResponse::MongoUpdateResponse() : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoUpdateResponse::~MongoUpdateResponse() { SharedDtor(); }
+
+MongoUpdateResponse::MongoUpdateResponse(const MongoUpdateResponse& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoUpdateResponse& MongoUpdateResponse::operator=(
+    const MongoUpdateResponse& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoUpdateResponse::SharedCtor() {
+  _cached_size_ = 0;
+  matched_number_ = 0;
+  modified_number_ = 0;
+}
+
+void MongoUpdateResponse::SharedDtor() {}
+
+bool MongoUpdateResponse::SerializeTo(butil::IOBuf* buf) const {
+  // TODO: custom definition
+  return true;
+}
+
+void MongoUpdateResponse::Swap(MongoUpdateResponse* other) {}
+
+MongoUpdateResponse* MongoUpdateResponse::New() const {
+  return new MongoUpdateResponse();
+}
+
+void MongoUpdateResponse::CopyFrom(const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoUpdateResponse::MergeFrom(const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoUpdateResponse* source =
+      dynamic_cast<const MongoUpdateResponse*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoUpdateResponse::CopyFrom(const MongoUpdateResponse& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoUpdateResponse::MergeFrom(const MongoUpdateResponse& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_matched_number()) {
+    set_matched_number(from.matched_number());
+  }
+
+  if (from.has_modified_number()) {
+    set_modified_number(from.modified_number());
+  }
+
+  upserted_docs_.insert(upserted_docs_.end(), from.upserted_docs().cbegin(),
+                        from.upserted_docs().cend());
+
+  write_errors_.insert(write_errors_.end(), from.write_errors().cbegin(),
+                       from.write_errors().cend());
+}
+
+void MongoUpdateResponse::Clear() {
+  clear_matched_number();
+  clear_modified_number();
+  clear_upserted_docs();
+  clear_write_errors();
+}
+
+bool MongoUpdateResponse::IsInitialized() const { return true; }
+
+bool MongoUpdateResponse::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoUpdateResponse";
+  return true;
+}
+
+void MongoUpdateResponse::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING) << "You're not supposed to serialize a MongoUpdateResponse";
+}
+
+::google::protobuf::uint8* MongoUpdateResponse::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoUpdateResponse::descriptor() {
+  return MongoUpdateResponseBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoUpdateResponse::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoUpdateResponse::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+MongoFindAndModifyRequest::MongoFindAndModifyRequest()
+    : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoFindAndModifyRequest::~MongoFindAndModifyRequest() { SharedDtor(); }
+
+MongoFindAndModifyRequest::MongoFindAndModifyRequest(
+    const MongoFindAndModifyRequest& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoFindAndModifyRequest& MongoFindAndModifyRequest::operator=(
+    const MongoFindAndModifyRequest& from) {
+  CopyFrom(from);
+  return *this;
+}
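+
+// A sketch of the kind-0 body document that
+// MongoFindAndModifyRequest::SerializeTo() below writes:
+//   { findAndModify: "<collection>", query: <doc>, update: <doc> (if set),
+//     upsert: <bool>, new: <bool>, remove: <bool>, $db: "<database>" }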
+
+void MongoFindAndModifyRequest::SharedCtor() {
+  _cached_size_ = 0;
+  upsert_ = false;
+  remove_ = false;
+  return_new_ = false;
+}
+
+void MongoFindAndModifyRequest::SharedDtor() {}
+
+bool MongoFindAndModifyRequest::SerializeTo(butil::IOBuf* buf) const {
+  if (!IsInitialized()) {
+    LOG(WARNING) << "MongoFindAndModifyRequest not initialized";
+    return false;
+  }
+  if (has_remove() && remove()) {
+    if (has_update()) {
+      LOG(ERROR) << "MongoFindAndModifyRequest cannot specify both update and "
+                    "remove=true";
+      return false;
+    }
+    if (has_upsert() && upsert()) {
+      LOG(ERROR) << "MongoFindAndModifyRequest cannot specify both upsert=true "
+                    "and remove=true";
+      return false;
+    }
+    if (has_return_new() && return_new()) {
+      LOG(ERROR) << "MongoFindAndModifyRequest cannot specify both new=true "
+                    "and remove=true";
+      return false;
+    }
+  }
+  if (has_update()) {
+    // update() must only contain $ operators
+    bson_iter_t iter;
+    if (!bson_iter_init(&iter, update().get())) {
+      LOG(ERROR) << "update document is corrupt";
+      return false;
+    }
+    while (bson_iter_next(&iter)) {
+      const char* key = bson_iter_key(&iter);
+      if (key[0] != '$') {
+        LOG(ERROR) << "update only works with $ operators";
+        return false;
+      }
+    }
+  }
+
+  // Message Flags 4bytes
+  uint32_t flag_bits = 0;
+  buf->append(static_cast<const void*>(&flag_bits), 4);
+
+  BsonPtr find_and_modify_element_ptr = butil::bson::new_bson();
+  bson_t* find_and_modify_element = find_and_modify_element_ptr.get();
+  // findAndModify
+  BSON_APPEND_UTF8(find_and_modify_element, "findAndModify",
+                   collection().c_str());
+  // query
+  BSON_APPEND_DOCUMENT(find_and_modify_element, "query", query().get());
+  // update
+  if (has_update()) {
+    BSON_APPEND_DOCUMENT(find_and_modify_element, "update", update().get());
+  }
+  // upsert
+  BSON_APPEND_BOOL(find_and_modify_element, "upsert", upsert());
+  // new
+  BSON_APPEND_BOOL(find_and_modify_element, "new", return_new());
+  // remove
+  BSON_APPEND_BOOL(find_and_modify_element, "remove", remove());
+  // $db
+  BSON_APPEND_UTF8(find_and_modify_element, "$db", database().c_str());
+
+  // Section[] Kind(1byte): Body(0); BodyDocument(Bson)
+  Section section1;
+  section1.type = 0;
+  section1.body_document = find_and_modify_element_ptr;
+  butil::IOBuf buf1;
+  bool ret = section1.SeralizeTo(&buf1);
+  if (!ret) {
+    return false;
+  }
+  buf->append(buf1);
+  return true;
+}
+
+void MongoFindAndModifyRequest::Swap(MongoFindAndModifyRequest* other) {}
+
+MongoFindAndModifyRequest* MongoFindAndModifyRequest::New() const {
+  return new MongoFindAndModifyRequest();
+}
+
+void MongoFindAndModifyRequest::CopyFrom(
+    const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoFindAndModifyRequest::MergeFrom(
+    const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoFindAndModifyRequest* source =
+      dynamic_cast<const MongoFindAndModifyRequest*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoFindAndModifyRequest::CopyFrom(
+    const MongoFindAndModifyRequest& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoFindAndModifyRequest::MergeFrom(
+    const MongoFindAndModifyRequest& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_database()) {
+    set_database(from.database());
+  }
+
+  if (from.has_collection()) {
+    set_collection(from.collection());
+  }
+
+  if (from.has_query()) {
+    set_query(from.query());
+  }
+
+  if (from.has_sort()) {
+    set_sort(from.sort());
+  }
+
+  if (from.has_update()) {
+    set_update(from.update());
+  }
+
+  if (from.has_upsert()) {
+    set_upsert(from.upsert());
+  }
+
+  if (from.has_remove()) {
+    set_remove(from.remove());
+  }
+
+  if (from.has_return_new()) {
+    set_return_new(from.return_new());
+  }
+
+  fields_.insert(fields_.end(), from.fields().cbegin(), from.fields().cend());
+}
+
+void MongoFindAndModifyRequest::Clear() {
+  clear_database();
+  clear_collection();
+  clear_query();
+  clear_sort();
+  clear_update();
+  clear_upsert();
+  clear_remove();
+  clear_return_new();
+  clear_fields();
+}
+
+bool MongoFindAndModifyRequest::IsInitialized() const {
+  return has_database() && has_collection() && has_query() &&
+         (has_update() || has_remove());
+}
+
+bool MongoFindAndModifyRequest::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoFindAndModifyRequest";
+  return true;
+}
+
+void MongoFindAndModifyRequest::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING)
+      << "You're not supposed to serialize a MongoFindAndModifyRequest";
+}
+
+::google::protobuf::uint8*
+MongoFindAndModifyRequest::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoFindAndModifyRequest::descriptor() {
+  return MongoFindAndModifyRequestBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoFindAndModifyRequest::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoFindAndModifyRequest::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+MongoFindAndModifyResponse::MongoFindAndModifyResponse()
+    : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoFindAndModifyResponse::~MongoFindAndModifyResponse() { SharedDtor(); }
+
+MongoFindAndModifyResponse::MongoFindAndModifyResponse(
+    const MongoFindAndModifyResponse& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoFindAndModifyResponse& MongoFindAndModifyResponse::operator=(
+    const MongoFindAndModifyResponse& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoFindAndModifyResponse::SharedCtor() {
+  _cached_size_ = 0;
+  memset(&upserted_, 0, sizeof(upserted_));
+}
+
+void MongoFindAndModifyResponse::SharedDtor() {}
+
+bool MongoFindAndModifyResponse::SerializeTo(butil::IOBuf* buf) const {
+  // TODO: custom definition
+  return true;
+}
+
+void MongoFindAndModifyResponse::Swap(MongoFindAndModifyResponse* other) {}
+
+MongoFindAndModifyResponse* MongoFindAndModifyResponse::New() const {
+  return new MongoFindAndModifyResponse();
+}
+
+void MongoFindAndModifyResponse::CopyFrom(
+    const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoFindAndModifyResponse::MergeFrom(
+    const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoFindAndModifyResponse* source =
+      dynamic_cast<const MongoFindAndModifyResponse*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoFindAndModifyResponse::CopyFrom(
+    const MongoFindAndModifyResponse& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoFindAndModifyResponse::MergeFrom(
+    const MongoFindAndModifyResponse& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_value()) {
+    set_value(from.value());
+  }
+
+  if (from.has_upserted()) {
+    set_upserted(from.upserted());
+  }
+}
+
+void MongoFindAndModifyResponse::Clear() {
+  clear_value();
+  clear_upserted();
+}
+
+bool MongoFindAndModifyResponse::IsInitialized() const { return true; }
+
+bool MongoFindAndModifyResponse::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoFindAndModifyResponse";
+  return true;
+}
+
+void MongoFindAndModifyResponse::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING)
+      << "You're not supposed to serialize a MongoFindAndModifyResponse";
+}
+
+::google::protobuf::uint8*
+MongoFindAndModifyResponse::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor* MongoFindAndModifyResponse::descriptor() {
+  return MongoFindAndModifyResponseBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoFindAndModifyResponse::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoFindAndModifyResponse::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+MongoGetReplSetStatusRequest::MongoGetReplSetStatusRequest()
+    : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoGetReplSetStatusRequest::~MongoGetReplSetStatusRequest() { SharedDtor(); }
+
+MongoGetReplSetStatusRequest::MongoGetReplSetStatusRequest(
+    const MongoGetReplSetStatusRequest& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoGetReplSetStatusRequest& MongoGetReplSetStatusRequest::operator=(
+    const MongoGetReplSetStatusRequest& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoGetReplSetStatusRequest::SharedCtor() { _cached_size_ = 0; }
+
+void MongoGetReplSetStatusRequest::SharedDtor() {}
+
+bool MongoGetReplSetStatusRequest::SerializeTo(butil::IOBuf* buf) const {
+  // TODO: custom definition
+  if (!IsInitialized()) {
+    LOG(WARNING) << "MongoGetReplSetStatusRequest not initialized";
+    return false;
+  }
+
+  // Message Flags 4bytes
+  uint32_t flag_bits = 0;
+  buf->append(static_cast<const void*>(&flag_bits), 4);
+
+  BsonPtr get_repl_set_status_element_ptr = butil::bson::new_bson();
+  bson_t* get_repl_set_status_element = get_repl_set_status_element_ptr.get();
+  // replSetGetStatus
+  BSON_APPEND_DOUBLE(get_repl_set_status_element, "replSetGetStatus", 1.0);
+  // $db
+  BSON_APPEND_UTF8(get_repl_set_status_element, "$db", "admin");
+
+  // Section[] Kind(1byte): Body(0); BodyDocument(Bson)
+  Section section1;
+  section1.type = 0;
+  section1.body_document = get_repl_set_status_element_ptr;
+  butil::IOBuf buf1;
+  bool ret = section1.SeralizeTo(&buf1);
+  if (!ret) {
+    return false;
+  }
+  buf->append(buf1);
+  return true;
+}
+
+void MongoGetReplSetStatusRequest::Swap(MongoGetReplSetStatusRequest* other) {}
+
+MongoGetReplSetStatusRequest* MongoGetReplSetStatusRequest::New() const {
+  return new MongoGetReplSetStatusRequest();
+}
+
+void MongoGetReplSetStatusRequest::CopyFrom(
+    const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoGetReplSetStatusRequest::MergeFrom(
+    const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoGetReplSetStatusRequest* source =
+      dynamic_cast<const MongoGetReplSetStatusRequest*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
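+
+// replSetGetStatus is an admin command: the kind-0 body built above is
+// { replSetGetStatus: 1.0, $db: "admin" }, and the reply is expected to carry
+// ok/set/myState plus a "members" array, mirrored by
+// MongoGetReplSetStatusResponse below.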
+
+void MongoGetReplSetStatusRequest::CopyFrom(
+    const MongoGetReplSetStatusRequest& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoGetReplSetStatusRequest::MergeFrom(
+    const MongoGetReplSetStatusRequest& from) {
+  GOOGLE_CHECK_NE(&from, this);
+}
+
+void MongoGetReplSetStatusRequest::Clear() {}
+
+bool MongoGetReplSetStatusRequest::IsInitialized() const { return true; }
+
+bool MongoGetReplSetStatusRequest::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING) << "You're not supposed to parse a MongoGetReplSetStatusRequest";
+  return true;
+}
+
+void MongoGetReplSetStatusRequest::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING)
+      << "You're not supposed to serialize a MongoGetReplSetStatusRequest";
+}
+
+::google::protobuf::uint8*
+MongoGetReplSetStatusRequest::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor*
+MongoGetReplSetStatusRequest::descriptor() {
+  return MongoGetReplSetStatusRequestBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoGetReplSetStatusRequest::GetMetadata() const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoGetReplSetStatusRequest::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+MongoGetReplSetStatusResponse::MongoGetReplSetStatusResponse()
+    : ::google::protobuf::Message() {
+  SharedCtor();
+}
+
+MongoGetReplSetStatusResponse::~MongoGetReplSetStatusResponse() {
+  SharedDtor();
+}
+
+MongoGetReplSetStatusResponse::MongoGetReplSetStatusResponse(
+    const MongoGetReplSetStatusResponse& from)
+    : ::google::protobuf::Message() {
+  SharedCtor();
+  MergeFrom(from);
+}
+
+MongoGetReplSetStatusResponse& MongoGetReplSetStatusResponse::operator=(
+    const MongoGetReplSetStatusResponse& from) {
+  CopyFrom(from);
+  return *this;
+}
+
+void MongoGetReplSetStatusResponse::SharedCtor() {
+  _cached_size_ = 0;
+  ok_ = false;
+  myState_ = 0;
+}
+
+void MongoGetReplSetStatusResponse::SharedDtor() {}
+
+bool MongoGetReplSetStatusResponse::SerializeTo(butil::IOBuf* buf) const {
+  // TODO: custom definition
+  return true;
+}
+
+void MongoGetReplSetStatusResponse::Swap(MongoGetReplSetStatusResponse* other) {
+}
+
+MongoGetReplSetStatusResponse* MongoGetReplSetStatusResponse::New() const {
+  return new MongoGetReplSetStatusResponse();
+}
+
+void MongoGetReplSetStatusResponse::CopyFrom(
+    const ::google::protobuf::Message& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoGetReplSetStatusResponse::MergeFrom(
+    const ::google::protobuf::Message& from) {
+  GOOGLE_CHECK_NE(&from, this);
+  const MongoGetReplSetStatusResponse* source =
+      dynamic_cast<const MongoGetReplSetStatusResponse*>(&from);
+  if (source == NULL) {
+    ::google::protobuf::internal::ReflectionOps::Merge(from, this);
+  } else {
+    MergeFrom(*source);
+  }
+}
+
+void MongoGetReplSetStatusResponse::CopyFrom(
+    const MongoGetReplSetStatusResponse& from) {
+  if (&from == this) return;
+  Clear();
+  MergeFrom(from);
+}
+
+void MongoGetReplSetStatusResponse::MergeFrom(
+    const MongoGetReplSetStatusResponse& from) {
+  GOOGLE_CHECK_NE(&from, this);
+
+  if (from.has_ok()) {
+    set_ok(from.ok());
+  }
+
+  if (from.has_set()) {
+    set_set(from.set());
+  }
+
+  if (from.has_myState()) {
+    set_myState(from.myState());
+  }
+
+  members_.insert(members_.end(), from.members_.cbegin(), from.members_.cend());
+}
+
+void MongoGetReplSetStatusResponse::Clear() {
+  clear_ok();
+  clear_set();
+  clear_myState();
+  clear_members();
+}
+
+bool MongoGetReplSetStatusResponse::IsInitialized() const { return has_ok(); }
+
+bool MongoGetReplSetStatusResponse::MergePartialFromCodedStream(
+    ::google::protobuf::io::CodedInputStream* input) {
+  LOG(WARNING)
+      << "You're not supposed to parse a MongoGetReplSetStatusResponse";
+  return true;
+}
+
+void MongoGetReplSetStatusResponse::SerializeWithCachedSizes(
+    ::google::protobuf::io::CodedOutputStream* output) const {
+  LOG(WARNING)
+      << "You're not supposed to serialize a MongoGetReplSetStatusResponse";
+}
+
+::google::protobuf::uint8*
+MongoGetReplSetStatusResponse::SerializeWithCachedSizesToArray(
+    ::google::protobuf::uint8* output) const {
+  return output;
+}
+
+const ::google::protobuf::Descriptor*
+MongoGetReplSetStatusResponse::descriptor() {
+  return MongoGetReplSetStatusResponseBase::descriptor();
+}
+
+::google::protobuf::Metadata MongoGetReplSetStatusResponse::GetMetadata()
+    const {
+  ::google::protobuf::Metadata metadata;
+  metadata.descriptor = descriptor();
+  metadata.reflection = NULL;
+  return metadata;
+}
+
+void MongoGetReplSetStatusResponse::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+} // namespace brpc
diff --git a/src/brpc/mongo.h b/src/brpc/mongo.h
new file mode 100644
index 0000000000..a84eafa197
--- /dev/null
+++ b/src/brpc/mongo.h
@@ -0,0 +1,2065 @@
+// Licensed to the Apache Software Foundation (ASF) under one
+// or more contributor license agreements. See the NOTICE file
+// distributed with this work for additional information
+// regarding copyright ownership. The ASF licenses this file
+// to you under the Apache License, Version 2.0 (the
+// "License"); you may not use this file except in compliance
+// with the License. You may obtain a copy of the License at
+//
+//   http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing,
+// software distributed under the License is distributed on an
+// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
+// KIND, either express or implied. See the License for the
+// specific language governing permissions and limitations
+// under the License.
+
+#ifndef BRPC_MONGO_H
+#define BRPC_MONGO_H
+
+#include <bson/bson.h>
+#include <google/protobuf/message.h>
+
+#include <memory>
+#include <sstream>
+#include <string>
+#include <vector>
+
+#include "brpc/callback.h"
+#include "brpc/mongo_head.h"
+#include "brpc/parse_result.h"
+#include "brpc/policy/mongo.pb.h"
+#include "brpc/proto_base.pb.h"
+#include "brpc/socket.h"
+#include "butil/arena.h"
+#include "butil/bson_util.h"
+#include "butil/iobuf.h"
+#include "butil/strings/string_piece.h"
+
+namespace brpc {
+
+using butil::bson::BsonPtr;
+
+struct MongoReply {
+  int32_t response_flags;
+  int64_t cursorid;
+  int32_t straring_from;
+  int32_t number_returned;
+  std::vector<BsonPtr> documents;
+};
+
+struct DocumentSequence {
+  mutable int32_t size;
+  std::string identifier;
+  std::vector<BsonPtr> documents;
+  bool SerializeTo(butil::IOBuf* buf) const;
+};
+
+typedef std::shared_ptr<DocumentSequence> DocumentSequencePtr;
+
+struct Section {
+  uint8_t type;
+  BsonPtr body_document;
+  DocumentSequencePtr document_sequence;
+  bool SeralizeTo(butil::IOBuf* buf) const;
+};
+
+struct MongoMsg {
+  uint32_t flagbits;
+  std::vector<Section> sections;
+  uint32_t checksum;
+
+  void make_host_endian() {
+    if (!ARCH_CPU_LITTLE_ENDIAN) {
+      flagbits = butil::ByteSwap(flagbits);
+      checksum = butil::ByteSwap(checksum);
+    }
+  }
+
+  bool checksumPresent() { return flagbits & 0x00000001; }
+};
+
+struct ReplicaSetMember {
+  int32_t id;
+  std::string addr;
+  bool health;
+  int32_t state;
+  std::string state_str;
+};
+
+class MongoQueryRequest : public ::google::protobuf::Message {
+ public:
+  MongoQueryRequest();
+  virtual ~MongoQueryRequest();
+  MongoQueryRequest(const MongoQueryRequest& from);
+  MongoQueryRequest& operator=(const MongoQueryRequest& from);
+  void Swap(MongoQueryRequest* other);
+  bool SerializeTo(butil::IOBuf* buf) const;
+  MongoQueryRequest* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MongoQueryRequest& from);
+  void MergeFrom(const MongoQueryRequest& from);
+  void Clear();
+  bool IsInitialized() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(
+      ::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  static const ::google::protobuf::Descriptor* descriptor();
+
+  // fields
+
+  // database
+ public:
+  static const int kdatabaseFieldNumber = 1;
+  const std::string& database() const { return database_; }
+  bool has_database() const { return _has_bits_[0] & 0x1u; }
+  void clear_database() {
+    clear_has_database();
+    database_.clear();
+  }
+  void set_database(std::string value) {
+    database_ = value;
+    set_has_database();
+  }
+
+ private:
+  void set_has_database() { _has_bits_[0] |= 0x1u; }
+  void clear_has_database() { _has_bits_[0] &= ~0x1u; }
+
+  std::string database_;
+
+  // collection
+ public:
+  static const int kcollectionFieldNumber = 2;
+  const std::string& collection() const { return collection_; }
+  bool has_collection() const { return _has_bits_[0] & 0x2u; }
+  void clear_collection() {
+    clear_has_collection();
+    collection_.clear();
+  }
+  void set_collection(std::string value) {
+    collection_ = value;
+    set_has_collection();
+  }
+
+ private:
+  void set_has_collection() { _has_bits_[0] |= 0x2u; }
+  void clear_has_collection() { _has_bits_[0] &= ~0x2u; }
+
+  std::string collection_;
+
+  // query
+ public:
+  static const int kqueryFieldNumber = 3;
+  const BsonPtr& query() const { return query_; }
+  bool has_query() const { return _has_bits_[0] & 0x4u; }
+  void clear_query() {
+    clear_has_query();
+    query_.reset();
+  }
+  void set_query(BsonPtr value) {
+    query_ = value;
+    set_has_query();
+  }
+
+ private:
+  void set_has_query() { _has_bits_[0] |= 0x4u; }
+  void clear_has_query() { _has_bits_[0] &= ~0x4u; }
+
+  BsonPtr query_;
+
+  // sort
+ public:
+  static const int ksortFieldNumber = 4;
+  const BsonPtr& sort() const { return sort_; }
+  bool has_sort() const { return _has_bits_[0] & 0x8u; }
+  void clear_sort() {
+    clear_has_sort();
+    sort_.reset();
+  }
+  void set_sort(BsonPtr value) {
+    sort_ = value;
+    set_has_sort();
+  }
+
+ private:
+  void set_has_sort() { _has_bits_[0] |= 0x8u; }
+  void clear_has_sort() { _has_bits_[0] &= ~0x8u; }
+
+  BsonPtr sort_;
+
+  // skip
+ public:
+  static const int kskipFieldNumber = 5;
+  int32_t skip() const { return skip_; }
+  bool has_skip() const { return _has_bits_[0] & 0x10u; }
+  void clear_skip() {
+    clear_has_skip();
+    skip_ = 0;
+  }
+  void set_skip(int32_t value) {
+    skip_ = value;
+    set_has_skip();
+  }
+
+ private:
+  void set_has_skip() { _has_bits_[0] |= 0x10u; }
+  void clear_has_skip() { _has_bits_[0] &= ~0x10u; }
+
+  int32_t skip_;
+
+  // limit
+ public:
+  static const int klimitFieldNumber = 6;
+  int32_t limit() const { return limit_; }
+  bool has_limit() const { return _has_bits_[0] & 0x20u; }
+  void clear_limit() {
+    clear_has_limit();
+    limit_ = 0;
+  }
+  void set_limit(int32_t value) {
+    limit_ = value;
+    set_has_limit();
+  }
+
+ private:
+  void set_has_limit() { _has_bits_[0] |= 0x20u; }
+  void clear_has_limit() { _has_bits_[0] &= ~0x20u; }
+
+  int32_t limit_;
+
+  // fields
+ public:
+  static const int kfieldsFieldNumber = 7;
+  const std::vector<std::string>& fields() const { return fields_; }
+  int fields_size() const { return fields_.size(); }
+  void clear_fields() { fields_.clear(); }
+  const std::string& fields(int index) const { return fields_[index]; }
+  std::string* mutable_fields(int index) { return &fields_[index]; }
+  void add_fields(std::string value) { fields_.push_back(std::move(value)); }
+
+ private:
+  std::vector<std::string> fields_;
+
+ protected:
+  ::google::protobuf::Metadata GetMetadata() const override;
+
+ private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+
+  ::google::protobuf::internal::HasBits<1> _has_bits_;
+  mutable int _cached_size_;
+};
+
+class MongoQueryResponse : public ::google::protobuf::Message {
+ public:
+  MongoQueryResponse();
+  virtual ~MongoQueryResponse();
+  MongoQueryResponse(const MongoQueryResponse& from);
+  inline MongoQueryResponse& operator=(const MongoQueryResponse& from) {
+    CopyFrom(from);
+    return *this;
+  }
+  void Swap(MongoQueryResponse* other);
+
+  int64_t cursorid() const;
+  bool has_cursorid() const;
+  void clear_cursorid();
+  void set_cursorid(int64_t cursorid);
+  static const int kCursoridFieldNumber = 1;
+
+  int32_t starting_from() const;
+  bool has_starting_from() const;
+  void clear_starting_from();
+  void set_starting_from(int32_t value);
+  static const int kStartingFromFieldNumber = 2;
+
+  int32_t number_returned() const;
+  bool has_number_returned() const;
+  void clear_number_returned();
+  void set_number_returned(int32_t value);
+  static const int kNumberReturnedFieldNumber = 3;
+
+  int documents_size() const;
+  void clear_documents();
+  BsonPtr* mutable_documents(int index);
+  std::vector<BsonPtr>* mutable_documents();
+  const BsonPtr& documents(int index) const;
+  void add_documents(const BsonPtr&);
+  const std::vector<BsonPtr>& documents() const;
+  static const int kDocumentsFieldNumber = 4;
+
+  std::string ns() const;
+  bool has_ns() const;
+  void clear_ns();
+  void set_ns(std::string value);
+  static const int kNSfieldNumber = 5;
+
+  bool SerializeTo(butil::IOBuf* buf) const;
+
+  MongoQueryResponse* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MongoQueryResponse& from);
+  void MergeFrom(const MongoQueryResponse& from);
+  void Clear();
+  bool IsInitialized() const;
+  // int ByteSize() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(
+      ::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+
+  // void Print(std::ostream&) const;
+
+ protected:
+  ::google::protobuf::Metadata GetMetadata() const override;
+
+ private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+
+  void set_has_cursorid();
+  void clear_has_cursorid();
+
+  void set_has_starting_from();
+  void clear_has_starting_from();
+
+  void set_has_number_returned();
+  void clear_has_number_returned();
+
+  void set_has_ns();
+  void clear_has_ns();
+
+  int64_t cursorid_;
+  int32_t starting_from_;
+  int32_t number_returned_;
+  std::vector<BsonPtr> documents_;
+  std::string ns_;
+  ::google::protobuf::internal::HasBits<1> _has_bits_;
+  mutable int _cached_size_;
+};
+
+inline int64_t MongoQueryResponse::cursorid() const { return cursorid_; }
+
+inline bool MongoQueryResponse::has_cursorid() const {
+  return _has_bits_[0] & 0x00000001u;
+}
+
+inline void MongoQueryResponse::clear_cursorid() {
+  clear_has_cursorid();
+  cursorid_ = 0;
+}
+
+inline void MongoQueryResponse::set_cursorid(int64_t cursorid) {
+  cursorid_ = cursorid;
+  set_has_cursorid();
+}
+
+inline void MongoQueryResponse::set_has_cursorid() {
+  _has_bits_[0] |= 0x00000001u;
+}
+
+inline void MongoQueryResponse::clear_has_cursorid() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+
+inline int32_t MongoQueryResponse::starting_from() const {
+  return starting_from_;
+}
+
+inline bool MongoQueryResponse::has_starting_from() const {
+  return _has_bits_[0] & 0x00000002u;
+}
+
+inline void MongoQueryResponse::clear_starting_from() {
+  clear_has_starting_from();
+  starting_from_ = 0;
+}
+
+inline void MongoQueryResponse::set_starting_from(int32_t value) {
+  starting_from_ = value;
+  set_has_starting_from();
+}
+
+inline void MongoQueryResponse::set_has_starting_from() {
+  _has_bits_[0] |= 0x00000002u;
+}
+
+inline void MongoQueryResponse::clear_has_starting_from() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+
+inline int32_t MongoQueryResponse::number_returned() const {
+  return number_returned_;
+}
+
+inline bool MongoQueryResponse::has_number_returned() const {
+  return _has_bits_[0] & 0x00000004u;
+}
+
+inline void MongoQueryResponse::clear_number_returned() {
+  clear_has_number_returned();
+  number_returned_ = 0;
+}
+
+inline void MongoQueryResponse::set_number_returned(int32_t value) {
+  number_returned_ = value;
+  set_has_number_returned();
+}
+
+inline void MongoQueryResponse::set_has_number_returned() {
+  _has_bits_[0] |= 0x00000004u;
+}
+
+inline void MongoQueryResponse::clear_has_number_returned() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+
+inline int MongoQueryResponse::documents_size() const {
+  return documents_.size();
+}
+
+inline void MongoQueryResponse::clear_documents() { documents_.clear(); }
+
+inline BsonPtr* MongoQueryResponse::mutable_documents(int index) {
+  return &documents_[index];
+}
+
+inline std::vector<BsonPtr>* MongoQueryResponse::mutable_documents() {
+  return &documents_;
+}
+
+inline const BsonPtr& MongoQueryResponse::documents(int index) const {
+  return documents_[index];
+}
+
+inline void MongoQueryResponse::add_documents(const BsonPtr& value) {
+  documents_.push_back(value);
+}
+
+inline const std::vector<BsonPtr>& MongoQueryResponse::documents() const {
+  return documents_;
+}
+
+inline std::string MongoQueryResponse::ns() const { return ns_; }
+
+inline bool MongoQueryResponse::has_ns() const {
+  return _has_bits_[0] & 0x00000010u;
+}
+
+inline void MongoQueryResponse::clear_ns() {
+  clear_has_ns();
+  ns_.clear();
+}
+
+inline void MongoQueryResponse::set_ns(std::string value) {
+  ns_ = value;
+  set_has_ns();
+}
+
+inline void MongoQueryResponse::set_has_ns() {
+  _has_bits_[0] |= 0x00000010u;
+}
+
+inline void MongoQueryResponse::clear_has_ns() {
+  _has_bits_[0] &= ~0x00000010u;
+}
+
+inline void MongoQueryResponse::SetCachedSize(int size) const {
+  _cached_size_ = size;
+}
+
+class MongoGetMoreRequest : public ::google::protobuf::Message {
+ public:
+  MongoGetMoreRequest();
+  virtual ~MongoGetMoreRequest();
+  MongoGetMoreRequest(const MongoGetMoreRequest& from);
+  inline MongoGetMoreRequest& operator=(const MongoGetMoreRequest& from) {
+    CopyFrom(from);
+    return *this;
+  }
+  void Swap(MongoGetMoreRequest* other);
+
+  // database
+  const std::string& database() const;
+  bool has_database() const;
+  void clear_database();
+  void set_database(std::string database);
+  static const int kDatabaseFieldNumber = 1;
+
+  // collection
+  const std::string& collection() const;
+  bool has_collection() const;
+  void clear_collection();
+  void set_collection(std::string collection);
+  static const int kCollectionFieldNumber = 2;
+
+  // cursor_id
+  int64_t cursorid() const;
+  bool has_cursorid() const;
+  void clear_cursorid();
+  void set_cursorid(int64_t cursorid);
+  static const int kCursorIdFieldNumber = 3;
+
+  // batch_size
+  int32_t batch_size() const;
+  bool has_batch_size() const;
+  void clear_batch_size();
+  void set_batch_size(int32_t batch_size);
+  static const int kBatchSizeFieldNumber = 4;
+
+  // maxTimeMS
+  int32_t max_time_ms() const;
+  bool has_max_time_ms() const;
+  void clear_max_time_ms();
+  void set_max_time_ms(int32_t max_time_ms);
+  static const int kMaxTimeMSFieldNumber = 5;
+
+  // comment
+  BsonPtr comment() const;
+  bool has_comment() const;
+  void clear_comment();
+  void set_comment(BsonPtr comment);
+  static const int kCommentFieldNumber = 6;
+
+  bool SerializeTo(butil::IOBuf* buf) const;
+
+  MongoGetMoreRequest* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MongoGetMoreRequest& from);
+  void MergeFrom(const MongoGetMoreRequest& from);
+  void Clear();
+  bool IsInitialized() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(
+      ::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+
+  static const ::google::protobuf::Descriptor* descriptor();
+
+ protected:
+  ::google::protobuf::Metadata GetMetadata() const override;
+
+ private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+
+  void set_has_database();
+  void clear_has_database();
+
+  void set_has_collection();
+  void clear_has_collection();
+
+  void set_has_cursorid();
+  void clear_has_cursorid();
+
+  void set_has_batch_size();
+  void clear_has_batch_size();
+
+  void set_has_max_time_ms();
+  void clear_has_max_time_ms();
+
+  void set_has_comment();
+  void clear_has_comment();
+
+  std::string database_;
+  std::string collection_;
+  int64_t cursorid_;
+  int32_t batch_size_;
+  int32_t max_time_ms_;
+  BsonPtr comment_;
+  ::google::protobuf::internal::HasBits<1> _has_bits_;
+  mutable int _cached_size_;
+};
+
+inline const std::string& MongoGetMoreRequest::database() const {
+  return database_;
+}
+
+inline bool MongoGetMoreRequest::has_database() const {
+  return _has_bits_[0] & 0x00000001u;
+}
+
+inline void MongoGetMoreRequest::clear_database() {
+  clear_has_database();
+  database_.clear();
+}
+
+inline void MongoGetMoreRequest::set_database(std::string database) {
+  database_ = database;
+  set_has_database();
+}
+
+inline void MongoGetMoreRequest::set_has_database() {
+  _has_bits_[0] |= 0x00000001u;
+}
+
+inline void MongoGetMoreRequest::clear_has_database() {
+  _has_bits_[0] &= ~0x00000001u;
+}
+
+inline const std::string& MongoGetMoreRequest::collection() const {
+  return collection_;
+}
+
+inline bool MongoGetMoreRequest::has_collection() const {
+  return _has_bits_[0] & 0x00000002u;
+}
+
+inline void MongoGetMoreRequest::clear_collection() {
+  clear_has_collection();
+  collection_.clear();
+}
+inline void MongoGetMoreRequest::set_collection(std::string collection) {
+  collection_ = collection;
+  set_has_collection();
+}
+
+inline void MongoGetMoreRequest::set_has_collection() {
+  _has_bits_[0] |= 0x00000002u;
+}
+
+inline void MongoGetMoreRequest::clear_has_collection() {
+  _has_bits_[0] &= ~0x00000002u;
+}
+
+inline int64_t MongoGetMoreRequest::cursorid() const { return cursorid_; }
+
+inline bool MongoGetMoreRequest::has_cursorid() const {
+  return _has_bits_[0] & 0x00000004u;
+}
+
+inline void MongoGetMoreRequest::clear_cursorid() {
+  clear_has_cursorid();
+  cursorid_ = 0;
+}
+
+inline void MongoGetMoreRequest::set_cursorid(int64_t cursorid) {
+  cursorid_ = cursorid;
+  set_has_cursorid();
+}
+
+inline void MongoGetMoreRequest::set_has_cursorid() {
+  _has_bits_[0] |= 0x00000004u;
+}
+
+inline void MongoGetMoreRequest::clear_has_cursorid() {
+  _has_bits_[0] &= ~0x00000004u;
+}
+
+inline int32_t MongoGetMoreRequest::batch_size() const { return batch_size_; }
+
+inline bool MongoGetMoreRequest::has_batch_size() const {
+  return _has_bits_[0] & 0x00000008u;
+}
+
+inline void MongoGetMoreRequest::clear_batch_size() {
+  batch_size_ = 0;
+  clear_has_batch_size();
+}
+
+inline void MongoGetMoreRequest::set_batch_size(int32_t batch_size) {
+  batch_size_ = batch_size;
+  set_has_batch_size();
+}
+
+inline void MongoGetMoreRequest::set_has_batch_size() {
+  _has_bits_[0] |= 0x00000008u;
+}
+
+inline void MongoGetMoreRequest::clear_has_batch_size() {
+  _has_bits_[0] &= ~0x00000008u;
+}
+
+inline int32_t MongoGetMoreRequest::max_time_ms() const { return max_time_ms_; }
+
+inline bool MongoGetMoreRequest::has_max_time_ms() const {
+  return _has_bits_[0] & 0x00000010u;
+}
+
+inline void MongoGetMoreRequest::clear_max_time_ms() {
+  max_time_ms_ = 0;
+  clear_has_max_time_ms();
+}
+
+inline void MongoGetMoreRequest::set_max_time_ms(int32_t max_time_ms) {
+  max_time_ms_ = max_time_ms;
+  set_has_max_time_ms();
+}
+
+inline void MongoGetMoreRequest::set_has_max_time_ms() {
+  _has_bits_[0] |= 0x00000010u;
+}
+
+inline void MongoGetMoreRequest::clear_has_max_time_ms() {
+  _has_bits_[0] &= ~0x00000010u;
+}
+
+inline BsonPtr MongoGetMoreRequest::comment() const { return comment_; }
+
+inline bool MongoGetMoreRequest::has_comment() const {
+  return _has_bits_[0] & 0x00000020u;
+}
+
+inline void MongoGetMoreRequest::clear_comment() {
+  clear_has_comment();
+  comment_.reset();
+}
+
+inline void MongoGetMoreRequest::set_comment(BsonPtr comment) {
+  comment_ = comment;
+  set_has_comment();
+}
+
+inline void MongoGetMoreRequest::set_has_comment() {
+  _has_bits_[0] |= 0x00000020u;
+}
+
+inline void MongoGetMoreRequest::clear_has_comment() {
+  _has_bits_[0] &= ~0x00000020u;
+}
+
+class MongoCountRequest : public ::google::protobuf::Message {
+ public:
+  MongoCountRequest();
+  virtual ~MongoCountRequest();
+  MongoCountRequest(const MongoCountRequest& from);
+  MongoCountRequest& operator=(const MongoCountRequest& from);
+  void Swap(MongoCountRequest* other);
+  bool SerializeTo(butil::IOBuf* buf) const;
+  MongoCountRequest* New() const;
+  void CopyFrom(const ::google::protobuf::Message& from);
+  void MergeFrom(const ::google::protobuf::Message& from);
+  void CopyFrom(const MongoCountRequest& from);
+  void MergeFrom(const MongoCountRequest& from);
+  void Clear();
+  bool IsInitialized() const;
+  bool MergePartialFromCodedStream(
+      ::google::protobuf::io::CodedInputStream* input);
+  void SerializeWithCachedSizes(
+      ::google::protobuf::io::CodedOutputStream* output) const;
+  ::google::protobuf::uint8* SerializeWithCachedSizesToArray(
+      ::google::protobuf::uint8* output) const;
+  int GetCachedSize() const { return _cached_size_; }
+  static const ::google::protobuf::Descriptor* descriptor();
+
+  // fields
+  // database
+ public:
+  static const int kdatabaseFieldNumber = 1;
+  const std::string& database() const { return database_; }
+  bool has_database() const { return _has_bits_[0] & 0x1u; }
+  void clear_database() {
+    clear_has_database();
+    database_.clear();
+  }
+  void set_database(std::string value) {
+    database_ = value;
+    set_has_database();
+  }
+
+ private:
+  void set_has_database() { _has_bits_[0] |= 0x1u; }
+  void clear_has_database() { _has_bits_[0] &= ~0x1u; }
+
+  std::string database_;
+
+  // collection
+ public:
+  static const int kcollectionFieldNumber = 2;
+  const std::string& collection() const { return collection_; }
+  bool has_collection() const { return _has_bits_[0] & 0x2u; }
+  void clear_collection() {
+    clear_has_collection();
+    collection_.clear();
+  }
+  void set_collection(std::string value) {
+    collection_ = value;
+    set_has_collection();
+  }
+
+ private:
+  void set_has_collection() { _has_bits_[0] |= 0x2u; }
+  void clear_has_collection() { _has_bits_[0] &= ~0x2u; }
+
+  std::string collection_;
+
+  // query
+ public:
+  static const int kqueryFieldNumber = 3;
+  const BsonPtr& query() const { return query_; }
+  bool has_query() const { return _has_bits_[0] & 0x4u; }
+  void clear_query() {
+    clear_has_query();
+    query_.reset();
+  }
+  void set_query(BsonPtr value) {
+    query_ = value;
+    set_has_query();
+  }
+
+ private:
+  void set_has_query() { _has_bits_[0] |= 0x4u; }
+  void clear_has_query() { _has_bits_[0] &= ~0x4u; }
+
+  BsonPtr query_;
+
+  // skip
+ public:
+  static const int kskipFieldNumber = 4;
+  int64_t skip() const { return skip_; }
+  bool has_skip() const { return _has_bits_[0] & 0x8u; }
+  void clear_skip() {
+    clear_has_skip();
+    skip_ = 0;
+  }
+  void set_skip(int64_t value) {
+    skip_ = value;
+    set_has_skip();
+  }
+
+ private:
+  void set_has_skip() { _has_bits_[0] |= 0x8u; }
+  void clear_has_skip() { _has_bits_[0] &= ~0x8u; }
+
+  int64_t skip_;
+
+  // limit
+ public:
+  static const int klimitFieldNumber = 5;
+  int64_t limit() const { return limit_; }
+  bool has_limit() const { return _has_bits_[0] & 0x10u; }
+  void clear_limit() {
+    clear_has_limit();
+    limit_ = 0;
+  }
+  void set_limit(int64_t value) {
+    limit_ = value;
+    set_has_limit();
+  }
+
+ private:
+  void set_has_limit() { _has_bits_[0] |= 0x10u; }
+  void clear_has_limit() { _has_bits_[0] &= ~0x10u; }
+
+  int64_t limit_;
+
+ protected:
+  ::google::protobuf::Metadata GetMetadata() const override;
+
+ private:
+  void SharedCtor();
+  void SharedDtor();
+  void SetCachedSize(int size) const;
+
+  ::google::protobuf::internal::HasBits<1> _has_bits_;
+  mutable int _cached_size_;
+};
+
+class MongoCountResponse : public ::google::protobuf::Message {
+ public:
+  MongoCountResponse();
+  virtual ~MongoCountResponse();
MongoCountResponse(const MongoCountResponse& from); + MongoCountResponse& operator=(const MongoCountResponse& from); + void Swap(MongoCountResponse* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoCountResponse* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoCountResponse& from); + void MergeFrom(const MongoCountResponse& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + // number + public: + static const int knumberFieldNumber = 1; + int32_t number() const { return number_; } + bool has_number() const { return _has_bits_[0] & 0x1u; } + void clear_number() { + clear_has_number(); + number_ = 0; + } + void set_number(int32_t value) { + number_ = value; + set_has_number(); + } + + private: + void set_has_number() { _has_bits_[0] |= 0x1u; } + void clear_has_number() { _has_bits_[0] &= ~0x1u; } + + int32_t number_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoInsertRequest : public ::google::protobuf::Message { + public: + MongoInsertRequest(); + virtual ~MongoInsertRequest(); + MongoInsertRequest(const MongoInsertRequest& from); + MongoInsertRequest& operator=(const MongoInsertRequest& from); + void Swap(MongoInsertRequest* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoInsertRequest* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoInsertRequest& from); + void MergeFrom(const MongoInsertRequest& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // database + public: + static const int kdatabaseFieldNumber = 1; + const std::string& database() const { return database_; } + bool has_database() const { return _has_bits_[0] & 0x1u; } + void clear_database() { + clear_has_database(); + database_.clear(); + } + void set_database(std::string value) { + database_ = value; + set_has_database(); + } + + private: + void set_has_database() { _has_bits_[0] |= 0x1u; } + void clear_has_database() { _has_bits_[0] &= ~0x1u; } + + std::string database_; + + // collection + public: + static const int kcollectionFieldNumber = 2; + const std::string& collection() const { return collection_; } + bool has_collection() const { return _has_bits_[0] & 0x2u; } + void clear_collection() { + clear_has_collection(); + collection_.clear(); + } + void set_collection(std::string value) { + collection_ = value; + set_has_collection(); + 
} + + private: + void set_has_collection() { _has_bits_[0] |= 0x2u; } + void clear_has_collection() { _has_bits_[0] &= ~0x2u; } + + std::string collection_; + + // ordered + public: + static const int korderedFieldNumber = 3; + bool ordered() const { return ordered_; } + bool has_ordered() const { return _has_bits_[0] & 0x4u; } + void clear_ordered() { + clear_has_ordered(); + ordered_ = false; + } + void set_ordered(bool value) { + ordered_ = value; + set_has_ordered(); + } + + private: + void set_has_ordered() { _has_bits_[0] |= 0x4u; } + void clear_has_ordered() { _has_bits_[0] &= ~0x4u; } + + bool ordered_; + + // documents + public: + static const int kdocumentsFieldNumber = 4; + const std::vector<BsonPtr>& documents() const { return documents_; } + int documents_size() const { return documents_.size(); } + void clear_documents() { documents_.clear(); } + const BsonPtr& documents(int index) const { return documents_[index]; } + BsonPtr* mutable_documents(int index) { return &documents_[index]; } + void add_documents(BsonPtr value) { documents_.push_back(std::move(value)); } + + private: + std::vector<BsonPtr> documents_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +struct WriteError { + int32_t index; + int32_t code; + std::string errmsg; +}; + +class MongoInsertResponse : public ::google::protobuf::Message { + public: + MongoInsertResponse(); + virtual ~MongoInsertResponse(); + MongoInsertResponse(const MongoInsertResponse& from); + MongoInsertResponse& operator=(const MongoInsertResponse& from); + void Swap(MongoInsertResponse* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoInsertResponse* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoInsertResponse& from); + void MergeFrom(const MongoInsertResponse& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // number + public: + static const int knumberFieldNumber = 1; + int32_t number() const { return number_; } + bool has_number() const { return _has_bits_[0] & 0x1u; } + void clear_number() { + clear_has_number(); + number_ = 0; + } + void set_number(int32_t value) { + number_ = value; + set_has_number(); + } + + private: + void set_has_number() { _has_bits_[0] |= 0x1u; } + void clear_has_number() { _has_bits_[0] &= ~0x1u; } + + int32_t number_; + + // write_errors + public: + static const int kwrite_errorsFieldNumber = 2; + const std::vector<WriteError>& write_errors() const { return write_errors_; } + int write_errors_size() const { return write_errors_.size(); } + void clear_write_errors() { write_errors_.clear(); } + const WriteError& write_errors(int index) const { + return write_errors_[index]; + } + WriteError* mutable_write_errors(int index) { return &write_errors_[index]; } + void add_write_errors(WriteError value) { + write_errors_.push_back(std::move(value)); + } + + private: + std::vector<WriteError> write_errors_; + + 
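+  // Illustrative usage sketch (editor's note; hypothetical caller code, assuming +  // BsonPtr is a shared-pointer-like handle owning a libbson bson_t): +  //   brpc::MongoInsertRequest request; +  //   request.set_database("test_db"); +  //   request.set_collection("users"); +  //   request.add_documents(BsonPtr(BCON_NEW("name", BCON_UTF8("alice")), bson_destroy)); +  //   // after the call, per-document failures are reported here: +  //   //   for (int i = 0; i < response.write_errors_size(); ++i) { +  //   //     const WriteError& e = response.write_errors(i);  // index/code/errmsg +  //   //   } + 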
protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoDeleteRequest : public ::google::protobuf::Message { + public: + MongoDeleteRequest(); + virtual ~MongoDeleteRequest(); + MongoDeleteRequest(const MongoDeleteRequest& from); + MongoDeleteRequest& operator=(const MongoDeleteRequest& from); + void Swap(MongoDeleteRequest* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoDeleteRequest* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoDeleteRequest& from); + void MergeFrom(const MongoDeleteRequest& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // database + public: + static const int kdatabaseFieldNumber = 1; + const std::string& database() const { return database_; } + bool has_database() const { return _has_bits_[0] & 0x1u; } + void clear_database() { + clear_has_database(); + database_.clear(); + } + void set_database(std::string value) { + database_ = value; + set_has_database(); + } + + private: + void set_has_database() { _has_bits_[0] |= 0x1u; } + void clear_has_database() { _has_bits_[0] &= ~0x1u; } + + std::string database_; + + // collection + public: + static const int kcollectionFieldNumber = 2; + const std::string& collection() const { return collection_; } + bool has_collection() const { return _has_bits_[0] & 0x2u; } + void clear_collection() { + clear_has_collection(); + collection_.clear(); + } + void set_collection(std::string value) { + collection_ = value; + set_has_collection(); + } + + private: + void set_has_collection() { _has_bits_[0] |= 0x2u; } + void clear_has_collection() { _has_bits_[0] &= ~0x2u; } + + std::string collection_; + + // ordered + public: + static const int korderedFieldNumber = 3; + bool ordered() const { return ordered_; } + bool has_ordered() const { return _has_bits_[0] & 0x4u; } + void clear_ordered() { + clear_has_ordered(); + ordered_ = false; + } + void set_ordered(bool value) { + ordered_ = value; + set_has_ordered(); + } + + private: + void set_has_ordered() { _has_bits_[0] |= 0x4u; } + void clear_has_ordered() { _has_bits_[0] &= ~0x4u; } + + bool ordered_; + + // query + public: + static const int kqueryFieldNumber = 4; + const BsonPtr& query() const { return query_; } + bool has_query() const { return _has_bits_[0] & 0x8u; } + void clear_query() { + clear_has_query(); + query_.reset(); + } + void set_query(BsonPtr value) { + query_ = value; + set_has_query(); + } + + private: + void set_has_query() { _has_bits_[0] |= 0x8u; } + void clear_has_query() { _has_bits_[0] &= ~0x8u; } + + BsonPtr query_; + + // delete_many + public: + static const int kdelete_manyFieldNumber = 5; + bool delete_many() const { return delete_many_; } + bool has_delete_many() const { return _has_bits_[0] & 0x10u; } + void clear_delete_many() { + clear_has_delete_many(); + delete_many_ = false; + } + void 
set_delete_many(bool value) { + delete_many_ = value; + set_has_delete_many(); + } + + private: + void set_has_delete_many() { _has_bits_[0] |= 0x10u; } + void clear_has_delete_many() { _has_bits_[0] &= ~0x10u; } + + bool delete_many_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoDeleteResponse : public ::google::protobuf::Message { + public: + MongoDeleteResponse(); + virtual ~MongoDeleteResponse(); + MongoDeleteResponse(const MongoDeleteResponse& from); + MongoDeleteResponse& operator=(const MongoDeleteResponse& from); + void Swap(MongoDeleteResponse* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoDeleteResponse* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoDeleteResponse& from); + void MergeFrom(const MongoDeleteResponse& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // number + public: + static const int knumberFieldNumber = 1; + int32_t number() const { return number_; } + bool has_number() const { return _has_bits_[0] & 0x1u; } + void clear_number() { + clear_has_number(); + number_ = 0; + } + void set_number(int32_t value) { + number_ = value; + set_has_number(); + } + + private: + void set_has_number() { _has_bits_[0] |= 0x1u; } + void clear_has_number() { _has_bits_[0] &= ~0x1u; } + + int32_t number_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoUpdateRequest : public ::google::protobuf::Message { + public: + MongoUpdateRequest(); + virtual ~MongoUpdateRequest(); + MongoUpdateRequest(const MongoUpdateRequest& from); + MongoUpdateRequest& operator=(const MongoUpdateRequest& from); + void Swap(MongoUpdateRequest* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoUpdateRequest* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoUpdateRequest& from); + void MergeFrom(const MongoUpdateRequest& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // database + public: + static const int kdatabaseFieldNumber = 1; + const std::string& database() const { return database_; } + bool has_database() const { return _has_bits_[0] & 0x1u; } + void clear_database() { + 
clear_has_database(); + database_.clear(); + } + void set_database(std::string value) { + database_ = value; + set_has_database(); + } + + private: + void set_has_database() { _has_bits_[0] |= 0x1u; } + void clear_has_database() { _has_bits_[0] &= ~0x1u; } + + std::string database_; + + // collection + public: + static const int kcollectionFieldNumber = 2; + const std::string& collection() const { return collection_; } + bool has_collection() const { return _has_bits_[0] & 0x2u; } + void clear_collection() { + clear_has_collection(); + collection_.clear(); + } + void set_collection(std::string value) { + collection_ = value; + set_has_collection(); + } + + private: + void set_has_collection() { _has_bits_[0] |= 0x2u; } + void clear_has_collection() { _has_bits_[0] &= ~0x2u; } + + std::string collection_; + + // selector + public: + static const int kselectorFieldNumber = 3; + const BsonPtr& selector() const { return selector_; } + bool has_selector() const { return _has_bits_[0] & 0x4u; } + void clear_selector() { + clear_has_selector(); + selector_.reset(); + } + void set_selector(BsonPtr value) { + selector_ = value; + set_has_selector(); + } + + private: + void set_has_selector() { _has_bits_[0] |= 0x4u; } + void clear_has_selector() { _has_bits_[0] &= ~0x4u; } + + BsonPtr selector_; + + // update + public: + static const int kupdateFieldNumber = 4; + const BsonPtr& update() const { return update_; } + bool has_update() const { return _has_bits_[0] & 0x8u; } + void clear_update() { + clear_has_update(); + update_.reset(); + } + void set_update(BsonPtr value) { + update_ = value; + set_has_update(); + } + + private: + void set_has_update() { _has_bits_[0] |= 0x8u; } + void clear_has_update() { _has_bits_[0] &= ~0x8u; } + + BsonPtr update_; + + // ordered + public: + static const int korderedFieldNumber = 5; + bool ordered() const { return ordered_; } + bool has_ordered() const { return _has_bits_[0] & 0x10u; } + void clear_ordered() { + clear_has_ordered(); + ordered_ = false; + } + void set_ordered(bool value) { + ordered_ = value; + set_has_ordered(); + } + + private: + void set_has_ordered() { _has_bits_[0] |= 0x10u; } + void clear_has_ordered() { _has_bits_[0] &= ~0x10u; } + + bool ordered_; + + // upsert + public: + static const int kupsertFieldNumber = 6; + bool upsert() const { return upsert_; } + bool has_upsert() const { return _has_bits_[0] & 0x20u; } + void clear_upsert() { + clear_has_upsert(); + upsert_ = false; + } + void set_upsert(bool value) { + upsert_ = value; + set_has_upsert(); + } + + private: + void set_has_upsert() { _has_bits_[0] |= 0x20u; } + void clear_has_upsert() { _has_bits_[0] &= ~0x20u; } + + bool upsert_; + + // multi + public: + static const int kmultiFieldNumber = 7; + bool multi() const { return multi_; } + bool has_multi() const { return _has_bits_[0] & 0x40u; } + void clear_multi() { + clear_has_multi(); + multi_ = false; + } + void set_multi(bool value) { + multi_ = value; + set_has_multi(); + } + + private: + void set_has_multi() { _has_bits_[0] |= 0x40u; } + void clear_has_multi() { _has_bits_[0] &= ~0x40u; } + + bool multi_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +struct UpsertedDoc { + int32_t index; + bson_oid_t _id; +}; + +class MongoUpdateResponse : public ::google::protobuf::Message { + public: + MongoUpdateResponse(); + 
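+  // Illustrative usage sketch (editor's note; hypothetical caller code; selector +  // and update are assumed to be BsonPtr-wrapped bson_t documents): +  //   brpc::MongoUpdateRequest request; +  //   request.set_database("test_db"); +  //   request.set_collection("users"); +  //   request.set_selector(selector);  // e.g. {"name": "alice"} +  //   request.set_update(update);      // e.g. {"$set": {"age": 30}} +  //   request.set_upsert(true);        // insert when nothing matches +  //   request.set_multi(true);         // apply to every matched document +  //   // matched_number()/modified_number() below report the effect; +  //   // upserted_docs() carries the _id of any upserted document. + 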
virtual ~MongoUpdateResponse(); + MongoUpdateResponse(const MongoUpdateResponse& from); + MongoUpdateResponse& operator=(const MongoUpdateResponse& from); + void Swap(MongoUpdateResponse* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoUpdateResponse* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoUpdateResponse& from); + void MergeFrom(const MongoUpdateResponse& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // matched_number + public: + static const int kmatched_numberFieldNumber = 1; + int32_t matched_number() const { return matched_number_; } + bool has_matched_number() const { return _has_bits_[0] & 0x1u; } + void clear_matched_number() { + clear_has_matched_number(); + matched_number_ = 0; + } + void set_matched_number(int32_t value) { + matched_number_ = value; + set_has_matched_number(); + } + + private: + void set_has_matched_number() { _has_bits_[0] |= 0x1u; } + void clear_has_matched_number() { _has_bits_[0] &= ~0x1u; } + + int32_t matched_number_; + + // modified_number + public: + static const int kmodified_numberFieldNumber = 2; + int32_t modified_number() const { return modified_number_; } + bool has_modified_number() const { return _has_bits_[0] & 0x2u; } + void clear_modified_number() { + clear_has_modified_number(); + modified_number_ = 0; + } + void set_modified_number(int32_t value) { + modified_number_ = value; + set_has_modified_number(); + } + + private: + void set_has_modified_number() { _has_bits_[0] |= 0x2u; } + void clear_has_modified_number() { _has_bits_[0] &= ~0x2u; } + + int32_t modified_number_; + + // upserted_docs + public: + static const int kupserted_docsFieldNumber = 3; + const std::vector<UpsertedDoc>& upserted_docs() const { + return upserted_docs_; + } + int upserted_docs_size() const { return upserted_docs_.size(); } + void clear_upserted_docs() { upserted_docs_.clear(); } + const UpsertedDoc& upserted_docs(int index) const { + return upserted_docs_[index]; + } + UpsertedDoc* mutable_upserted_docs(int index) { + return &upserted_docs_[index]; + } + void add_upserted_docs(UpsertedDoc value) { + upserted_docs_.push_back(std::move(value)); + } + + private: + std::vector<UpsertedDoc> upserted_docs_; + + // write_errors + public: + static const int kwrite_errorsFieldNumber = 4; + const std::vector<WriteError>& write_errors() const { return write_errors_; } + int write_errors_size() const { return write_errors_.size(); } + void clear_write_errors() { write_errors_.clear(); } + const WriteError& write_errors(int index) const { + return write_errors_[index]; + } + WriteError* mutable_write_errors(int index) { return &write_errors_[index]; } + void add_write_errors(WriteError value) { + write_errors_.push_back(std::move(value)); + } + + private: + std::vector<WriteError> write_errors_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class 
MongoFindAndModifyRequest : public ::google::protobuf::Message { + public: + MongoFindAndModifyRequest(); + virtual ~MongoFindAndModifyRequest(); + MongoFindAndModifyRequest(const MongoFindAndModifyRequest& from); + MongoFindAndModifyRequest& operator=(const MongoFindAndModifyRequest& from); + void Swap(MongoFindAndModifyRequest* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoFindAndModifyRequest* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoFindAndModifyRequest& from); + void MergeFrom(const MongoFindAndModifyRequest& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // database + public: + static const int kdatabaseFieldNumber = 1; + const std::string& database() const { return database_; } + bool has_database() const { return _has_bits_[0] & 0x1u; } + void clear_database() { + clear_has_database(); + database_.clear(); + } + void set_database(std::string value) { + database_ = value; + set_has_database(); + } + + private: + void set_has_database() { _has_bits_[0] |= 0x1u; } + void clear_has_database() { _has_bits_[0] &= ~0x1u; } + + std::string database_; + + // collection + public: + static const int kcollectionFieldNumber = 2; + const std::string& collection() const { return collection_; } + bool has_collection() const { return _has_bits_[0] & 0x2u; } + void clear_collection() { + clear_has_collection(); + collection_.clear(); + } + void set_collection(std::string value) { + collection_ = value; + set_has_collection(); + } + + private: + void set_has_collection() { _has_bits_[0] |= 0x2u; } + void clear_has_collection() { _has_bits_[0] &= ~0x2u; } + + std::string collection_; + + // query + public: + static const int kqueryFieldNumber = 3; + const BsonPtr& query() const { return query_; } + bool has_query() const { return _has_bits_[0] & 0x4u; } + void clear_query() { + clear_has_query(); + query_.reset(); + } + void set_query(BsonPtr value) { + query_ = value; + set_has_query(); + } + + private: + void set_has_query() { _has_bits_[0] |= 0x4u; } + void clear_has_query() { _has_bits_[0] &= ~0x4u; } + + BsonPtr query_; + + // sort + public: + static const int ksortFieldNumber = 4; + const BsonPtr& sort() const { return sort_; } + bool has_sort() const { return _has_bits_[0] & 0x8u; } + void clear_sort() { + clear_has_sort(); + sort_.reset(); + } + void set_sort(BsonPtr value) { + sort_ = value; + set_has_sort(); + } + + private: + void set_has_sort() { _has_bits_[0] |= 0x8u; } + void clear_has_sort() { _has_bits_[0] &= ~0x8u; } + + BsonPtr sort_; + + // update + public: + static const int kupdateFieldNumber = 5; + const BsonPtr& update() const { return update_; } + bool has_update() const { return _has_bits_[0] & 0x10u; } + void clear_update() { + clear_has_update(); + update_.reset(); + } + void set_update(BsonPtr value) { + update_ = value; + set_has_update(); + } + + private: + void set_has_update() { _has_bits_[0] |= 0x10u; } + void clear_has_update() { _has_bits_[0] &= ~0x10u; } + + BsonPtr update_; + + // upsert + public: + static const int 
kupsertFieldNumber = 6; + bool upsert() const { return upsert_; } + bool has_upsert() const { return _has_bits_[0] & 0x20u; } + void clear_upsert() { + clear_has_upsert(); + upsert_ = false; + } + void set_upsert(bool value) { + upsert_ = value; + set_has_upsert(); + } + + private: + void set_has_upsert() { _has_bits_[0] |= 0x20u; } + void clear_has_upsert() { _has_bits_[0] &= ~0x20u; } + + bool upsert_; + + // remove + public: + static const int kremoveFieldNumber = 7; + bool remove() const { return remove_; } + bool has_remove() const { return _has_bits_[0] & 0x40u; } + void clear_remove() { + clear_has_remove(); + remove_ = false; + } + void set_remove(bool value) { + remove_ = value; + set_has_remove(); + } + + private: + void set_has_remove() { _has_bits_[0] |= 0x40u; } + void clear_has_remove() { _has_bits_[0] &= ~0x40u; } + + bool remove_; + + // return_new + public: + static const int kreturn_newFieldNumber = 8; + bool return_new() const { return return_new_; } + bool has_return_new() const { return _has_bits_[0] & 0x80u; } + void clear_return_new() { + clear_has_return_new(); + return_new_ = false; + } + void set_return_new(bool value) { + return_new_ = value; + set_has_return_new(); + } + + private: + void set_has_return_new() { _has_bits_[0] |= 0x80u; } + void clear_has_return_new() { _has_bits_[0] &= ~0x80u; } + + bool return_new_; + + // fields + public: + static const int kfieldsFieldNumber = 9; + const std::vector<std::string>& fields() const { return fields_; } + int fields_size() const { return fields_.size(); } + void clear_fields() { fields_.clear(); } + const std::string& fields(int index) const { return fields_[index]; } + std::string* mutable_fields(int index) { return &fields_[index]; } + void add_fields(std::string value) { fields_.push_back(std::move(value)); } + + private: + std::vector<std::string> fields_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoFindAndModifyResponse : public ::google::protobuf::Message { + public: + MongoFindAndModifyResponse(); + virtual ~MongoFindAndModifyResponse(); + MongoFindAndModifyResponse(const MongoFindAndModifyResponse& from); + MongoFindAndModifyResponse& operator=(const MongoFindAndModifyResponse& from); + void Swap(MongoFindAndModifyResponse* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoFindAndModifyResponse* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoFindAndModifyResponse& from); + void MergeFrom(const MongoFindAndModifyResponse& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // value + public: + static const int kvalueFieldNumber = 1; + const BsonPtr& value() const { return value_; } + bool has_value() const { return _has_bits_[0] & 0x1u; } + void clear_value() { + clear_has_value(); + value_.reset(); + } + void set_value(BsonPtr value) { + value_ = value; + set_has_value(); + } + 
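+  // Illustrative usage sketch (editor's note; hypothetical caller code): +  //   brpc::MongoFindAndModifyRequest request; +  //   request.set_database("test_db"); +  //   request.set_collection("jobs"); +  //   request.set_query(query);      // BsonPtr, e.g. {"state": "pending"} +  //   request.set_update(update);    // BsonPtr, e.g. {"$set": {"state": "running"}} +  //   request.set_return_new(true);  // value() below then holds the post-image +  //   // on an upsert, has_upserted() exposes the generated ObjectId + 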
+ private: + void set_has_value() { _has_bits_[0] |= 0x1u; } + void clear_has_value() { _has_bits_[0] &= ~0x1u; } + + BsonPtr value_; + + // upserted + public: + static const int kupsertedFieldNumber = 2; + const bson_oid_t& upserted() const { return upserted_; } + bool has_upserted() const { return _has_bits_[0] & 0x2u; } + void clear_upserted() { + clear_has_upserted(); + memset(&upserted_, 0, sizeof(upserted_)); + } + void set_upserted(bson_oid_t value) { + upserted_ = value; + set_has_upserted(); + } + + private: + void set_has_upserted() { _has_bits_[0] |= 0x2u; } + void clear_has_upserted() { _has_bits_[0] &= ~0x2u; } + + bson_oid_t upserted_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoGetReplSetStatusRequest : public ::google::protobuf::Message { + public: + MongoGetReplSetStatusRequest(); + virtual ~MongoGetReplSetStatusRequest(); + MongoGetReplSetStatusRequest(const MongoGetReplSetStatusRequest& from); + MongoGetReplSetStatusRequest& operator=( + const MongoGetReplSetStatusRequest& from); + void Swap(MongoGetReplSetStatusRequest* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoGetReplSetStatusRequest* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoGetReplSetStatusRequest& from); + void MergeFrom(const MongoGetReplSetStatusRequest& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +class MongoGetReplSetStatusResponse : public ::google::protobuf::Message { + public: + MongoGetReplSetStatusResponse(); + virtual ~MongoGetReplSetStatusResponse(); + MongoGetReplSetStatusResponse(const MongoGetReplSetStatusResponse& from); + MongoGetReplSetStatusResponse& operator=( + const MongoGetReplSetStatusResponse& from); + void Swap(MongoGetReplSetStatusResponse* other); + bool SerializeTo(butil::IOBuf* buf) const; + MongoGetReplSetStatusResponse* New() const; + void CopyFrom(const ::google::protobuf::Message& from); + void MergeFrom(const ::google::protobuf::Message& from); + void CopyFrom(const MongoGetReplSetStatusResponse& from); + void MergeFrom(const MongoGetReplSetStatusResponse& from); + void Clear(); + bool IsInitialized() const; + bool MergePartialFromCodedStream( + ::google::protobuf::io::CodedInputStream* input); + void SerializeWithCachedSizes( + ::google::protobuf::io::CodedOutputStream* output) const; + ::google::protobuf::uint8* SerializeWithCachedSizesToArray( + ::google::protobuf::uint8* output) const; + int GetCachedSize() const { return _cached_size_; } + static const ::google::protobuf::Descriptor* descriptor(); + + // fields + + // ok + public: + static const int 
kokFieldNumber = 1; + bool ok() const { return ok_; } + bool has_ok() const { return _has_bits_[0] & 0x1u; } + void clear_ok() { + clear_has_ok(); + ok_ = false; + } + void set_ok(bool value) { + ok_ = value; + set_has_ok(); + } + + private: + void set_has_ok() { _has_bits_[0] |= 0x1u; } + void clear_has_ok() { _has_bits_[0] &= ~0x1u; } + + bool ok_; + + // set + public: + static const int ksetFieldNumber = 2; + const std::string& set() const { return set_; } + bool has_set() const { return _has_bits_[0] & 0x2u; } + void clear_set() { + clear_has_set(); + set_.clear(); + } + void set_set(std::string value) { + set_ = value; + set_has_set(); + } + + private: + void set_has_set() { _has_bits_[0] |= 0x2u; } + void clear_has_set() { _has_bits_[0] &= ~0x2u; } + + std::string set_; + + // myState + public: + static const int kmyStateFieldNumber = 3; + int32_t myState() const { return myState_; } + bool has_myState() const { return _has_bits_[0] & 0x4u; } + void clear_myState() { + clear_has_myState(); + myState_ = 0; + } + void set_myState(int32_t value) { + myState_ = value; + set_has_myState(); + } + + private: + void set_has_myState() { _has_bits_[0] |= 0x4u; } + void clear_has_myState() { _has_bits_[0] &= ~0x4u; } + + int32_t myState_; + + // members + public: + static const int kmembersFieldNumber = 4; + const std::vector<ReplicaSetMember>& members() const { return members_; } + int members_size() const { return members_.size(); } + void clear_members() { members_.clear(); } + const ReplicaSetMember& members(int index) const { return members_[index]; } + ReplicaSetMember* mutable_members(int index) { return &members_[index]; } + void add_members(ReplicaSetMember value) { + members_.push_back(std::move(value)); + } + + private: + std::vector<ReplicaSetMember> members_; + + protected: + ::google::protobuf::Metadata GetMetadata() const override; + + private: + void SharedCtor(); + void SharedDtor(); + void SetCachedSize(int size) const; + + ::google::protobuf::internal::HasBits<1> _has_bits_; + mutable int _cached_size_; +}; + +} // namespace brpc + +#endif // BRPC_MONGO_H diff --git a/src/brpc/mongo_head.h b/src/brpc/mongo_head.h index b9da0171f2..9a53c5b92e 100644 --- a/src/brpc/mongo_head.h +++ b/src/brpc/mongo_head.h @@ -18,6 +18,8 @@ #ifndef BRPC_MONGO_HEAD_H #define BRPC_MONGO_HEAD_H +#include <string> +#include <vector> #include "butil/sys_byteorder.h" @@ -28,25 +30,27 @@ namespace brpc { // https://docs.mongodb.org/manual/reference/mongodb-wire-protocol/#request-opcodes enum MongoOpCode { MONGO_OPCODE_REPLY = 1, - MONGO_OPCODE_MSG = 1000, + // MONGO_OPCODE_MSG = 1000, MONGO_OPCODE_UPDATE = 2001, MONGO_OPCODE_INSERT = 2002, MONGO_OPCODE_QUERY = 2004, MONGO_OPCODE_GET_MORE = 2005, MONGO_OPCODE_DELETE = 2006, MONGO_OPCODE_KILL_CURSORS = 2007, + MONGO_OPCODE_MSG = 2013, }; inline bool is_mongo_opcode(int32_t op_code) { switch (op_code) { case MONGO_OPCODE_REPLY: return true; - case MONGO_OPCODE_MSG: return true; + // case MONGO_OPCODE_MSG: return true; case MONGO_OPCODE_UPDATE: return true; case MONGO_OPCODE_INSERT: return true; case MONGO_OPCODE_QUERY: return true; case MONGO_OPCODE_GET_MORE: return true; case MONGO_OPCODE_DELETE: return true; case MONGO_OPCODE_KILL_CURSORS : return true; + case MONGO_OPCODE_MSG: return true; } return false; } @@ -69,9 +73,29 @@ struct mongo_head_t { op_code = butil::ByteSwap((uint32_t)op_code); } } + + void make_network_endian() { + // the mongo wire format is little-endian; byte-swap on big-endian hosts + if (!ARCH_CPU_LITTLE_ENDIAN) { + message_length = butil::ByteSwap((uint32_t)message_length); + request_id = butil::ByteSwap((uint32_t)request_id); + response_to = 
butil::ByteSwap((uint32_t)response_to); + op_code = butil::ByteSwap((uint32_t)op_code); + } + } }; #pragma pack() +struct mongo_section_t { + uint8_t type; + std::string data; +}; + +struct mongo_msg_t { + uint32_t flag_bits; + std::vector<mongo_section_t> sections; +}; + } // namespace brpc diff --git a/src/brpc/policy/mongo.proto b/src/brpc/policy/mongo.proto index 87b839b3ad..1633d4dff2 100644 --- a/src/brpc/policy/mongo.proto +++ b/src/brpc/policy/mongo.proto @@ -26,15 +26,16 @@ option java_outer_classname="MongoProto"; enum MongoOp { OPREPLY = 1; - DBMSG = 1000; + // DBMSG = 1000; DB_UPDATE = 2001; DB_INSERT = 2002; DB_QUERY = 2004; DB_GETMORE = 2005; DB_DELETE = 2006; DB_KILLCURSORS = 2007; - DB_COMMAND = 2008; - DB_COMMANDREPLY = 2009; + // DB_COMMAND = 2008; + // DB_COMMANDREPLY = 2009; + DB_OP_MSG = 2013; } message MongoHeader { @@ -49,6 +50,34 @@ message MongoRequest { required string message = 2; } +message Document { + optional string document = 1; +} + +message QueryRequest { + required string database = 1; + required string collection = 2; + optional int32 skip = 3; + optional int32 limit = 4; + optional Document query = 5; // to be replaced with bson_t + repeated string fields = 6; +// optional bool TailableCursor = 1; +// optional bool SlaveOk = 2; +// optional bool OplogReplay = 3; +// optional bool NoCursorTimeout = 4; +// optional bool AwaitData = 5; +// optional bool Exhaust = 6; +// optional bool Partial = 7; +} + +message QueryResponse { + optional int64 cursorid = 1; + optional int32 starting_from = 2; + optional int32 number_returned = 3; + repeated Document documents = 4; // to be replaced with bson_t +} + + message MongoResponse { required MongoHeader header = 1; required int32 response_flags = 2; @@ -57,7 +86,9 @@ message MongoResponse { required int32 number_returned = 5; required string message = 6; } - service MongoService { - rpc default_method(MongoRequest) returns (MongoResponse); + // rpc default_method(MongoRequest) returns (MongoResponse); + rpc query(QueryRequest) returns (QueryResponse); + // rpc mongo_operation(MongoDBRequest) returns (MongoDBResponse); + // rpc test(TestRequest) returns (TestResponse); } diff --git a/src/brpc/policy/mongo_authenticator.h b/src/brpc/policy/mongo_authenticator.h new file mode 100644 index 0000000000..750a27770d --- /dev/null +++ b/src/brpc/policy/mongo_authenticator.h @@ -0,0 +1,60 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+// +// Author(s): Zhangke + +#ifndef BRPC_POLICY_MONGO_AUTHENTICATOR_H_ +#define BRPC_POLICY_MONGO_AUTHENTICATOR_H_ + +#include "brpc/authenticator.h" +namespace brpc { +namespace policy { + +class MongoAuthenticator : public Authenticator { + public: + MongoAuthenticator(const butil::StringPiece& user, + const butil::StringPiece& passwd) + : _user(user.data(), user.size()), + _passwd(passwd.data(), passwd.size()) {} + + int GenerateCredential(std::string* auth_str) const { + return 0; + } + + int VerifyCredential(const std::string& auth_str, + const butil::EndPoint& client_addr, + AuthContext* out_ctx) const { + return 0; + } + + const butil::StringPiece user() const { + return _user; + } + + const butil::StringPiece passwd() const { + return _passwd; + } + + private: + const std::string _user; + const std::string _passwd; +}; + +} // namespace policy +} // namespace brpc + +#endif // BRPC_POLICY_MONGO_AUTHENTICATOR_H_ diff --git a/src/brpc/policy/mongo_naming_service.cpp b/src/brpc/policy/mongo_naming_service.cpp new file mode 100644 index 0000000000..5cb2cee4a2 --- /dev/null +++ b/src/brpc/policy/mongo_naming_service.cpp @@ -0,0 +1,96 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
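+// Editor's note (usage sketch): the service_name parsed by GetServers() below +// is a comma-separated replica-set member list, e.g. +//   "127.0.0.1:27017,127.0.0.1:27027,127.0.0.1:27037". +// Each member is probed for replica-set status and the first member reporting +// PRIMARY is returned as the single server node. + 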
+ + +#include <vector> +#include "butil/string_printf.h" +#include "butil/strings/string_split.h" +#include "butil/fast_rand.h" +#include "bthread/bthread.h" +#include "brpc/channel.h" +#include "brpc/controller.h" +#include "brpc/policy/mongo_naming_service.h" +#include "brpc/mongo.h" + +namespace brpc { +namespace policy { + +// ========== MongoNamingService ============= + +int MongoNamingService::GetServers(const char* service_name, + std::vector<ServerNode>* servers) { + // 127.0.0.1:27017,127.0.0.1:27027,127.0.0.1:27037 + if (service_name == NULL || *service_name == '\0') { + LOG_ONCE(ERROR) << "Invalid parameters"; + return -1; + } + std::string mongo_service_url(service_name); + + for (butil::StringSplitter sp(mongo_service_url.c_str(), ','); sp; ++sp) { + std::string addr(sp.field(), sp.length()); + brpc::ChannelOptions options; + options.protocol = brpc::PROTOCOL_MONGO; + options.connection_type = "pooled"; + options.timeout_ms = 500; + options.connect_timeout_ms = 300; + brpc::Channel channel; + if (channel.Init(addr.c_str(), &options) != 0) { + LOG(ERROR) << "Fail to init channel"; + continue; + } + brpc::MongoGetReplSetStatusRequest request; + brpc::MongoGetReplSetStatusResponse response; + brpc::Controller cntl; + channel.CallMethod(nullptr, &cntl, &request, &response, nullptr); + if (cntl.Failed()) { + LOG(INFO) << "get repl set status from:" << addr << " failed, error=" << cntl.ErrorText(); + continue; + } else { + // find primary member + for (size_t i = 0; i < response.members().size(); ++i) { + if (response.members()[i].state_str == "PRIMARY") { + butil::EndPoint endpoint; + int ret = hostname2endpoint(response.members()[i].addr.c_str(), &endpoint); + if (ret == 0) { + servers->push_back(ServerNode(endpoint)); + LOG(INFO) << "get primary server:" << response.members()[i].addr; + return 0; + } + } + } + } + } + return 0; +} + +void MongoNamingService::Describe(std::ostream& os, + const DescribeOptions&) const { + os << "mongo"; + return; +} + +NamingService* MongoNamingService::New() const { + return new MongoNamingService; +} + +void MongoNamingService::Destroy() { + delete this; +} + + +} // namespace policy +} // namespace brpc diff --git a/src/brpc/policy/mongo_naming_service.h b/src/brpc/policy/mongo_naming_service.h new file mode 100644 index 0000000000..0a95acc935 --- /dev/null +++ b/src/brpc/policy/mongo_naming_service.h @@ -0,0 +1,45 @@ +// Licensed to the Apache Software Foundation (ASF) under one +// or more contributor license agreements. See the NOTICE file +// distributed with this work for additional information +// regarding copyright ownership. The ASF licenses this file +// to you under the Apache License, Version 2.0 (the +// "License"); you may not use this file except in compliance +// with the License. You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, +// software distributed under the License is distributed on an +// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +// KIND, either express or implied. See the License for the +// specific language governing permissions and limitations +// under the License. 
+ + +#ifndef BRPC_POLICY_MONGO_NAMING_SERVICE_H +#define BRPC_POLICY_MONGO_NAMING_SERVICE_H + +#include "brpc/periodic_naming_service.h" +#include "brpc/channel.h" +#include "butil/synchronization/lock.h" + +namespace brpc { +namespace policy { + +class MongoNamingService : public PeriodicNamingService { +private: + int GetServers(const char* service_name, + std::vector<ServerNode>* servers) override; + + void Describe(std::ostream& os, const DescribeOptions&) const override; + + NamingService* New() const override; + + void Destroy() override; +}; + + +} // namespace policy +} // namespace brpc + +#endif // BRPC_POLICY_MONGO_NAMING_SERVICE_H diff --git a/src/brpc/policy/mongo_protocol.cpp b/src/brpc/policy/mongo_protocol.cpp index 82bb3e0b36..ddd74ecddd 100644 --- a/src/brpc/policy/mongo_protocol.cpp +++ b/src/brpc/policy/mongo_protocol.cpp @@ -15,150 +15,195 @@ // specific language governing permissions and limitations // under the License. -#include <google/protobuf/descriptor.h> // MethodDescriptor -#include <google/protobuf/message.h> // Message +#include "brpc/policy/mongo_protocol.h" + +#include <bson/bson.h> #include <gflags/gflags.h> -#include "butil/time.h" -#include "butil/iobuf.h" // butil::IOBuf -#include "brpc/controller.h" // Controller -#include "brpc/socket.h" // Socket -#include "brpc/server.h" // Server -#include "brpc/span.h" -#include "brpc/mongo_head.h" -#include "brpc/details/server_private_accessor.h" +#include <google/protobuf/descriptor.h> // MethodDescriptor +#include <google/protobuf/message.h> // Message + +#include "brpc/controller.h" // Controller #include "brpc/details/controller_private_accessor.h" +#include "brpc/details/server_private_accessor.h" +#include "brpc/details/usercode_backup_pool.h" +#include "brpc/mongo.h" +#include "brpc/mongo_head.h" #include "brpc/mongo_service_adaptor.h" +#include "brpc/policy/mongo.pb.h" #include "brpc/policy/most_common_message.h" #include "brpc/policy/nshead_protocol.h" -#include "brpc/policy/mongo.pb.h" -#include "brpc/details/usercode_backup_pool.h" +#include "brpc/server.h" // Server +#include "brpc/socket.h" // Socket +#include "brpc/span.h" +#include "butil/atomicops.h" +#include "butil/bson_util.h" +#include "butil/iobuf.h" // butil::IOBuf +#include "butil/time.h" extern "C" { void bthread_assign_data(void* data); } - namespace brpc { namespace policy { struct SendMongoResponse : public google::protobuf::Closure { - SendMongoResponse(const Server *server) : - status(NULL), - received_us(0L), - server(server) {} - ~SendMongoResponse(); - void Run(); - - MethodStatus* status; - int64_t received_us; - const Server *server; - Controller cntl; - MongoRequest req; - MongoResponse res; + SendMongoResponse(const Server* server) + : status(NULL), received_us(0L), server(server) {} + ~SendMongoResponse(); + void Run(); + + MethodStatus* status; + int64_t received_us; + const Server* server; + Controller cntl; + MongoRequest req; + MongoResponse res; }; -SendMongoResponse::~SendMongoResponse() { - LogErrorTextAndDelete(false)(&cntl); -} +SendMongoResponse::~SendMongoResponse() { LogErrorTextAndDelete(false)(&cntl); } void SendMongoResponse::Run() { - std::unique_ptr<SendMongoResponse> delete_self(this); - ConcurrencyRemover concurrency_remover(status, &cntl, received_us); - Socket* socket = ControllerPrivateAccessor(&cntl).get_sending_socket(); - - if (cntl.IsCloseConnection()) { - socket->SetFailed(); - return; - } - - const MongoServiceAdaptor* adaptor = - server->options().mongo_service_adaptor; - butil::IOBuf res_buf; - if (cntl.Failed()) { - adaptor->SerializeError(res.header().response_to(), &res_buf); - } else if (res.has_message()) { - mongo_head_t header = { - res.header().message_length(), - 
res.header().request_id(), - res.header().response_to(), - res.header().op_code() - }; - res_buf.append(static_cast<const void*>(&header), sizeof(mongo_head_t)); - int32_t response_flags = res.response_flags(); - int64_t cursor_id = res.cursor_id(); - int32_t starting_from = res.starting_from(); - int32_t number_returned = res.number_returned(); - res_buf.append(&response_flags, sizeof(response_flags)); - res_buf.append(&cursor_id, sizeof(cursor_id)); - res_buf.append(&starting_from, sizeof(starting_from)); - res_buf.append(&number_returned, sizeof(number_returned)); - res_buf.append(res.message()); - } - - if (!res_buf.empty()) { - // Have the risk of unlimited pending responses, in which case, tell - // users to set max_concurrency. - Socket::WriteOptions wopt; - wopt.ignore_eovercrowded = true; - if (socket->Write(&res_buf, &wopt) != 0) { - PLOG(WARNING) << "Fail to write into " << *socket; - return; - } + std::unique_ptr<SendMongoResponse> delete_self(this); + ConcurrencyRemover concurrency_remover(status, &cntl, received_us); + Socket* socket = ControllerPrivateAccessor(&cntl).get_sending_socket(); + + if (cntl.IsCloseConnection()) { + socket->SetFailed(); + return; + } + + const MongoServiceAdaptor* adaptor = server->options().mongo_service_adaptor; + butil::IOBuf res_buf; + if (cntl.Failed()) { + adaptor->SerializeError(res.header().response_to(), &res_buf); + } else if (res.has_message()) { + mongo_head_t header = {res.header().message_length(), + res.header().request_id(), + res.header().response_to(), res.header().op_code()}; + res_buf.append(static_cast<const void*>(&header), sizeof(mongo_head_t)); + int32_t response_flags = res.response_flags(); + int64_t cursor_id = res.cursor_id(); + int32_t starting_from = res.starting_from(); + int32_t number_returned = res.number_returned(); + res_buf.append(&response_flags, sizeof(response_flags)); + res_buf.append(&cursor_id, sizeof(cursor_id)); + res_buf.append(&starting_from, sizeof(starting_from)); + res_buf.append(&number_returned, sizeof(number_returned)); + res_buf.append(res.message()); + } + + if (!res_buf.empty()) { + // Have the risk of unlimited pending responses, in which case, tell + // users to set max_concurrency. + Socket::WriteOptions wopt; + wopt.ignore_eovercrowded = true; + if (socket->Write(&res_buf, &wopt) != 0) { + PLOG(WARNING) << "Fail to write into " << *socket; + return; } + } } -ParseResult ParseMongoMessage(butil::IOBuf* source, - Socket* socket, bool /*read_eof*/, const void *arg) { +// butil::atomic<int32_t> global_request_id(0); + +ParseResult ParseMongoMessage(butil::IOBuf* source, Socket* socket, + bool /*read_eof*/, const void* arg) { + const MongoServiceAdaptor* adaptor = nullptr; + if (arg) { + // server side const Server* server = static_cast<const Server*>(arg); - const MongoServiceAdaptor* adaptor = server->options().mongo_service_adaptor; + adaptor = server->options().mongo_service_adaptor; if (NULL == adaptor) { - // The server does not enable mongo adaptor. - return MakeParseError(PARSE_ERROR_TRY_OTHERS); - } - - char buf[sizeof(mongo_head_t)]; - const char *p = (const char *)source->fetch(buf, sizeof(buf)); - if (NULL == p) { - return MakeParseError(PARSE_ERROR_NOT_ENOUGH_DATA); - } - mongo_head_t header = *(const mongo_head_t*)p; - header.make_host_endian(); - if (!is_mongo_opcode(header.op_code)) { - // The op_code plays the role of "magic number" here. - return MakeParseError(PARSE_ERROR_TRY_OTHERS); - } - if (header.message_length < (int32_t)sizeof(mongo_head_t)) { - // definitely not a valid mongo packet. 
- return MakeParseError(PARSE_ERROR_TRY_OTHERS); - } - uint32_t body_len = static_cast<uint32_t>(header.message_length); - if (body_len > FLAGS_max_body_size) { - return MakeParseError(PARSE_ERROR_TOO_BIG_DATA); - } else if (source->length() < body_len) { - return MakeParseError(PARSE_ERROR_NOT_ENOUGH_DATA); - } - // Mongo protocol is a protocol with state. Each connection has its own - // mongo context. (e.g. last error occured on the connection, the cursor - // created by the last Query). The context is stored in - // socket::_input_message, and created at the first time when msg - // comes over the socket. - Destroyable *socket_context_msg = socket->parsing_context(); + // The server does not enable mongo adaptor. + return MakeParseError(PARSE_ERROR_TRY_OTHERS); + } + } + + char buf[sizeof(mongo_head_t)]; + const char* p = (const char*)source->fetch(buf, sizeof(buf)); + if (NULL == p) { + return MakeParseError(PARSE_ERROR_NOT_ENOUGH_DATA); + } + mongo_head_t header = *(const mongo_head_t*)p; + header.make_host_endian(); + if (!is_mongo_opcode(header.op_code)) { + // The op_code plays the role of "magic number" here. + return MakeParseError(PARSE_ERROR_TRY_OTHERS); + } + if (header.message_length < (int32_t)sizeof(mongo_head_t)) { + // definitely not a valid mongo packet. + return MakeParseError(PARSE_ERROR_TRY_OTHERS); + } + uint32_t body_len = static_cast<uint32_t>(header.message_length); + if (body_len > FLAGS_max_body_size) { + return MakeParseError(PARSE_ERROR_TOO_BIG_DATA); + } else if (source->length() < body_len) { + return MakeParseError(PARSE_ERROR_NOT_ENOUGH_DATA); + } + // Mongo protocol is a protocol with state. Each connection has its own + // mongo context. (e.g. last error occurred on the connection, the cursor + // created by the last Query). The context is stored in + // socket::_input_message, and created at the first time when msg + // comes over the socket. + if (arg) { + // server side + Destroyable* socket_context_msg = socket->parsing_context(); if (NULL == socket_context_msg) { - MongoContext *context = adaptor->CreateSocketContext(); - if (NULL == context) { - return MakeParseError(PARSE_ERROR_NO_RESOURCE); - } - socket_context_msg = new MongoContextMessage(context); - socket->reset_parsing_context(socket_context_msg); + MongoContext* context = adaptor->CreateSocketContext(); + if (NULL == context) { + return MakeParseError(PARSE_ERROR_NO_RESOURCE); + } + socket_context_msg = new MongoContextMessage(context); + socket->reset_parsing_context(socket_context_msg); } policy::MostCommonMessage* msg = policy::MostCommonMessage::Get(); source->cutn(&msg->meta, sizeof(buf)); size_t act_body_len = source->cutn(&msg->payload, body_len - sizeof(buf)); if (act_body_len != body_len - sizeof(buf)) { - CHECK(false); // Very unlikely, unless memory is corrupted. 
+ return MakeParseError(PARSE_ERROR_TRY_OTHERS); } return MakeMessage(msg); + } else { + MongoInputResponse* response_msg = new MongoInputResponse; + // client side + // the mongo_head has already been fetched above + source->pop_front(sizeof(buf)); + if (header.op_code == MONGO_OPCODE_REPLY) { + LOG(WARNING) << "ParseMongoMessage not support op_code: REPLY"; + return MakeParseError(PARSE_ERROR_ABSOLUTELY_WRONG); + } else if (header.op_code == MONGO_OPCODE_MSG) { + response_msg->opcode = MONGO_OPCODE_MSG; + MongoMsg& mongo_msg = response_msg->msg; + butil::IOBuf msg_buf; + size_t act_body_len = source->cutn(&msg_buf, body_len - sizeof(buf)); + if (act_body_len != body_len - sizeof(buf)) { + CHECK(false); + return MakeParseError(PARSE_ERROR_ABSOLUTELY_WRONG); + } + // flagbits: 4 bytes + const size_t flagbits_cut = msg_buf.cutn(&(mongo_msg.flagbits), 4); + if (flagbits_cut != 4) { + CHECK(false); + return MakeParseError(PARSE_ERROR_ABSOLUTELY_WRONG); + } + while (!msg_buf.empty()) { + Section section; + bool parse_ret = ParseMongoSection(&msg_buf, &section); + if (!parse_ret) { + LOG(WARNING) << "parse mongo section failed"; + return MakeParseError(PARSE_ERROR_ABSOLUTELY_WRONG); + } + mongo_msg.sections.push_back(section); + } + return MakeMessage(response_msg); + } else { + LOG(WARNING) << "ParseMongoMessage not support op_code:" + << header.op_code; + return MakeParseError(PARSE_ERROR_ABSOLUTELY_WRONG); + } + } } // Defined in baidu_rpc_protocol.cpp @@ -167,132 +212,1126 @@ void EndRunningCallMethodInPool( const ::google::protobuf::MethodDescriptor* method, ::google::protobuf::RpcController* controller, const ::google::protobuf::Message* request, - ::google::protobuf::Message* response, - ::google::protobuf::Closure* done); + ::google::protobuf::Message* response, ::google::protobuf::Closure* done); void ProcessMongoRequest(InputMessageBase* msg_base) { - DestroyingPtr<MostCommonMessage> msg(static_cast<MostCommonMessage*>(msg_base)); - SocketUniquePtr socket_guard(msg->ReleaseSocket()); - Socket* socket = socket_guard.get(); - const Server* server = static_cast<const Server*>(msg_base->arg()); - ScopedNonServiceError non_service_error(server); - - char buf[sizeof(mongo_head_t)]; - const char *p = (const char *)msg->meta.fetch(buf, sizeof(buf)); - const mongo_head_t *header = (const mongo_head_t*)p; - - const google::protobuf::ServiceDescriptor* srv_des = MongoService::descriptor(); - if (1 != srv_des->method_count()) { - LOG(WARNING) << "method count:" << srv_des->method_count() - << " of MongoService should be equal to 1!"; - } - - const Server::MethodProperty *mp = - ServerPrivateAccessor(server) - .FindMethodPropertyByFullName(srv_des->method(0)->full_name()); - - MongoContextMessage *context_msg = - dynamic_cast<MongoContextMessage*>(socket->parsing_context()); - if (NULL == context_msg) { - LOG(WARNING) << "socket context wasn't set correctly"; - return; - } - - SendMongoResponse* mongo_done = new SendMongoResponse(server); - mongo_done->cntl.set_mongo_session_data(context_msg->context()); - - ControllerPrivateAccessor accessor(&(mongo_done->cntl)); - accessor.set_server(server) - .set_security_mode(server->options().security_mode()) - .set_peer_id(socket->id()) - .set_remote_side(socket->remote_side()) - .set_local_side(socket->local_side()) - .set_auth_context(socket->auth_context()) - .set_request_protocol(PROTOCOL_MONGO) - .set_begin_time_us(msg->received_us()) - .move_in_server_receiving_sock(socket_guard); - - // Tag the bthread with this server's key for - // thread_local_data(). 
- if (server->thread_local_options().thread_local_data_factory) { - bthread_assign_data((void*)&server->thread_local_options()); - } - do { - if (!server->IsRunning()) { - mongo_done->cntl.SetFailed(ELOGOFF, "Server is stopping"); - break; - } + DestroyingPtr msg( + static_cast(msg_base)); + SocketUniquePtr socket_guard(msg->ReleaseSocket()); + Socket* socket = socket_guard.get(); + const Server* server = static_cast(msg_base->arg()); + ScopedNonServiceError non_service_error(server); + + char buf[sizeof(mongo_head_t)]; + const char* p = (const char*)msg->meta.fetch(buf, sizeof(buf)); + const mongo_head_t* header = (const mongo_head_t*)p; - if (!ServerPrivateAccessor(server).AddConcurrency(&(mongo_done->cntl))) { - mongo_done->cntl.SetFailed( - ELIMIT, "Reached server's max_concurrency=%d", - server->options().max_concurrency); - break; + const google::protobuf::ServiceDescriptor* srv_des = + MongoService::descriptor(); + if (1 != srv_des->method_count()) { + LOG(WARNING) << "method count:" << srv_des->method_count() + << " of MongoService should be equal to 1!"; + } + + const Server::MethodProperty* mp = + ServerPrivateAccessor(server).FindMethodPropertyByFullName( + srv_des->method(0)->full_name()); + + MongoContextMessage* context_msg = + dynamic_cast(socket->parsing_context()); + if (NULL == context_msg) { + LOG(WARNING) << "socket context wasn't set correctly"; + return; + } + + SendMongoResponse* mongo_done = new SendMongoResponse(server); + mongo_done->cntl.set_mongo_session_data(context_msg->context()); + + ControllerPrivateAccessor accessor(&(mongo_done->cntl)); + accessor.set_server(server) + .set_security_mode(server->options().security_mode()) + .set_peer_id(socket->id()) + .set_remote_side(socket->remote_side()) + .set_local_side(socket->local_side()) + .set_auth_context(socket->auth_context()) + .set_request_protocol(PROTOCOL_MONGO) + .set_begin_time_us(msg->received_us()) + .move_in_server_receiving_sock(socket_guard); + + // Tag the bthread with this server's key for + // thread_local_data(). + if (server->thread_local_options().thread_local_data_factory) { + bthread_assign_data((void*)&server->thread_local_options()); + } + do { + if (!server->IsRunning()) { + mongo_done->cntl.SetFailed(ELOGOFF, "Server is stopping"); + break; + } + + if (!ServerPrivateAccessor(server).AddConcurrency(&(mongo_done->cntl))) { + mongo_done->cntl.SetFailed(ELIMIT, "Reached server's max_concurrency=%d", + server->options().max_concurrency); + break; + } + if (FLAGS_usercode_in_pthread && TooManyUserCode()) { + mongo_done->cntl.SetFailed(ELIMIT, + "Too many user code to run when" + " -usercode_in_pthread is on"); + break; + } + + if (NULL == mp || + mp->service->GetDescriptor() == BadMethodService::descriptor()) { + mongo_done->cntl.SetFailed(ENOMETHOD, "Fail to find default_method"); + break; + } + // Switch to service-specific error. 
+ non_service_error.release(); + MethodStatus* method_status = mp->status; + mongo_done->status = method_status; + if (method_status) { + int rejected_cc = 0; + if (!method_status->OnRequested(&rejected_cc)) { + mongo_done->cntl.SetFailed( + ELIMIT, "Rejected by %s's ConcurrencyLimiter, concurrency=%d", + mp->method->full_name().c_str(), rejected_cc); + break; + } + } + + if (!MongoOp_IsValid(header->op_code)) { + mongo_done->cntl.SetFailed(EREQUEST, "Unknown op_code:%d", + header->op_code); + break; + } + + mongo_done->cntl.set_log_id(header->request_id); + const std::string& body_str = msg->payload.to_string(); + mongo_done->req.set_message(body_str.c_str(), body_str.size()); + mongo_done->req.mutable_header()->set_message_length( + header->message_length); + mongo_done->req.mutable_header()->set_request_id(header->request_id); + mongo_done->req.mutable_header()->set_response_to(header->response_to); + mongo_done->req.mutable_header()->set_op_code( + static_cast(header->op_code)); + mongo_done->res.mutable_header()->set_response_to(header->request_id); + mongo_done->received_us = msg->received_us(); + + google::protobuf::Service* svc = mp->service; + const google::protobuf::MethodDescriptor* method = mp->method; + accessor.set_method(method); + + if (!FLAGS_usercode_in_pthread) { + return svc->CallMethod(method, &(mongo_done->cntl), &(mongo_done->req), + &(mongo_done->res), mongo_done); + } + if (BeginRunningUserCode()) { + return svc->CallMethod(method, &(mongo_done->cntl), &(mongo_done->req), + &(mongo_done->res), mongo_done); + return EndRunningUserCodeInPlace(); + } else { + return EndRunningCallMethodInPool(svc, method, &(mongo_done->cntl), + &(mongo_done->req), &(mongo_done->res), + mongo_done); + } + } while (false); + + mongo_done->Run(); +} + +bool ParseReplicaSetMember(BsonPtr member_ptr, ReplicaSetMember* member) { + // _id + bool has_id = butil::bson::bson_get_int32(member_ptr, "_id", &(member->id)); + if (!has_id) { + LOG(DEBUG) << "not has _id"; + return false; + } + // name/addr + bool has_name = + butil::bson::bson_get_str(member_ptr, "name", &(member->addr)); + if (!has_name) { + LOG(DEBUG) << "not has name"; + return false; + } + // health + double health; + bool has_health = butil::bson::bson_get_double(member_ptr, "health", &health); + if (!has_health) { + LOG(DEBUG) << "not has health"; + return false; + } + member->health = (health == 1.0); + // state + bool has_state = + butil::bson::bson_get_int32(member_ptr, "state", &(member->state)); + if (!has_state) { + LOG(DEBUG) << "not has state"; + return false; + } + // stateStr + bool has_stateStr = + butil::bson::bson_get_str(member_ptr, "stateStr", &(member->state_str)); + if (!has_stateStr) { + LOG(DEBUG) << "not has stateStr"; + return false; + } + return true; +} + +// Actions to a server response in mongo format +void ProcessMongoResponse(InputMessageBase* msg_base) { + const int64_t start_parse_us = butil::cpuwide_time_us(); + DestroyingPtr msg( + static_cast(msg_base)); + + const CallId cid = {static_cast(msg->socket()->correlation_id())}; + Controller* cntl = NULL; + LOG(DEBUG) << "process mongo response, cid:" << cid.value; + const int rc = bthread_id_lock(cid, (void**)&cntl); + if (rc != 0) { + LOG_IF(ERROR, rc != EINVAL && rc != EPERM) + << "Fail to lock correlation_id=" << cid << ": " << berror(rc); + return; + } + + ControllerPrivateAccessor accessor(cntl); + Span* span = accessor.span(); + if (span) { + span->set_base_real_us(msg->base_real_us()); + span->set_received_us(msg->received_us()); + // 
span->set_response_size(msg->response.ByteSize()); + span->set_start_parse_us(start_parse_us); + } + if (cntl->request_id() == "query" || cntl->request_id() == "query_getMore") { + bool next_batch = cntl->request_id() == "query_getMore"; + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error query response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "query response not has ok field"; + cntl->SetFailed(ERESPONSE, "query response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // query failed + if (ok_value != 1) { + LOG(DEBUG) << "query reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, "%s, %s", code_name.c_str(), + errmsg.c_str()); + } else { + cntl->SetFailed(ERESPONSE, "query response failed"); } - if (FLAGS_usercode_in_pthread && TooManyUserCode()) { - mongo_done->cntl.SetFailed(ELIMIT, "Too many user code to run when" - " -usercode_in_pthread is on"); - break; + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // query success + BsonPtr cursor_doc; + bool has_cursor_doc = + butil::bson::bson_get_doc(document, "cursor", &cursor_doc); + if (!has_cursor_doc) { + LOG(DEBUG) << "query response not has cursor document"; + cntl->SetFailed(ERESPONSE, "query response no cursor"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + std::vector first_batch; + const char* batch_element = "firstBatch"; + if (next_batch) { + batch_element = "nextBatch"; + } + bool has_batch = + butil::bson::bson_get_array(cursor_doc, batch_element, &first_batch); + if (!has_batch) { + LOG(DEBUG) << "query cursor document not has firstBatch array"; + cntl->SetFailed(ERESPONSE, "query response return null"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + int64_t cursor_id = 0; + bool has_cursor_id = + butil::bson::bson_get_int64(cursor_doc, "id", &cursor_id); + if (!has_cursor_id) { + LOG(DEBUG) << "query cursor document not has cursorid"; + cntl->SetFailed(ERESPONSE, "query response no cursor id"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + std::string ns; + bool has_ns = butil::bson::bson_get_str(cursor_doc, "ns", &ns); + if (!has_ns) { + LOG(DEBUG) << "query cursor document not has ns"; + cntl->SetFailed(ERESPONSE, "query response no ns"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // build response + MongoQueryResponse* response = + static_cast(cntl->response()); + if (cursor_id) { + response->set_cursorid(cursor_id); + } + response->set_number_returned(first_batch.size()); + for (auto element : first_batch) { + response->add_documents(element); + } + response->set_ns(ns); + accessor.OnResponse(cid, cntl->ErrorCode()); + } + } else if 
(cntl->request_id() == "count") { + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error count response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "count response not has ok field"; + cntl->SetFailed(ERESPONSE, "count response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // count failed + if (ok_value != 1) { + LOG(DEBUG) << "count reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, "%s, %s", code_name.c_str(), + errmsg.c_str()); + } else { + cntl->SetFailed(ERESPONSE, "count response failed"); } - - if (NULL == mp || - mp->service->GetDescriptor() == BadMethodService::descriptor()) { - mongo_done->cntl.SetFailed(ENOMETHOD, "Fail to find default_method"); - break; + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // count success + int32_t count = 0; + bool has_count = butil::bson::bson_get_int32(document, "n", &count); + if (!has_count) { + LOG(DEBUG) << "count response not has n element"; + cntl->SetFailed(ERESPONSE, "count response no n"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // build response + MongoCountResponse* response = + static_cast(cntl->response()); + response->set_number(count); + accessor.OnResponse(cid, cntl->ErrorCode()); + } else { + cntl->SetFailed(ERESPONSE, "msg not msg type"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + } else if (cntl->request_id() == "insert") { + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error insert response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "count response not has ok field"; + cntl->SetFailed(ERESPONSE, "insert response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // insert failed + if (ok_value != 1) { + LOG(DEBUG) << "insert reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, 
"%s, %s", code_name.c_str(), + errmsg.c_str()); + } else { + cntl->SetFailed(ERESPONSE, "insert response failed"); + } + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // insert success + int32_t insert_number = 0; + bool has_number = + butil::bson::bson_get_int32(document, "n", &insert_number); + if (!has_number) { + LOG(DEBUG) << "insert response not has n element"; + cntl->SetFailed(ERESPONSE, "insert response no n"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // build response number + MongoInsertResponse* response = + static_cast(cntl->response()); + response->set_number(insert_number); + // writeErrors array + std::vector write_errors; + const char* write_errors_element = "writeErrors"; + bool has_write_errors = butil::bson::bson_get_array( + document, write_errors_element, &write_errors); + if (has_write_errors) { + // build response write_errors + for (BsonPtr write_error_ptr : write_errors) { + WriteError write_error_record; + int32_t index = 0; + int32_t code = 0; + std::string errmsg; + bool has_index = + butil::bson::bson_get_int32(write_error_ptr, "index", &index); + if (!has_index) { + LOG(WARNING) << "unrecognize insert write_error:" + << bson_as_canonical_extended_json( + write_error_ptr.get(), nullptr); + continue; + } + write_error_record.index = index; + bool has_code = + butil::bson::bson_get_int32(write_error_ptr, "code", &code); + if (!has_code) { + LOG(WARNING) << "unrecognize insert write_error:" + << bson_as_canonical_extended_json( + write_error_ptr.get(), nullptr); + continue; + } + write_error_record.code = code; + bool has_errmsg = + butil::bson::bson_get_str(write_error_ptr, "errmsg", &errmsg); + if (!has_errmsg) { + LOG(WARNING) << "unrecognize insert write_error:" + << bson_as_canonical_extended_json( + write_error_ptr.get(), nullptr); + continue; + } + write_error_record.errmsg = errmsg; + response->add_write_errors(write_error_record); + } + } + accessor.OnResponse(cid, cntl->ErrorCode()); + } else { + cntl->SetFailed(ERESPONSE, "msg not msg type"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + } else if (cntl->request_id() == "delete") { + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error delete response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "count response not has ok field"; + cntl->SetFailed(ERESPONSE, "delete response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // delete failed + if (ok_value != 1) { + LOG(DEBUG) << "delete reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, "%s, %s", code_name.c_str(), + errmsg.c_str()); + } else { + cntl->SetFailed(ERESPONSE, "delete response failed"); + } + 
accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // delete success + int32_t delete_number = 0; + bool has_number = + butil::bson::bson_get_int32(document, "n", &delete_number); + if (!has_number) { + LOG(DEBUG) << "delete response not has n element"; + cntl->SetFailed(ERESPONSE, "delete response no n"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // build response number + MongoDeleteResponse* response = + static_cast(cntl->response()); + response->set_number(delete_number); + accessor.OnResponse(cid, cntl->ErrorCode()); + } else { + cntl->SetFailed(ERESPONSE, "msg not msg type"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + } else if (cntl->request_id() == "update") { + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error update response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "update response not has ok field"; + cntl->SetFailed(ERESPONSE, "update response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // update failed + if (ok_value != 1) { + LOG(DEBUG) << "update reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, "%s, %s", code_name.c_str(), + errmsg.c_str()); + } else { + cntl->SetFailed(ERESPONSE, "update response failed"); + } + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // update success + // n + int32_t matched_number = 0; + bool has_matched_numberr = + butil::bson::bson_get_int32(document, "n", &matched_number); + if (!has_matched_numberr) { + LOG(DEBUG) << "update response not has n element"; + cntl->SetFailed(ERESPONSE, "update response no n"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // nModified + int32_t modified_number = 0; + bool has_modified_numberr = + butil::bson::bson_get_int32(document, "nModified", &modified_number); + if (!has_modified_numberr) { + LOG(DEBUG) << "update response not has nModified element"; + cntl->SetFailed(ERESPONSE, "update response no nModified"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // build response number + MongoUpdateResponse* response = + static_cast(cntl->response()); + response->set_matched_number(matched_number); + response->set_modified_number(modified_number); + // writeErrors array + std::vector write_errors; + const char* write_errors_element = "writeErrors"; + bool has_write_errors = butil::bson::bson_get_array( + document, write_errors_element, &write_errors); + if (has_write_errors) { + // build response write_errors + for (BsonPtr write_error_ptr : write_errors) { + WriteError write_error_record; + int32_t index = 0; + int32_t code = 0; + std::string errmsg; + bool has_index = + 
butil::bson::bson_get_int32(write_error_ptr, "index", &index); + if (!has_index) { + LOG(WARNING) << "unrecognize update write_error:" + << bson_as_canonical_extended_json( + write_error_ptr.get(), nullptr); + continue; + } + write_error_record.index = index; + bool has_code = + butil::bson::bson_get_int32(write_error_ptr, "code", &code); + if (!has_code) { + LOG(WARNING) << "unrecognize update write_error:" + << bson_as_canonical_extended_json( + write_error_ptr.get(), nullptr); + continue; + } + write_error_record.code = code; + bool has_errmsg = + butil::bson::bson_get_str(write_error_ptr, "errmsg", &errmsg); + if (!has_errmsg) { + LOG(WARNING) << "unrecognize update write_error:" + << bson_as_canonical_extended_json( + write_error_ptr.get(), nullptr); + continue; + } + write_error_record.errmsg = errmsg; + response->add_write_errors(write_error_record); } - // Switch to service-specific error. - non_service_error.release(); - MethodStatus* method_status = mp->status; - mongo_done->status = method_status; - if (method_status) { - int rejected_cc = 0; - if (!method_status->OnRequested(&rejected_cc)) { - mongo_done->cntl.SetFailed( - ELIMIT, "Rejected by %s's ConcurrencyLimiter, concurrency=%d", - mp->method->full_name().c_str(), rejected_cc); - break; - } + } + // upserted array + std::vector upserted_docs; + const char* upserted_docs_element = "upserted"; + bool has_upserted = butil::bson::bson_get_array( + document, upserted_docs_element, &upserted_docs); + if (has_upserted) { + // build response upserted_docs + for (BsonPtr upserted_doc_ptr : upserted_docs) { + UpsertedDoc upserted_doc; + int32_t index = 0; + bson_oid_t id; + bool has_index = + butil::bson::bson_get_int32(upserted_doc_ptr, "index", &index); + if (!has_index) { + LOG(WARNING) << "unrecognize update upserted:" + << bson_as_canonical_extended_json( + upserted_doc_ptr.get(), nullptr); + continue; + } + upserted_doc.index = index; + bool has_oid = + butil::bson::bson_get_oid(upserted_doc_ptr, "_id", &id); + if (!has_oid) { + LOG(WARNING) << "unrecognize update upserted:" + << bson_as_canonical_extended_json( + upserted_doc_ptr.get(), nullptr); + continue; + } + upserted_doc._id = id; + response->add_upserted_docs(upserted_doc); } - - if (!MongoOp_IsValid(header->op_code)) { - mongo_done->cntl.SetFailed(EREQUEST, "Unknown op_code:%d", header->op_code); - break; + } + accessor.OnResponse(cid, cntl->ErrorCode()); + } else { + cntl->SetFailed(ERESPONSE, "msg not msg type"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + } else if (cntl->request_id() == "find_and_modify") { + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error find_and_modify response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "find_and_modify response not has ok field"; + cntl->SetFailed(ERESPONSE, "find_and_modify response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // find_and_modify failed + if (ok_value != 1) { + LOG(DEBUG) << "find_and_modify reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string 
code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, "%s, %s", code_name.c_str(), + errmsg.c_str()); + } else { + cntl->SetFailed(ERESPONSE, "find_and_modify response failed"); } - - mongo_done->cntl.set_log_id(header->request_id); - const std::string &body_str = msg->payload.to_string(); - mongo_done->req.set_message(body_str.c_str(), body_str.size()); - mongo_done->req.mutable_header()->set_message_length(header->message_length); - mongo_done->req.mutable_header()->set_request_id(header->request_id); - mongo_done->req.mutable_header()->set_response_to(header->response_to); - mongo_done->req.mutable_header()->set_op_code( - static_cast(header->op_code)); - mongo_done->res.mutable_header()->set_response_to(header->request_id); - mongo_done->received_us = msg->received_us(); - - google::protobuf::Service* svc = mp->service; - const google::protobuf::MethodDescriptor* method = mp->method; - accessor.set_method(method); - - if (!FLAGS_usercode_in_pthread) { - return svc->CallMethod( - method, &(mongo_done->cntl), &(mongo_done->req), - &(mongo_done->res), mongo_done); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // find_and_modify success + // lastErrorObject + BsonPtr last_error_object_ptr; + bool has_last_error_object = butil::bson::bson_get_doc( + document, "lastErrorObject", &last_error_object_ptr); + if (!has_last_error_object) { + LOG(DEBUG) + << "find_and_modify response not has lastErrorObject element"; + cntl->SetFailed(ERESPONSE, + "find_and_modify response no lastErrorObject"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // updatedExisting + bool update_existing = false; + butil::bson::bson_get_bool(last_error_object_ptr, "updatedExisting", + &update_existing); + // upserted + bson_oid_t upserted_oid; + bool has_upserted = butil::bson::bson_get_oid(last_error_object_ptr, + "upserted", &upserted_oid); + // value + std::pair value_type_result = + butil::bson::bson_get_type(document, "value"); + if (!value_type_result.first) { + LOG(DEBUG) << "find_and_modify response not has value element"; + cntl->SetFailed(ERESPONSE, "find_and_modify response no value"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + BsonPtr value; + if (value_type_result.second == BSON_TYPE_DOCUMENT) { + bool has_value = butil::bson::bson_get_doc(document, "value", &value); + if (!has_value) { + LOG(DEBUG) << "find_and_modify response not has value element"; + cntl->SetFailed(ERESPONSE, "find_and_modify response no value"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; } - if (BeginRunningUserCode()) { - return svc->CallMethod( - method, &(mongo_done->cntl), &(mongo_done->req), - &(mongo_done->res), mongo_done); - return EndRunningUserCodeInPlace(); + } else if (!update_existing && + value_type_result.second == BSON_TYPE_NULL) { + } else { + LOG(DEBUG) << "find_and_modify response with updateExisting=true but " + "wrong value"; + cntl->SetFailed(ERESPONSE, + "find_and_modify response with updateExisting=true but " + "wrong value"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // build response + MongoFindAndModifyResponse* response = + static_cast(cntl->response()); + if (value) { + response->set_value(value); + } + if 
(has_upserted) { + response->set_upserted(upserted_oid); + } + accessor.OnResponse(cid, cntl->ErrorCode()); + } else { + cntl->SetFailed(ERESPONSE, "msg not msg type"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + } else if (cntl->request_id() == "get_repl_set_status") { + if (msg->opcode == MONGO_OPCODE_MSG) { + MongoMsg& reply_msg = msg->msg; + if (reply_msg.sections.size() != 1 || reply_msg.sections[0].type != 0) { + cntl->SetFailed(ERESPONSE, "error get_repl_set_status response"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + Section& section = reply_msg.sections[0]; + assert(section.body_document); + BsonPtr document = section.body_document; + // response if ok + double ok_value = 0.0; + bool has_ok = butil::bson::bson_get_double(document, "ok", &ok_value); + if (!has_ok) { + LOG(DEBUG) << "get_repl_set_status response not has ok field"; + cntl->SetFailed(ERESPONSE, "get_repl_set_status response no ok field"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // get_repl_set_status failed + if (ok_value != 1) { + LOG(DEBUG) << "get_repl_set_status reponse error"; + int32_t error_code = 0; + bool has_error_code = + butil::bson::bson_get_int32(document, "code", &error_code); + std::string code_name, errmsg; + bool has_code_name = + butil::bson::bson_get_str(document, "codeName", &code_name); + bool has_errmsg = + butil::bson::bson_get_str(document, "errmsg", &errmsg); + if (has_error_code && has_code_name && has_errmsg) { + LOG(DEBUG) << "error_code:" << error_code + << " code_name:" << code_name << " errmsg:" << errmsg; + cntl->SetFailed(error_code, "%s, %s", code_name.c_str(), + errmsg.c_str()); } else { - return EndRunningCallMethodInPool( - svc, method, &(mongo_done->cntl), &(mongo_done->req), - &(mongo_done->res), mongo_done); + cntl->SetFailed(ERESPONSE, "get_repl_set_status response failed"); + } + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // get_repl_set_status success + // set + std::string set; + bool has_set = butil::bson::bson_get_str(document, "set", &set); + if (!has_set) { + LOG(DEBUG) << "get_repl_set_status response not has set element"; + cntl->SetFailed(ERESPONSE, "get_repl_set_status response no set"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // myState + int32_t myState; + bool has_myState = + butil::bson::bson_get_int32(document, "myState", &myState); + if (!has_myState) { + LOG(DEBUG) << "get_repl_set_status response not has myState element"; + cntl->SetFailed(ERESPONSE, "get_repl_set_status response no myState"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // members + std::vector members_ptr; + bool has_members = + butil::bson::bson_get_array(document, "members", &members_ptr); + if (!has_members) { + LOG(DEBUG) << "get_repl_set_status response not has members element"; + cntl->SetFailed(ERESPONSE, "get_repl_set_status response no members"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + // parse member + std::vector members(members_ptr.size()); + for (size_t i = 0; i < members_ptr.size(); ++i) { + bool parse_member_ret = + ParseReplicaSetMember(members_ptr[i], &(members[i])); + if (!parse_member_ret) { + LOG(DEBUG) << "parse replica_set_member failed"; + cntl->SetFailed(ERESPONSE, + "parse get_repl_set_status response member fail"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; } - } while (false); + } + // build response + brpc::MongoGetReplSetStatusResponse* response = + static_cast(cntl->response()); + response->set_ok(true); 
+ response->set_set(set); + response->set_myState(myState); + for (ReplicaSetMember member : members) { + response->add_members(member); + } + accessor.OnResponse(cid, cntl->ErrorCode()); + } else { + cntl->SetFailed(ERESPONSE, "msg not msg type"); + accessor.OnResponse(cid, cntl->ErrorCode()); + return; + } + } else if (false) { + LOG(DEBUG) << "not imple other response"; + accessor.OnResponse(cid, cntl->ErrorCode()); + } +} + +// Serialize request into request_buf +void SerializeMongoRequest(butil::IOBuf* request_buf, Controller* cntl, + const google::protobuf::Message* request) { + if (request == nullptr) { + return cntl->SetFailed(EREQUEST, "request is null"); + } + if (request->GetDescriptor() == brpc::MongoQueryRequest::descriptor()) { + const MongoQueryRequest* query_request = + dynamic_cast(request); + if (!query_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoQueryRequest(request_buf, cntl, query_request); + cntl->set_request_id("query"); + LOG(DEBUG) << "serialize mongo query request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoGetMoreRequest::descriptor()) { + const MongoGetMoreRequest* getMore_request = + dynamic_cast(request); + if (!getMore_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoGetMoreRequest(request_buf, cntl, getMore_request); + cntl->set_request_id("query_getMore"); + LOG(DEBUG) << "serialize mongo getMore request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoCountRequest::descriptor()) { + const MongoCountRequest* count_request = + dynamic_cast(request); + if (!count_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoCountRequest(request_buf, cntl, count_request); + cntl->set_request_id("count"); + LOG(DEBUG) << "serialize mongo count request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoInsertRequest::descriptor()) { + const MongoInsertRequest* insert_request = + dynamic_cast(request); + if (!insert_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoInsertRequest(request_buf, cntl, insert_request); + cntl->set_request_id("insert"); + LOG(DEBUG) << "serialize mongo insert request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoDeleteRequest::descriptor()) { + const MongoDeleteRequest* delete_request = + dynamic_cast(request); + if (!delete_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoDeleteRequest(request_buf, cntl, delete_request); + cntl->set_request_id("delete"); + LOG(DEBUG) << "serialize mongo delete request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoUpdateRequest::descriptor()) { + const MongoUpdateRequest* update_request = + dynamic_cast(request); + if (!update_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoUpdateRequest(request_buf, cntl, update_request); + cntl->set_request_id("update"); + LOG(DEBUG) << "serialize mongo update request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoFindAndModifyRequest::descriptor()) { + const MongoFindAndModifyRequest* find_and_modify_request = + dynamic_cast(request); + if (!find_and_modify_request) { + return 
cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoFindAndModifyRequest(request_buf, cntl, + find_and_modify_request); + cntl->set_request_id("find_and_modify"); + LOG(DEBUG) << "serialize mongo find_and_modify request, length:" + << request_buf->length(); + return; + } else if (request->GetDescriptor() == + brpc::MongoGetReplSetStatusRequest::descriptor()) { + const MongoGetReplSetStatusRequest* get_repl_set_status_request = + dynamic_cast(request); + if (!get_repl_set_status_request) { + return cntl->SetFailed(EREQUEST, "Fail to parse request"); + } + SerializeMongoGetReplSetStatusRequest(request_buf, cntl, + get_repl_set_status_request); + cntl->set_request_id("get_repl_set_status"); + LOG(DEBUG) << "serialize mongo get_repl_set_status request, length:" + << request_buf->length(); + } +} + +// Pack request_buf into msg, call after serialize +void PackMongoRequest(butil::IOBuf* msg, SocketMessage** user_message_out, + uint64_t correlation_id, + const google::protobuf::MethodDescriptor* method, + Controller* controller, const butil::IOBuf& request_buf, + const Authenticator* auth) { + LOG(DEBUG) << "mongo request buf length:" << request_buf.length(); + mongo_head_t request_head; + request_head.message_length = sizeof(mongo_head_t) + request_buf.length(); + request_head.request_id = static_cast(correlation_id); + request_head.response_to = 0; + request_head.op_code = DB_OP_MSG; + LOG(DEBUG) << "mongo head message_length:" << request_head.message_length + << ", request_id:" << request_head.request_id; + request_head.make_network_endian(); + msg->append(static_cast(&request_head), sizeof(request_head)); + msg->append(request_buf); + LOG(DEBUG) << "mongo request to send msg length:" << msg->length(); + ControllerPrivateAccessor accessor(controller); + accessor.get_sending_socket()->set_correlation_id(correlation_id); +} + +void SerializeMongoQueryRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoQueryRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "QueryRequest not initialize"); + return; + } +} + +void SerializeMongoGetMoreRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoGetMoreRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "GetMoreRequest not initialize"); + return; + } +} - mongo_done->Run(); +void SerializeMongoCountRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoCountRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "CountRequest not initialize"); + return; + } +} + +void SerializeMongoInsertRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoInsertRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "InsertRequest not initialize"); + return; + } +} + +void SerializeMongoDeleteRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoDeleteRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "DeleteRequest not initialize"); + return; + } +} + +void SerializeMongoUpdateRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoUpdateRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "UpdateRequest not initialize"); + return; + } +} + +void SerializeMongoFindAndModifyRequest( + butil::IOBuf* request_buf, Controller* cntl, + const MongoFindAndModifyRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, 
"FindAndModifyRequest not initialize"); + return; + } +} + +void SerializeMongoGetReplSetStatusRequest( + butil::IOBuf* request_buf, Controller* cntl, + const brpc::MongoGetReplSetStatusRequest* request) { + if (!request->SerializeTo(request_buf)) { + cntl->SetFailed(EREQUEST, "GetReplSetStatusRequest not initialize"); + return; + } +} + +bool ParseMongoSection(butil::IOBuf* source, Section* section) { + if (!source || !section) { + return false; + } + if (source->length() < 5) { // kind(1 byte) + bson size(4 byte) + return false; + } + bool cut_kind_ret = source->cut1(&(section->type)); + if (!cut_kind_ret) { + return false; + } + if (section->type == 0) { + // Body + // cut 4byte as bson size + uint32_t bson_size = 0; + const void* bson_size_fetch = source->fetch(&bson_size, 4); + if (!bson_size_fetch) { + return false; + } + bson_size = *(static_cast(bson_size_fetch)); + // tranfrom to host endian + if (!ARCH_CPU_LITTLE_ENDIAN) { + bson_size = butil::ByteSwap(bson_size); + } + LOG(DEBUG) << "get bson size:" << bson_size + << " iobuf size:" << source->length(); + if (source->length() < bson_size) { + return false; + } + butil::IOBuf bson_buf; + bool cut_bson = source->cutn(&bson_buf, bson_size); + if (!cut_bson) { + return false; + } + std::string bson_str = bson_buf.to_string(); + bson_t* document_ptr = bson_new_from_data( + reinterpret_cast(bson_str.c_str()), bson_str.length()); + if (!document_ptr) { + LOG(WARNING) << "bson init failed"; + return false; + } + section->body_document = butil::bson::new_bson(document_ptr); + LOG(DEBUG) << "parse mongo section with type body succ"; + return true; + } else if (section->type == 1) { + // Document Sequence + LOG(WARNING) << "not support document sequence now"; + return false; + } else { + return false; + } } } // namespace policy -} // namespace brpc +} // namespace brpc diff --git a/src/brpc/policy/mongo_protocol.h b/src/brpc/policy/mongo_protocol.h index 3b8e6c44c3..7565d1467c 100644 --- a/src/brpc/policy/mongo_protocol.h +++ b/src/brpc/policy/mongo_protocol.h @@ -18,21 +18,81 @@ #ifndef BRPC_POLICY_MONGO_PROTOCOL_H #define BRPC_POLICY_MONGO_PROTOCOL_H -#include "brpc/protocol.h" #include "brpc/input_messenger.h" - +#include "brpc/mongo.h" +#include "brpc/protocol.h" namespace brpc { namespace policy { +struct MongoInputResponse : public InputMessageBase { + int32_t opcode; + MongoReply reply; + MongoMsg msg; + + // @InputMessageBase + void DestroyImpl() { delete this; } +}; + // Parse binary format of mongo -ParseResult ParseMongoMessage(butil::IOBuf* source, Socket* socket, bool read_eof, const void *arg); +ParseResult ParseMongoMessage(butil::IOBuf* source, Socket* socket, + bool read_eof, const void* arg); // Actions to a (client) request in mongo format void ProcessMongoRequest(InputMessageBase* msg); -} // namespace policy -} // namespace brpc +// Actions to a server response in mongo format +void ProcessMongoResponse(InputMessageBase* msg_base); + +// Serialize query request +void SerializeMongoQueryRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoQueryRequest* request); + +// Serialize getMore request +void SerializeMongoGetMoreRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoGetMoreRequest* request); + +// Serialize count request +void SerializeMongoCountRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoCountRequest* request); + +// Serialize insert request +void SerializeMongoInsertRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoInsertRequest* request); + +// 
Serialize delete request +void SerializeMongoDeleteRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoDeleteRequest* request); + +// Serialize update request +void SerializeMongoUpdateRequest(butil::IOBuf* request_buf, Controller* cntl, + const MongoUpdateRequest* request); + +// Serialize find_and_modify request +void SerializeMongoFindAndModifyRequest( + butil::IOBuf* request_buf, Controller* cntl, + const MongoFindAndModifyRequest* request); + +// Serialize get_repl_set_status request +void SerializeMongoGetReplSetStatusRequest( + butil::IOBuf* request_buf, Controller* cntl, + const brpc::MongoGetReplSetStatusRequest* request); + +// Serialize request into request_buf +void SerializeMongoRequest(butil::IOBuf* request_buf, Controller* cntl, + const google::protobuf::Message* request); + +// Pack request_buf into msg, call after serialize +void PackMongoRequest(butil::IOBuf* msg, SocketMessage** user_message_out, + uint64_t correlation_id, + const google::protobuf::MethodDescriptor* method, + Controller* controller, const butil::IOBuf& request_buf, + const Authenticator* auth); + +// Parse Mongo Sections +bool ParseMongoSection(butil::IOBuf* source, Section* section); +} // namespace policy +} // namespace brpc -#endif // BRPC_POLICY_MONGO_PROTOCOL_H +#endif // BRPC_POLICY_MONGO_PROTOCOL_H diff --git a/src/brpc/proto_base.proto b/src/brpc/proto_base.proto index c0bbc086e1..1b60cbef2a 100644 --- a/src/brpc/proto_base.proto +++ b/src/brpc/proto_base.proto @@ -32,3 +32,26 @@ message NsheadMessageBase {} message SerializedRequestBase {} message ThriftFramedMessageBase {} + +message MongoQueryRequestBase {} +message MongoQueryResponseBase {} + +message MongoGetMoreRequestBase {} + +message MongoInsertRequestBase {} +message MongoInsertResponseBase {} + +message MongoUpdateRequestBase {} +message MongoUpdateResponseBase {} + +message MongoDeleteRequestBase {} +message MongoDeleteResponseBase {} + +message MongoCountRequestBase {} +message MongoCountResponseBase {} + +message MongoFindAndModifyRequestBase {} +message MongoFindAndModifyResponseBase {} + +message MongoGetReplSetStatusRequestBase {} +message MongoGetReplSetStatusResponseBase {} diff --git a/src/butil/bson_util.cc b/src/butil/bson_util.cc new file mode 100644 index 0000000000..5a741f03bf --- /dev/null +++ b/src/butil/bson_util.cc @@ -0,0 +1,197 @@ +#include "butil/bson_util.h" + +#include + +#include "butil/logging.h" + +namespace butil { +namespace bson { + +BsonPtr new_bson(bson_t *doc) { + if (!doc) { + doc = bson_new(); + } + return std::shared_ptr(doc, bson_destroy); +} + +bool bson_get_double(BsonPtr doc, const char *key, double *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_DOUBLE(&iter)) { + return false; + } + *value = bson_iter_double(&iter); + return true; +} + +bool bson_get_int32(BsonPtr doc, const char *key, int32_t *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_INT32(&iter)) { + return false; + } + *value = bson_iter_int32(&iter); + return true; +} + +bool bson_get_int64(BsonPtr doc, const char *key, int64_t *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + 
!BSON_ITER_HOLDS_INT64(&iter)) { + return false; + } + *value = bson_iter_int64(&iter); + return true; +} + +bool bson_get_str(BsonPtr doc, const char *key, std::string *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_UTF8(&iter)) { + return false; + } + uint32_t length = 0; + const char *str = bson_iter_utf8(&iter, &length); + if (!str) { + return false; + } else { + *value = std::string(str, length); + return true; + } +} + +bool bson_get_doc(BsonPtr doc, const char *key, BsonPtr *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_DOCUMENT(&iter)) { + return false; + } + uint32_t length = 0; + const uint8_t *document_str = nullptr; + bson_iter_document(&iter, &length, &document_str); + bson_t *value_doc_ptr = bson_new_from_data(document_str, length); + if (!value_doc_ptr) { + return false; + } + *value = new_bson(value_doc_ptr); + return true; +} + +bool bson_get_array(BsonPtr doc, const char *key, std::vector *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_ARRAY(&iter)) { + return false; + } + uint32_t length = 0; + const uint8_t *array_str = nullptr; + bson_iter_array(&iter, &length, &array_str); + bson_t bson_array; // read only + bool array_init = bson_init_static(&bson_array, array_str, length); + if (!array_init) { + return false; + } + bson_iter_t array_iter; + bool r = bson_iter_init(&array_iter, &bson_array); + if (!r) { + return false; + } + while (bson_iter_next(&array_iter)) { + if (!BSON_ITER_HOLDS_DOCUMENT(&array_iter)) { + continue; + } + uint32_t doc_length = 0; + const uint8_t *document_str = nullptr; + bson_iter_document(&array_iter, &doc_length, &document_str); + bson_t *array_element_ptr = bson_new_from_data(document_str, doc_length); + if (!array_element_ptr) { + return false; + } + value->push_back(new_bson(array_element_ptr)); + } + return true; +} + +bool bson_has_oid(BsonPtr doc) { + assert(doc); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + const char *oid = "_id"; + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, oid) || + !BSON_ITER_HOLDS_OID(&iter)) { + return false; + } + return true; +} + +bool bson_get_oid(BsonPtr doc, const char *key, bson_oid_t *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_OID(&iter)) { + return false; + } + const bson_oid_t *oid = bson_iter_oid(&iter); + if (!oid) { + return false; + } else { + *value = *oid; + return true; + } +} + +bool bson_get_bool(BsonPtr doc, const char *key, bool *value) { + assert(doc); + assert(key); + assert(value); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key) || + !BSON_ITER_HOLDS_BOOL(&iter)) { + return false; + } + *value = bson_iter_bool(&iter); + return true; +} + +std::pair bson_get_type(BsonPtr doc, const char *key) { + assert(doc); + assert(key); + bson_iter_t iter; + bson_t *doc_ptr = doc.get(); + if (!bson_iter_init(&iter, doc_ptr) || !bson_iter_find(&iter, key)) { + return std::make_pair(false, BSON_TYPE_EOD); + } 
else { + return std::make_pair(true, bson_iter_type(&iter)); + } +} + +} // namespace bson +} // namespace butil \ No newline at end of file diff --git a/src/butil/bson_util.h b/src/butil/bson_util.h new file mode 100644 index 0000000000..2ea628e92d --- /dev/null +++ b/src/butil/bson_util.h @@ -0,0 +1,40 @@ +#ifndef BUTIL_BSON_UTIL_H_ +#define BUTIL_BSON_UTIL_H_ + +#include +#include + +#include +#include +#include + +namespace butil { +namespace bson { + +typedef std::shared_ptr BsonPtr; + +BsonPtr new_bson(bson_t *doc = nullptr); + +bool bson_get_double(BsonPtr doc, const char *key, double *value); + +bool bson_get_int32(BsonPtr doc, const char *key, int32_t *value); + +bool bson_get_int64(BsonPtr doc, const char *key, int64_t *value); + +bool bson_get_str(BsonPtr doc, const char *key, std::string *value); + +bool bson_get_doc(BsonPtr doc, const char *key, BsonPtr *value); + +bool bson_get_array(BsonPtr doc, const char *key, std::vector *value); + +bool bson_has_oid(BsonPtr doc); + +bool bson_get_oid(BsonPtr doc, const char *key, bson_oid_t *value); + +bool bson_get_bool(BsonPtr doc, const char *key, bool *value); + +std::pair bson_get_type(BsonPtr doc, const char *key); +} // namespace bson +} // namespace butil + +#endif // BUTIL_BSON_UTIL_H_