From 36be22209105575979bb95cd9a881b3dad8138c0 Mon Sep 17 00:00:00 2001
From: Roman Gershman <roman@dragonflydb.io>
Date: Wed, 6 Sep 2023 09:35:11 +0300
Subject: [PATCH] chore: add macos daily build (#1795)

It compiles most of the code, though some linking problems still exist.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
---
 .github/workflows/daily-builds.yml          | 52 ++++++++++++++++++++-
 docs/build-from-source.md                   |  2 +-
 patches/lua-v5.4.4.patch                    |  6 ++-
 src/CMakeLists.txt                          | 12 +++--
 src/core/CMakeLists.txt                     | 12 +++--
 src/core/dash_test.cc                       |  5 +-
 src/core/search/indices.cc                  |  3 +-
 src/core/search/parser.y                    |  2 +-
 src/facade/dragonfly_connection.cc          |  5 ++
 src/facade/dragonfly_listener.cc            |  7 +++
 src/redis/debug.c                           |  4 ++
 src/redis/zmalloc.h                         |  1 +
 src/server/CMakeLists.txt                   | 11 +++--
 src/server/detail/save_stages_controller.cc | 10 ++++
 src/server/dfly_main.cc                     | 17 ++++---
 src/server/generic_family.cc                | 10 ++--
 src/server/main_service.cc                  |  4 ++
 src/server/memory_cmd.cc                    |  4 +-
 src/server/rdb_load.cc                      |  8 ++--
 src/server/rdb_save.cc                      | 10 ++--
 src/server/search/doc_index.h               | 29 ++++++++++++
 src/server/server_family.cc                 | 14 ++++--
 22 files changed, 187 insertions(+), 41 deletions(-)

diff --git a/.github/workflows/daily-builds.yml b/.github/workflows/daily-builds.yml
index 8bc18d992..3952f1a0a 100644
--- a/.github/workflows/daily-builds.yml
+++ b/.github/workflows/daily-builds.yml
@@ -36,17 +36,67 @@ jobs:
       - uses: actions/checkout@v3
         with:
           submodules: true
+      - name: Run sccache-cache
+        uses: mozilla-actions/sccache-action@v0.0.3
+
+      - name: Configure Cache Env
+        uses: actions/github-script@v6
+        with:
+          script: |
+            core.exportVariable('ACTIONS_CACHE_URL', process.env.ACTIONS_CACHE_URL || '');
+            core.exportVariable('ACTIONS_RUNTIME_TOKEN', process.env.ACTIONS_RUNTIME_TOKEN || '')
+
       - name: Install dependencies
         run: |
           cmake --version
           mkdir -p $GITHUB_WORKSPACE/build
+      - name: Install packages
+        if: matrix.container == 'fedora:30'
+        run: |
+          echo Passed
       - name: Configure & Build
         run: |
           cd $GITHUB_WORKSPACE/build
-          cmake .. -DCMAKE_BUILD_TYPE=Debug -GNinja -DCMAKE_CXX_COMPILER_LAUNCHER=ccache ${{ matrix.flags }}
+          cmake .. -DCMAKE_BUILD_TYPE=Debug -GNinja -DCMAKE_CXX_COMPILER_LAUNCHER=sccache -DCMAKE_C_COMPILER_LAUNCHER=sccache \
+            ${{ matrix.flags }}
           ninja src/all
       - name: Test
         run: |
           cd $GITHUB_WORKSPACE/build
           ctest -V -L DFLY
+
+  build-macos:
+    runs-on: macos-latest
+    timeout-minutes: 45
+    steps:
+      - uses: actions/checkout@v3
+        with:
+          submodules: true
+      - name: Install dependencies
+        run: |
+          brew update && brew install ninja boost openssl automake gcc zstd icu4c bison c-ares \
+            autoconf libtool automake
+          brew info icu4c
+          mkdir -p $GITHUB_WORKSPACE/build
+
+      - name: Configure & Build
+        run: |
+          cd $GITHUB_WORKSPACE/build
+
+          export PATH=/usr/local/opt/bison/bin:$PATH
+          which gcc
+          which gcc-13
+
+          alias g++=g++-13
+          alias gcc=gcc-13
+
+          bison --version
+
+          alias g++
+
+          echo "*************************** START BUILDING **************************************"
+          CC=gcc-13 CXX=g++-13 cmake .. -DCMAKE_BUILD_TYPE=Debug -GNinja -DWITH_UNWIND=OFF \
+            -DCMAKE_PREFIX_PATH=/usr/local/opt/icu4c
+          ninja ok_backend dfly_core_test dconv_project
+          ninja dragonfly
diff --git a/docs/build-from-source.md b/docs/build-from-source.md
index 8ffd6eeeb..4081cd884 100644
--- a/docs/build-from-source.md
+++ b/docs/build-from-source.md
@@ -22,7 +22,7 @@ sudo apt install ninja-build libunwind-dev libboost-fiber-dev libssl-dev \
 On Fedora:
 
 ```bash
-sudo yum install automake boost-devel g++ git cmake libtool ninja-build libzstd-devel \
+sudo dnf install -y automake boost-devel g++ git cmake libtool ninja-build libzstd-devel \
   openssl-devel libunwind-devel autoconf-archive patch bison libxml2-devel libicu-devel
 ```
 
diff --git a/patches/lua-v5.4.4.patch b/patches/lua-v5.4.4.patch
index dd2256856..f603ccb1e 100644
--- a/patches/lua-v5.4.4.patch
+++ b/patches/lua-v5.4.4.patch
@@ -15,13 +15,15 @@ diff --git a/makefile b/makefile
 index d46e650c..c27e5677 100644
 --- a/makefile
 +++ b/makefile
-@@ -66,13 +66,25 @@ LOCAL = $(TESTS) $(CWARNS)
+@@ -66,13 +66,26 @@ LOCAL = $(TESTS) $(CWARNS)
 
  # enable Linux goodies
 -MYCFLAGS= $(LOCAL) -std=c99 -DLUA_USE_LINUX -DLUA_USE_READLINE
 +MYCFLAGS= $(LOCAL) -std=c99 -g -O2 -DLUA_USE_LINUX
- MYLDFLAGS= $(LOCAL) -Wl,-E
+-MYLDFLAGS= $(LOCAL) -Wl,-E
++# Commenting out dynamic linking flags because we link statically
++# and this does not work on MacOS: MYLDFLAGS= $(LOCAL) -Wl,-E
 -MYLIBS= -ldl -lreadline
 +MYLIBS= -ldl

diff --git a/src/CMakeLists.txt b/src/CMakeLists.txt
index 8c7be2ed7..c18f6b315 100644
--- a/src/CMakeLists.txt
+++ b/src/CMakeLists.txt
@@ -45,13 +45,19 @@ function(gen_bison name)
   set_source_files_properties(${name}.cc ${name}_base.h PROPERTIES GENERATED TRUE)
 endfunction()
 
+if (APPLE)
+  set(SED_REPL sed "-i" '')
+else()
+  set(SED_REPL sed "-i")
+endif()
+
 add_third_party(
   dconv
   URL https://github.com/google/double-conversion/archive/refs/tags/v3.3.0.tar.gz
-  PATCH_COMMAND sed -i "/static const std::ctype<char>/d"
+  PATCH_COMMAND ${SED_REPL} "/static const std::ctype<char>/d"
     <SOURCE_DIR>/double-conversion/string-to-double.cc
-  COMMAND sed -i "/std::use_facet</d" <SOURCE_DIR>/double-conversion/string-to-double.cc
-  COMMAND sed -i "s/cType.tolower/std::tolower/g" <SOURCE_DIR>/double-conversion/string-to-double.cc
+  COMMAND ${SED_REPL} "/std::use_facet</d" <SOURCE_DIR>/double-conversion/string-to-double.cc
+  COMMAND ${SED_REPL} "s/cType.tolower/std::tolower/g" <SOURCE_DIR>/double-conversion/string-to-double.cc
   LIB libdouble-conversion.a
 )

diff --git a/src/core/CMakeLists.txt b/src/core/CMakeLists.txt
index e4a9d21c7..c88a05edd 100644
--- a/src/core/CMakeLists.txt
+++ b/src/core/CMakeLists.txt
@@ -1,11 +1,17 @@
+# We have some linking problems with search on Apple
+if (NOT APPLE)
+  add_subdirectory(search)
+  set(SEARCH_LIB query_parser)
+endif()
+
 add_library(dfly_core compact_object.cc dragonfly_core.cc extent_tree.cc
             external_alloc.cc interpreter.cc json_object.cc mi_memory_resource.cc
             sds_utils.cc segment_allocator.cc simple_lru_counter.cc score_map.cc
             small_string.cc sorted_map.cc tx_queue.cc dense_set.cc
             string_set.cc string_map.cc detail/bitpacking.cc)
 
-cxx_link(dfly_core base query_parser absl::flat_hash_map absl::str_format redis_lib TRDP::lua lua_modules
-         fibers2 TRDP::jsoncons OpenSSL::Crypto)
+cxx_link(dfly_core base absl::flat_hash_map absl::str_format redis_lib TRDP::lua lua_modules
+         fibers2 ${SEARCH_LIB} TRDP::jsoncons OpenSSL::Crypto)
 
 add_executable(dash_bench dash_bench.cc)
 cxx_link(dash_bench dfly_core)
@@ -23,5 +29,3 @@ cxx_test(string_map_test dfly_core LABELS DFLY)
 cxx_test(sorted_map_test dfly_core LABELS DFLY)
 cxx_test(bptree_set_test dfly_core LABELS DFLY)
 cxx_test(score_map_test dfly_core LABELS DFLY)
-
-add_subdirectory(search)
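Note on the dconv patch above: double-conversion's string-to-double.cc routes
lowercase conversion through a cached std::ctype<char> facet, and the sed
commands strip that in favor of plain std::tolower. For readers unfamiliar with
the facet API, the two spellings below are equivalent for ASCII input. This is
a self-contained illustration, not code from either repository:

    #include <cctype>
    #include <locale>

    // Facet-based form, similar to what double-conversion used
    // (the sed commands delete the facet lookup lines):
    char ToLowerFacet(char c) {
      const std::ctype<char>& cType =
          std::use_facet<std::ctype<char>>(std::locale::classic());
      return cType.tolower(c);
    }

    // Plain <cctype> form that "s/cType.tolower/std::tolower/g" switches to:
    char ToLowerPlain(char c) {
      return static_cast<char>(std::tolower(static_cast<unsigned char>(c)));
    }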
diff --git a/src/core/dash_test.cc b/src/core/dash_test.cc
index 4ba6adfde..d4a161d5e 100644
--- a/src/core/dash_test.cc
+++ b/src/core/dash_test.cc
@@ -228,8 +228,11 @@ TEST_F(DashTest, Basic) {
 
 TEST_F(DashTest, Segment) {
   std::unique_ptr<Segment> seg(new Segment(1));
+
+#ifndef __APPLE__
   LOG(INFO) << "Segment size " << sizeof(Segment)
             << " malloc size: " << malloc_usable_size(seg.get());
+#endif
 
   set<Segment::Key_t> keys = FillSegment(0);
 
@@ -817,7 +820,7 @@ TEST_F(DashTest, SplitBug) {
   string_view line;
   uint64_t val;
   while (lr.Next(&line)) {
-    CHECK(absl::SimpleHexAtoi(line, &val));
+    CHECK(absl::SimpleHexAtoi(line, &val)) << line;
     table.Insert(val, 0);
   }
   EXPECT_EQ(746, table.size());

diff --git a/src/core/search/indices.cc b/src/core/search/indices.cc
index 39f98a363..d45a939d9 100644
--- a/src/core/search/indices.cc
+++ b/src/core/search/indices.cc
@@ -74,7 +74,8 @@ absl::flat_hash_set<std::string> ICUTokenizeWords(std::string_view text) {
 
 // Convert string to lowercase with ICU library
 std::string ICUToLowercase(string_view input) {
-  icu::UnicodeString uStr = icu::UnicodeString::fromUTF8(input);
+  icu::UnicodeString uStr =
+      icu::UnicodeString::fromUTF8(icu::StringPiece(input.data(), input.size()));
   uStr.toLower();
   std::string result;
   uStr.toUTF8String(result);

diff --git a/src/core/search/parser.y b/src/core/search/parser.y
index 203c42476..36e270a39 100644
--- a/src/core/search/parser.y
+++ b/src/core/search/parser.y
@@ -1,5 +1,5 @@
 %skeleton "lalr1.cc" // -*- C++ -*-
-%require "3.5.1" // That's what's present on ubuntu 20.04.
+%require "3.5" // fedora 32 has this one.
 
 %defines // %header starts from 3.8.1

diff --git a/src/facade/dragonfly_connection.cc b/src/facade/dragonfly_connection.cc
index f2cc4bdc7..e63405124 100644
--- a/src/facade/dragonfly_connection.cc
+++ b/src/facade/dragonfly_connection.cc
@@ -396,7 +396,12 @@ string Connection::GetClientInfo(unsigned thread_id) const {
   int cpu = 0;
   socklen_t len = sizeof(cpu);
   getsockopt(socket_->native_handle(), SOL_SOCKET, SO_INCOMING_CPU, &cpu, &len);
+
+#ifdef __APPLE__
+  int my_cpu_id = -1;  // __APPLE__ does not have sched_getcpu()
+#else
   int my_cpu_id = sched_getcpu();
+#endif
 
   static constexpr string_view PHASE_NAMES[] = {"readsock", "process"};
   static_assert(PHASE_NAMES[PROCESS] == "process");

diff --git a/src/facade/dragonfly_listener.cc b/src/facade/dragonfly_listener.cc
index 034de828c..bb1f4f03c 100644
--- a/src/facade/dragonfly_listener.cc
+++ b/src/facade/dragonfly_listener.cc
@@ -107,8 +107,13 @@ bool ConfigureKeepAlive(int fd) {
     return false;
 
   val = absl::GetFlag(FLAGS_tcp_keepalive);
+#ifdef __APPLE__
+  if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &val, sizeof(val)) < 0)
+    return false;
+#else
   if (setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &val, sizeof(val)) < 0)
     return false;
+#endif
 
   /* Send next probes after the specified interval. Note that we set the
    * delay as interval / 3, as we send three probes before detecting
@@ -160,6 +165,7 @@ error_code Listener::ConfigureServerSocket(int fd) {
   bool success = ConfigureKeepAlive(fd);
 
   if (!success) {
+#ifndef __APPLE__
     int myerr = errno;
 
     int socket_type;
@@ -170,6 +176,7 @@ error_code Listener::ConfigureServerSocket(int fd) {
         socket_type != AF_UNIX) {
       LOG(WARNING) << "Could not configure keep alive " << SafeErrorMessage(myerr);
     }
+#endif
   }
 
   return error_code{};
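Note: the hunks above are instances of one theme. macOS spells the TCP
keep-alive idle-time option TCP_KEEPALIVE instead of TCP_KEEPIDLE, and it lacks
both sched_getcpu() and malloc_usable_size() (its counterpart is malloc_size()
from <malloc/malloc.h>, which the zmalloc.h hunk below also relies on). A
minimal sketch of collecting these differences behind helpers; the function
names are illustrative and not part of the Dragonfly tree:

    #include <netinet/in.h>
    #include <netinet/tcp.h>
    #include <sys/socket.h>

    #ifdef __APPLE__
    #include <malloc/malloc.h>
    #else
    #include <malloc.h>
    #include <sched.h>
    #endif

    // Sets the keep-alive idle time in seconds; only the option name differs.
    inline int SetKeepAliveIdle(int fd, int seconds) {
    #ifdef __APPLE__
      return setsockopt(fd, IPPROTO_TCP, TCP_KEEPALIVE, &seconds, sizeof(seconds));
    #else
      return setsockopt(fd, IPPROTO_TCP, TCP_KEEPIDLE, &seconds, sizeof(seconds));
    #endif
    }

    // Returns the current CPU id, or -1 where the OS does not expose it.
    inline int CurrentCpu() {
    #ifdef __APPLE__
      return -1;  // macOS has no sched_getcpu()
    #else
      return sched_getcpu();
    #endif
    }

    // Returns the usable size of a heap allocation.
    inline size_t UsableSize(void* p) {
    #ifdef __APPLE__
      return malloc_size(p);
    #else
      return malloc_usable_size(p);
    #endif
    }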
diff --git a/src/redis/debug.c b/src/redis/debug.c
index 444ab8668..4559d638e 100644
--- a/src/redis/debug.c
+++ b/src/redis/debug.c
@@ -62,7 +62,11 @@ void _serverPanic(const char *file, int line, const char *msg, ...) {
   serverLog(LL_WARNING, "!!! Software Failure. Press left mouse button to continue");
   serverLog(LL_WARNING, "Guru Meditation: %s #%s:%d", fmtmsg, file, line);
 #ifndef NDEBUG
+#ifdef __APPLE__
+  __assert_rtn(msg, file, line, "");
+#else
   __assert_fail(msg, file, line, "");
+#endif
 #endif
 }

diff --git a/src/redis/zmalloc.h b/src/redis/zmalloc.h
index 742530a77..b2ace3c3f 100644
--- a/src/redis/zmalloc.h
+++ b/src/redis/zmalloc.h
@@ -51,6 +51,7 @@
 #include <malloc/malloc.h>
 #define HAVE_MALLOC_SIZE 1
 #define zmalloc_size(p) malloc_size(p)
+#define ZMALLOC_LIB "macos"
 #endif
 
 /* On native libc implementations, we should still do our best to provide a

diff --git a/src/server/CMakeLists.txt b/src/server/CMakeLists.txt
index ad576483c..a7d90ae63 100644
--- a/src/server/CMakeLists.txt
+++ b/src/server/CMakeLists.txt
@@ -21,10 +21,16 @@ add_library(dfly_transaction db_slice.cc malloc_stats.cc engine_shard_set.cc blo
 )
 
 cxx_link(dfly_transaction dfly_core strings_lib)
+
+if (NOT APPLE)
+  SET(SEARCH_FILES search/search_family.cc search/doc_index.cc search/doc_accessors.cc)
+  cxx_test(search/search_family_test dfly_test_lib LABELS DFLY)
+endif()
+
 add_library(dragonfly_lib channel_store.cc command_registry.cc
             config_registry.cc conn_context.cc debugcmd.cc dflycmd.cc
             generic_family.cc hset_family.cc json_family.cc
-            search/search_family.cc search/doc_index.cc search/doc_accessors.cc
+            ${SEARCH_FILES}
             list_family.cc main_service.cc memory_cmd.cc rdb_load.cc rdb_save.cc replica.cc
             protocol_client.cc
             snapshot.cc script_mgr.cc server_family.cc malloc_stats.cc
@@ -37,7 +43,7 @@ add_library(dragonfly_lib channel_store.cc command_registry.cc
             acl/validator.cc)
 
-find_library(ZSTD_LIB NAMES libzstd.a libzstdstatic.a zstd NAMES_PER_DIR)
+find_library(ZSTD_LIB NAMES libzstd.a libzstdstatic.a zstd NAMES_PER_DIR REQUIRED)
 
 cxx_link(dragonfly_lib dfly_transaction dfly_facade redis_lib aws_lib strings_lib html_lib
          http_client_lib absl::random_random TRDP::jsoncons ${ZSTD_LIB} TRDP::lz4
          TRDP::croncpp)
@@ -69,7 +75,6 @@ cxx_test(journal/journal_test dfly_test_lib LABELS DFLY)
 cxx_test(tiered_storage_test dfly_test_lib LABELS DFLY)
 cxx_test(top_keys_test dfly_test_lib LABELS DFLY)
 cxx_test(hll_family_test dfly_test_lib LABELS DFLY)
-cxx_test(search/search_family_test dfly_test_lib LABELS DFLY)
 cxx_test(cluster/cluster_config_test dfly_test_lib LABELS DFLY)
 cxx_test(cluster/cluster_family_test dfly_test_lib LABELS DFLY)
 cxx_test(acl/user_registry_test dfly_test_lib LABELS DFLY)

diff --git a/src/server/detail/save_stages_controller.cc b/src/server/detail/save_stages_controller.cc
index 74b69d061..f1916359e 100644
--- a/src/server/detail/save_stages_controller.cc
+++ b/src/server/detail/save_stages_controller.cc
@@ -39,7 +39,11 @@ namespace fs = std::filesystem;
 namespace {
 
 const size_t kBucketConnectMs = 2000;
+
+#ifdef __linux__
 const int kRdbWriteFlags = O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC | O_DIRECT;
+#endif
+
 constexpr string_view kS3Prefix = "s3://"sv;
 
 bool IsCloudPath(string_view path) {
@@ -252,6 +256,7 @@ GenericError RdbSnapshot::Start(SaveMode save_mode, const std::string& path,
       return GenericError(res.error(), "Couldn't open file for writing");
     io_sink_.reset(*res);
   } else {
+#ifdef __linux__
     auto res = OpenLinux(path, kRdbWriteFlags, 0666);
     if (!res) {
       return GenericError(
@@ -261,6 +266,9 @@ GenericError RdbSnapshot::Start(SaveMode save_mode, const std::string& path,
     is_linux_file_ = true;
     io_sink_.reset(new LinuxWriteWrapper(res->release()));
     is_direct = kRdbWriteFlags & O_DIRECT;
+#else
+    LOG(FATAL) << "Linux I/O is not supported on this platform";
+#endif
   }
 }
 
@@ -507,6 +515,7 @@ RdbSaver::GlobalData SaveStagesController::GetGlobalData() const {
       script_bodies.push_back(move(data.body));
   }
 
+#ifndef __APPLE__
   {
     shard_set->Await(0, [&] {
       auto* indices = EngineShard::tlocal()->search_indices();
@@ -517,6 +526,7 @@ RdbSaver::GlobalData SaveStagesController::GetGlobalData() const {
       }
     });
   }
+#endif
 
   return RdbSaver::GlobalData{move(script_bodies), move(search_indices)};
 }
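Note: O_DIRECT is Linux-only, which is why kRdbWriteFlags and the OpenLinux()
branch are now compiled only under __linux__, with a LOG(FATAL) fallback
elsewhere. macOS offers a rough equivalent of page-cache bypass via
fcntl(F_NOCACHE) after opening; the sketch below shows that alternative. It is
hypothetical — the patch deliberately does not take this route:

    #include <fcntl.h>

    // Opens a file for writing, bypassing the page cache where supported.
    // Returns the fd or -1 on error. Illustrative only.
    int OpenUncached(const char* path) {
    #ifdef __linux__
      return open(path, O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC | O_DIRECT, 0666);
    #else
      int fd = open(path, O_CREAT | O_WRONLY | O_TRUNC | O_CLOEXEC, 0666);
      if (fd >= 0)
        fcntl(fd, F_NOCACHE, 1);  // macOS: do not cache pages for this file
      return fd;
    #endif
    }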
diff --git a/src/server/dfly_main.cc b/src/server/dfly_main.cc
index d399e874b..fb33615b1 100644
--- a/src/server/dfly_main.cc
+++ b/src/server/dfly_main.cc
@@ -754,14 +754,14 @@ Usage: dragonfly [FLAGS]
     }
   }
 
-  auto memory = ReadMemInfo().value();
+  io::MemInfoData mem_info = ReadMemInfo().value_or(io::MemInfoData{});
   size_t max_available_threads = 0u;
 
 #ifdef __linux__
-  UpdateResourceLimitsIfInsideContainer(&memory, &max_available_threads);
+  UpdateResourceLimitsIfInsideContainer(&mem_info, &max_available_threads);
 #endif
 
-  if (memory.swap_total != 0)
+  if (mem_info.swap_total != 0)
     LOG(WARNING) << "SWAP is enabled. Consider disabling it when running Dragonfly.";
 
   dfly::max_memory_limit = dfly::GetMaxMemoryFlag();
@@ -769,8 +769,13 @@ Usage: dragonfly [FLAGS]
   if (dfly::max_memory_limit == 0) {
     LOG(INFO) << "maxmemory has not been specified. Deciding myself....";
 
-    size_t available = memory.mem_avail;
+    size_t available = mem_info.mem_avail;
     size_t maxmemory = size_t(0.8 * available);
+    if (maxmemory == 0) {
+      LOG(ERROR) << "Could not deduce how much memory available. "
+                 << "Use --maxmemory=... to specify explicitly";
+      return 1;
+    }
     LOG(INFO) << "Found " << HumanReadableNumBytes(available)
              << " available memory. Setting maxmemory to " << HumanReadableNumBytes(maxmemory);
 
@@ -778,9 +783,9 @@ Usage: dragonfly [FLAGS]
     dfly::max_memory_limit = maxmemory;
   } else {
     string hr_limit = HumanReadableNumBytes(dfly::max_memory_limit);
-    if (dfly::max_memory_limit > memory.mem_avail)
+    if (dfly::max_memory_limit > mem_info.mem_avail)
       LOG(WARNING) << "Got memory limit " << hr_limit << ", however only "
-                   << HumanReadableNumBytes(memory.mem_avail) << " was found.";
+                   << HumanReadableNumBytes(mem_info.mem_avail) << " was found.";
     LOG(INFO) << "Max memory limit is: " << hr_limit;
   }
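Note: the switch from ReadMemInfo().value() to value_or() matters on macOS
because helio's ReadMemInfo() reads /proc/meminfo, which does not exist there,
so calling value() on the failed result would terminate the process at startup.
With the fallback, mem_avail is simply 0 and the new check turns that into an
actionable error. A sketch of the resulting decision logic, with
DecideMaxMemory() as a hypothetical stand-in for the inline code above:

    #include <cstdio>
    #include <cstdlib>

    // Chooses the memory budget: an explicit flag wins; otherwise take 80%
    // of detected available memory, and fail loudly if detection yielded 0
    // (e.g. no /proc/meminfo on macOS).
    size_t DecideMaxMemory(size_t flag_value, size_t available) {
      if (flag_value != 0)
        return flag_value;
      size_t maxmemory = size_t(0.8 * available);
      if (maxmemory == 0) {
        std::fprintf(stderr, "Could not deduce available memory; pass --maxmemory\n");
        std::exit(1);
      }
      return maxmemory;
    }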
Setting maxmemory to " << HumanReadableNumBytes(maxmemory); @@ -778,9 +783,9 @@ Usage: dragonfly [FLAGS] dfly::max_memory_limit = maxmemory; } else { string hr_limit = HumanReadableNumBytes(dfly::max_memory_limit); - if (dfly::max_memory_limit > memory.mem_avail) + if (dfly::max_memory_limit > mem_info.mem_avail) LOG(WARNING) << "Got memory limit " << hr_limit << ", however only " - << HumanReadableNumBytes(memory.mem_avail) << " was found."; + << HumanReadableNumBytes(mem_info.mem_avail) << " was found."; LOG(INFO) << "Max memory limit is: " << hr_limit; } diff --git a/src/server/generic_family.cc b/src/server/generic_family.cc index a555de960..c99c0dfe6 100644 --- a/src/server/generic_family.cc +++ b/src/server/generic_family.cc @@ -108,7 +108,7 @@ class InMemSource : public ::io::Source { ::io::Result InMemSource::ReadSome(const iovec* v, uint32_t len) { ssize_t read_total = 0; while (size_t(offs_) < buf_.size() && len > 0) { - size_t read_sz = min(buf_.size() - offs_, v->iov_len); + size_t read_sz = min(buf_.size() - offs_, v->iov_len); memcpy(v->iov_base, buf_.data() + offs_, read_sz); read_total += read_sz; offs_ += read_sz; @@ -723,7 +723,7 @@ void GenericFamily::Expire(CmdArgList args, ConnectionContext* cntx) { return (*cntx)->SendError(InvalidExpireTime(cntx->cid->name())); } - int_arg = std::max(int_arg, -1L); + int_arg = std::max(int_arg, -1); DbSlice::ExpireParams params{.value = int_arg}; auto cb = [&](Transaction* t, EngineShard* shard) { @@ -743,7 +743,7 @@ void GenericFamily::ExpireAt(CmdArgList args, ConnectionContext* cntx) { return (*cntx)->SendError(kInvalidIntErr); } - int_arg = std::max(int_arg, 0L); + int_arg = std::max(int_arg, 0L); DbSlice::ExpireParams params{.value = int_arg, .absolute = true}; auto cb = [&](Transaction* t, EngineShard* shard) { @@ -787,7 +787,7 @@ void GenericFamily::PexpireAt(CmdArgList args, ConnectionContext* cntx) { if (!absl::SimpleAtoi(msec, &int_arg)) { return (*cntx)->SendError(kInvalidIntErr); } - int_arg = std::max(int_arg, 0L); + int_arg = std::max(int_arg, 0L); DbSlice::ExpireParams params{.value = int_arg, .absolute = true, .unit = TimeUnit::MSEC}; auto cb = [&](Transaction* t, EngineShard* shard) { @@ -810,7 +810,7 @@ void GenericFamily::Pexpire(CmdArgList args, ConnectionContext* cntx) { if (!absl::SimpleAtoi(msec, &int_arg)) { return (*cntx)->SendError(kInvalidIntErr); } - int_arg = std::max(int_arg, 0L); + int_arg = std::max(int_arg, 0L); DbSlice::ExpireParams params{.value = int_arg, .unit = TimeUnit::MSEC}; auto cb = [&](Transaction* t, EngineShard* shard) { diff --git a/src/server/main_service.cc b/src/server/main_service.cc index 8d6ce6bde..682d39d18 100644 --- a/src/server/main_service.cc +++ b/src/server/main_service.cc @@ -2160,7 +2160,11 @@ void Service::RegisterCommands() { JsonFamily::Register(®istry_); BitOpsFamily::Register(®istry_); HllFamily::Register(®istry_); + +#ifndef __APPLE__ SearchFamily::Register(®istry_); +#endif + acl_family_.Register(®istry_); server_family_.Register(®istry_); diff --git a/src/server/memory_cmd.cc b/src/server/memory_cmd.cc index 07edba3b4..dbbfb1298 100644 --- a/src/server/memory_cmd.cc +++ b/src/server/memory_cmd.cc @@ -63,8 +63,8 @@ std::string MallocStats(bool backing, unsigned tid) { uint64_t delta = (absl::GetCurrentTimeNanos() - start) / 1000; absl::StrAppend(&str, "--- End mimalloc statistics, took ", delta, "us ---\n"); absl::StrAppend(&str, "total reserved: ", reserved, ", comitted: ", committed, ", used: ", used, - "fragmentation waste: ", (100.0 * (committed - used)) / 
diff --git a/src/server/rdb_load.cc b/src/server/rdb_load.cc
index 85d6aebf9..d83b35891 100644
--- a/src/server/rdb_load.cc
+++ b/src/server/rdb_load.cc
@@ -1495,7 +1495,7 @@ auto RdbLoaderBase::ReadGeneric(int rdbtype) -> io::Result<OpaqueObj> {
 }
 
 auto RdbLoaderBase::ReadHMap() -> io::Result<OpaqueObj> {
-  uint64_t len;
+  size_t len;
   SET_OR_UNEXPECT(LoadLen(nullptr), len);
 
   if (len == 0)
@@ -1506,7 +1506,7 @@ auto RdbLoaderBase::ReadHMap() -> io::Result<OpaqueObj> {
   len *= 2;
   load_trace->arr.resize((len + kMaxBlobLen - 1) / kMaxBlobLen);
   for (size_t i = 0; i < load_trace->arr.size(); ++i) {
-    size_t n = std::min(len, kMaxBlobLen);
+    size_t n = std::min<size_t>(len, kMaxBlobLen);
     load_trace->arr[i].resize(n);
     for (size_t j = 0; j < n; ++j) {
       error_code ec = ReadStringObj(&load_trace->arr[i][j].rdb_var);
@@ -1533,7 +1533,7 @@ auto RdbLoaderBase::ReadZSet(int rdbtype) -> io::Result<OpaqueObj> {
 
   double score;
   for (size_t i = 0; i < load_trace->arr.size(); ++i) {
-    size_t n = std::min(zsetlen, kMaxBlobLen);
+    size_t n = std::min<size_t>(zsetlen, kMaxBlobLen);
     load_trace->arr[i].resize(n);
     for (size_t j = 0; j < n; ++j) {
       error_code ec = ReadStringObj(&load_trace->arr[i][j].rdb_var);
@@ -1581,7 +1581,7 @@ auto RdbLoaderBase::ReadListQuicklist(int rdbtype) -> io::Result<OpaqueObj> {
 
   load_trace->arr.resize((len + kMaxBlobLen - 1) / kMaxBlobLen);
   for (size_t i = 0; i < load_trace->arr.size(); ++i) {
-    size_t n = std::min(len, kMaxBlobLen);
+    size_t n = std::min<size_t>(len, kMaxBlobLen);
     load_trace->arr[i].resize(n);
     for (size_t j = 0; j < n; ++j) {
       uint64_t container = QUICKLIST_NODE_CONTAINER_PACKED;

diff --git a/src/server/rdb_save.cc b/src/server/rdb_save.cc
index b0f21a1b5..495be76a1 100644
--- a/src/server/rdb_save.cc
+++ b/src/server/rdb_save.cc
@@ -947,10 +947,12 @@ class RdbSaver::Impl {
 // correct closing semantics - channel is closing when K producers marked it as closed.
 RdbSaver::Impl::Impl(bool align_writes, unsigned producers_len, CompressionMode compression_mode,
                      SaveMode sm, io::Sink* sink)
-    : sink_(sink), shard_snapshots_(producers_len),
+    : sink_(sink),
+      shard_snapshots_(producers_len),
       meta_serializer_(CompressionMode::NONE),  // Note: I think there is not need for compression
                                                 // at all in meta serializer
-      channel_{128, producers_len}, compression_mode_(compression_mode) {
+      channel_{128, producers_len},
+      compression_mode_(compression_mode) {
   if (align_writes) {
     aligned_buf_.emplace(kBufLen, sink);
     sink_ = &aligned_buf_.value();
@@ -1022,7 +1024,7 @@ error_code RdbSaver::Impl::ConsumeChannel(const Cancellation* cll) {
 
   // we can not exit on io-error since we spawn fibers that push data.
   // TODO: we may signal them to stop processing and exit asap in case of the error.
-  while (record = records_popper.Pop()) {
+  while ((record = records_popper.Pop())) {
     if (io_error || cll->IsCancelled())
       continue;
 
@@ -1037,7 +1039,7 @@ error_code RdbSaver::Impl::ConsumeChannel(const Cancellation* cll) {
       if (io_error) {
         break;
       }
-    } while (record = records_popper.TryPop());
+    } while ((record = records_popper.TryPop()));
   }  // while (records_popper.Pop())
 
   size_t pushed_bytes = 0;
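Note: the extra parentheses in ConsumeChannel() silence -Wparentheses ("suggest
parentheses around assignment used as truth value"), presumably surfaced by the
gcc-13 build; the second pair marks the assignment-in-condition as intentional.
A standalone illustration:

    #include <optional>

    // Stand-in for records_popper.Pop(): yields 3 items, then empty.
    std::optional<int> Pop() {
      static int n = 3;
      return n > 0 ? std::optional<int>(n--) : std::nullopt;
    }

    int Drain() {
      int consumed = 0;
      std::optional<int> record;
      // while (record = Pop()) {...}  // warns: assignment used as condition
      while ((record = Pop())) {       // double parens: assignment is deliberate
        consumed += *record;
      }
      return consumed;
    }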
diff --git a/src/server/search/doc_index.h b/src/server/search/doc_index.h
index 0e8643c06..204758a62 100644
--- a/src/server/search/doc_index.h
+++ b/src/server/search/doc_index.h
@@ -146,4 +146,33 @@ class ShardDocIndices {
   absl::flat_hash_map<std::string, std::shared_ptr<ShardDocIndex>> indices_;
 };
 
+#ifdef __APPLE__
+inline ShardDocIndex* ShardDocIndices::GetIndex(std::string_view name) {
+  return nullptr;
+}
+
+inline void ShardDocIndices::InitIndex(const OpArgs& op_args, std::string_view name,
+                                       std::shared_ptr<DocIndex> index) {
+}
+
+inline bool ShardDocIndices::DropIndex(std::string_view name) {
+  return false;
+}
+
+inline void ShardDocIndices::RebuildAllIndices(const OpArgs& op_args) {
+}
+
+inline std::vector<std::string> ShardDocIndices::GetIndexNames() const {
+  return {};
+}
+
+inline void ShardDocIndices::AddDoc(std::string_view key, const DbContext& db_cnt,
+                                    const PrimeValue& pv) {
+}
+
+inline void ShardDocIndices::RemoveDoc(std::string_view key, const DbContext& db_cnt,
+                                       const PrimeValue& pv) {
+}
+
+#endif  // __APPLE__
+
 }  // namespace dfly

diff --git a/src/server/server_family.cc b/src/server/server_family.cc
index 14882b15c..205dcf95d 100644
--- a/src/server/server_family.cc
+++ b/src/server/server_family.cc
@@ -315,7 +315,7 @@ std::optional<cron::cronexpr> InferSnapshotCronExpr() {
 
   if (!snapshot_cron_exp.empty() && !save_time.empty()) {
     LOG(ERROR) << "snapshot_cron and save_schedule flags should not be set simultaneously";
-    quick_exit(1);
+    exit(1);
   }
 
   string raw_cron_expr;
@@ -411,10 +411,14 @@ void ServerFamily::Init(util::AcceptServer* acceptor, std::vector<facade::Liste
 
+#ifndef __APPLE__
   stats_caching_task_ =
       pb_task_->AwaitBrief([&] { return pb_task_->AddPeriodic(period_ms, cache_cb); });
+#endif
 
   // check for '--replicaof' before loading anything
   if (ReplicaOfFlag flag = GetFlag(FLAGS_replicaof); flag.has_value()) {
@@ -461,8 +465,10 @@ void ServerFamily::Shutdown() {
   }
 
   pb_task_->Await([this] {
-    pb_task_->CancelPeriodic(stats_caching_task_);
-    stats_caching_task_ = 0;
+    if (stats_caching_task_) {
+      pb_task_->CancelPeriodic(stats_caching_task_);
+      stats_caching_task_ = 0;
+    }
 
     if (journal_->EnterLameDuck()) {
       auto ec = journal_->Close();
@@ -1522,6 +1528,7 @@ void ServerFamily::Info(CmdArgList args, ConnectionContext* cntx) {
     }
   }
 
+#ifndef __APPLE__
   if (should_enter("CPU")) {
     ADD_HEADER("# CPU");
     struct rusage ru, cu, tu;
@@ -1535,6 +1542,7 @@ void ServerFamily::Info(CmdArgList args, ConnectionContext* cntx) {
     append("used_cpu_sys_main_thread", StrCat(tu.ru_stime.tv_sec, ".", tu.ru_stime.tv_usec));
     append("used_cpu_user_main_thread", StrCat(tu.ru_utime.tv_sec, ".", tu.ru_utime.tv_usec));
   }
+#endif
 
   if (should_enter("CLUSTER")) {
     ADD_HEADER("# Cluster");
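Note: getrusage(RUSAGE_SELF, ...) itself is POSIX and exists on macOS; what
apparently forces guarding the whole INFO "CPU" block is RUSAGE_THREAD (used
for the *_main_thread counters), which is Linux-specific. A finer-grained
alternative could keep the process-wide counters everywhere and drop only the
per-thread ones — a hypothetical sketch, not what the patch does:

    #include <sys/resource.h>

    // Fills process-wide usage everywhere; fills main-thread usage only
    // where the OS supports it. Returns true if thread stats were obtained.
    bool GetCpuUsage(rusage* process, rusage* main_thread) {
      if (getrusage(RUSAGE_SELF, process) != 0)
        return false;
    #ifdef __linux__
      return getrusage(RUSAGE_THREAD, main_thread) == 0;
    #else
      (void)main_thread;  // macOS: no RUSAGE_THREAD
      return false;
    #endif
    }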