chore: fix build for MacOs (#2635)

Also update actions versions to Node 20.
This change allows dragonfly to be built on macOS.
However, we still have multiple failing tests on macOS.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
This commit is contained in:
Roman Gershman 2024-02-22 12:02:20 +02:00 committed by GitHub
parent ea98513a30
commit 6fce3fca9f
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
16 changed files with 67 additions and 36 deletions

View file

@ -5,7 +5,7 @@ runs:
using: "composite" using: "composite"
steps: steps:
- name: Checkout - name: Checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0

View file

@ -16,7 +16,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Install NodeJs - name: Install NodeJs

View file

@ -27,7 +27,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Install dependencies - name: Install dependencies
@ -38,7 +38,7 @@ jobs:
apt update && apt install -y lcov pip apt update && apt install -y lcov pip
- name: Cache build deps - name: Cache build deps
id: cache-deps id: cache-deps
uses: actions/cache@v3 uses: actions/cache@v4
with: with:
path: | path: |
~/.ccache ~/.ccache
@ -78,7 +78,7 @@ jobs:
echo ls covout echo ls covout
ls covout/ ls covout/
- name: Upload coverage - name: Upload coverage
uses: actions/upload-artifact@v3 uses: actions/upload-artifact@v4
with: with:
name: coverage-report name: coverage-report
path: build/covout/ path: build/covout/

View file

@ -33,7 +33,7 @@ jobs:
password: ${{ secrets.GITHUB_TOKEN }} password: ${{ secrets.GITHUB_TOKEN }}
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Run sccache-cache - name: Run sccache-cache
@ -70,7 +70,7 @@ jobs:
runs-on: macos-latest runs-on: macos-latest
timeout-minutes: 45 timeout-minutes: 45
steps: steps:
- uses: actions/checkout@v3 - uses: actions/checkout@v4
with: with:
submodules: true submodules: true
- name: Install dependencies - name: Install dependencies

View file

@ -63,7 +63,7 @@ jobs:
run: env run: env
- name: checkout - name: checkout
uses: actions/checkout@v3 uses: actions/checkout@v4
with: with:
fetch-depth: 0 fetch-depth: 0
@ -71,7 +71,7 @@ jobs:
uses: azure/setup-helm@v3 uses: azure/setup-helm@v3
- name: Setup Go - name: Setup Go
uses: actions/setup-go@v3 uses: actions/setup-go@v4
- name: Configure Git - name: Configure Git
if: env.IS_PRERELEASE != 'true' if: env.IS_PRERELEASE != 'true'

View file

@ -87,14 +87,14 @@ jobs:
- name: Set up QEMU - name: Set up QEMU
id: qemu id: qemu
uses: docker/setup-qemu-action@v2 uses: docker/setup-qemu-action@v3
with: with:
platforms: arm64,amd64 platforms: arm64,amd64
- name: Set up Docker Buildx - name: Set up Docker Buildx
uses: docker/setup-buildx-action@v2 uses: docker/setup-buildx-action@v3
- name: Login to GitHub Container Registry - name: Login to GitHub Container Registry
uses: docker/login-action@v2 uses: docker/login-action@v3
with: with:
registry: ${{ inputs.registry }} registry: ${{ inputs.registry }}
username: ${{ inputs.registry_username }} username: ${{ inputs.registry_username }}
@ -102,7 +102,7 @@ jobs:
- name: Docker meta - name: Docker meta
id: metadata id: metadata
uses: docker/metadata-action@v4 uses: docker/metadata-action@v5
with: with:
images: | images: |
${{ inputs.image }} ${{ inputs.image }}
@ -132,7 +132,7 @@ jobs:
# build is broken based on platforms as load: true is not supported with multi-platform builds # build is broken based on platforms as load: true is not supported with multi-platform builds
- if: ${{ hashFiles(format('{0}-{1}', matrix.dockerfile, inputs.build_type)) }} - if: ${{ hashFiles(format('{0}-{1}', matrix.dockerfile, inputs.build_type)) }}
name: Build release image for amd64 name: Build release image for amd64
uses: docker/build-push-action@v3 uses: docker/build-push-action@v5
with: with:
context: . context: .
platforms: linux/amd64 platforms: linux/amd64
@ -190,7 +190,7 @@ jobs:
- if: ${{ hashFiles(format('{0}-{1}', matrix.dockerfile, inputs.build_type)) }} - if: ${{ hashFiles(format('{0}-{1}', matrix.dockerfile, inputs.build_type)) }}
name: Push release image name: Push release image
uses: docker/build-push-action@v3 uses: docker/build-push-action@v5
with: with:
context: . context: .
platforms: linux/amd64,linux/arm64 platforms: linux/amd64,linux/arm64

View file

@ -4,6 +4,7 @@
#pragma once #pragma once
#ifdef __linux__
#include "util/fibers/uring_file.h" #include "util/fibers/uring_file.h"
#include "util/fibers/uring_proactor.h" #include "util/fibers/uring_proactor.h"
namespace dfly { namespace dfly {
@ -14,3 +15,5 @@ using util::fb2::OpenLinux;
using util::fb2::OpenRead; using util::fb2::OpenRead;
} // namespace dfly } // namespace dfly
#endif

View file

@ -74,7 +74,8 @@ template <typename T> T CmdArgParser::Num(size_t idx) {
template float CmdArgParser::Num<float>(size_t); template float CmdArgParser::Num<float>(size_t);
template double CmdArgParser::Num<double>(size_t); template double CmdArgParser::Num<double>(size_t);
template uint64_t CmdArgParser::Num<uint64_t>(size_t); template unsigned long CmdArgParser::Num<unsigned long>(size_t);
template unsigned long long CmdArgParser::Num<unsigned long long>(size_t);
template int64_t CmdArgParser::Num<int64_t>(size_t); template int64_t CmdArgParser::Num<int64_t>(size_t);
template uint32_t CmdArgParser::Num<uint32_t>(size_t); template uint32_t CmdArgParser::Num<uint32_t>(size_t);
template int32_t CmdArgParser::Num<int32_t>(size_t); template int32_t CmdArgParser::Num<int32_t>(size_t);

View file

@ -37,13 +37,11 @@ TEST_F(CmdArgParserTest, BasicTypes) {
EXPECT_EQ(parser.Next<string>(), "STRING"s); EXPECT_EQ(parser.Next<string>(), "STRING"s);
EXPECT_EQ(parser.Next<string_view>(), "VIEW"sv); EXPECT_EQ(parser.Next<string_view>(), "VIEW"sv);
#ifndef __APPLE__
EXPECT_EQ(parser.Next<size_t>(), 11u); EXPECT_EQ(parser.Next<size_t>(), 11u);
EXPECT_EQ(parser.Next<size_t>(), 22u); EXPECT_EQ(parser.Next<size_t>(), 22u);
auto [a, b] = parser.Next<size_t, size_t>(); auto [a, b] = parser.Next<size_t, size_t>();
EXPECT_EQ(a, 33u); EXPECT_EQ(a, 33u);
EXPECT_EQ(b, 44u); EXPECT_EQ(b, 44u);
#endif
EXPECT_FALSE(parser.HasNext()); EXPECT_FALSE(parser.HasNext());
EXPECT_FALSE(parser.Error()); EXPECT_FALSE(parser.Error());

View file

@ -22,6 +22,7 @@
#include "facade/memcache_parser.h" #include "facade/memcache_parser.h"
#include "facade/redis_parser.h" #include "facade/redis_parser.h"
#include "facade/service_interface.h" #include "facade/service_interface.h"
#include "io/file.h"
#include "util/fibers/proactor_base.h" #include "util/fibers/proactor_base.h"
#ifdef DFLY_USE_SSL #ifdef DFLY_USE_SSL
@ -149,6 +150,7 @@ void OpenTrafficLogger(string_view base_path) {
if (tl_traffic_logger.log_file) if (tl_traffic_logger.log_file)
return; return;
#ifdef __linux__
// Open file with append mode, without it concurrent fiber writes seem to conflict // Open file with append mode, without it concurrent fiber writes seem to conflict
string path = absl::StrCat( string path = absl::StrCat(
base_path, "-", absl::Dec(ProactorBase::me()->GetPoolIndex(), absl::kZeroPad3), ".bin"); base_path, "-", absl::Dec(ProactorBase::me()->GetPoolIndex(), absl::kZeroPad3), ".bin");
@ -158,6 +160,9 @@ void OpenTrafficLogger(string_view base_path) {
return; return;
} }
tl_traffic_logger.log_file = unique_ptr<io::WriteFile>{file.value()}; tl_traffic_logger.log_file = unique_ptr<io::WriteFile>{file.value()};
#else
LOG(WARNING) << "Traffic logger is only supported on Linux";
#endif
} }
void LogTraffic(uint32_t id, bool has_more, absl::Span<RespExpr> resp) { void LogTraffic(uint32_t id, bool has_more, absl::Span<RespExpr> resp) {

View file

@ -13,8 +13,12 @@ endif()
set_property(SOURCE dfly_main.cc APPEND PROPERTY COMPILE_DEFINITIONS set_property(SOURCE dfly_main.cc APPEND PROPERTY COMPILE_DEFINITIONS
SOURCE_PATH_FROM_BUILD_ENV=${CMAKE_SOURCE_DIR}) SOURCE_PATH_FROM_BUILD_ENV=${CMAKE_SOURCE_DIR})
if("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux") if ("${CMAKE_SYSTEM_NAME}" STREQUAL "Linux")
SET(TX_LINUX_SRCS io_mgr.cc tiered_storage.cc) SET(TX_LINUX_SRCS io_mgr.cc tiered_storage.cc)
add_executable(dfly_bench dfly_bench.cc)
cxx_link(dfly_bench dfly_facade fibers2 absl::random_random)
cxx_test(tiered_storage_test dfly_test_lib LABELS DFLY)
endif() endif()
@ -76,9 +80,6 @@ add_library(dfly_test_lib test_utils.cc)
cxx_link(dfly_test_lib dragonfly_lib facade_test gtest_main_ext) cxx_link(dfly_test_lib dragonfly_lib facade_test gtest_main_ext)
add_executable(dfly_bench dfly_bench.cc)
cxx_link(dfly_bench dfly_facade fibers2 absl::random_random)
cxx_test(dragonfly_test dfly_test_lib LABELS DFLY) cxx_test(dragonfly_test dfly_test_lib LABELS DFLY)
cxx_test(multi_test dfly_test_lib LABELS DFLY) cxx_test(multi_test dfly_test_lib LABELS DFLY)
cxx_test(generic_family_test dfly_test_lib LABELS DFLY) cxx_test(generic_family_test dfly_test_lib LABELS DFLY)
@ -95,7 +96,6 @@ cxx_test(zset_family_test dfly_test_lib LABELS DFLY)
cxx_test(blocking_controller_test dfly_test_lib LABELS DFLY) cxx_test(blocking_controller_test dfly_test_lib LABELS DFLY)
cxx_test(json_family_test dfly_test_lib LABELS DFLY) cxx_test(json_family_test dfly_test_lib LABELS DFLY)
cxx_test(journal/journal_test dfly_test_lib LABELS DFLY) cxx_test(journal/journal_test dfly_test_lib LABELS DFLY)
cxx_test(tiered_storage_test dfly_test_lib LABELS DFLY)
cxx_test(top_keys_test dfly_test_lib LABELS DFLY) cxx_test(top_keys_test dfly_test_lib LABELS DFLY)
cxx_test(hll_family_test dfly_test_lib LABELS DFLY) cxx_test(hll_family_test dfly_test_lib LABELS DFLY)
cxx_test(cluster/cluster_config_test dfly_test_lib LABELS DFLY) cxx_test(cluster/cluster_config_test dfly_test_lib LABELS DFLY)

View file

@ -130,9 +130,11 @@ size_t RdbSnapshot::GetSaveBuffersSize() {
} }
error_code RdbSnapshot::Close() { error_code RdbSnapshot::Close() {
#ifdef __linux__
if (is_linux_file_) { if (is_linux_file_) {
return static_cast<LinuxWriteWrapper*>(io_sink_.get())->Close(); return static_cast<LinuxWriteWrapper*>(io_sink_.get())->Close();
} }
#endif
return static_cast<io::WriteFile*>(io_sink_.get())->Close(); return static_cast<io::WriteFile*>(io_sink_.get())->Close();
} }

View file

@ -376,6 +376,7 @@ AwsS3SnapshotStorage::ListObjects(std::string_view bucket_name, std::string_view
return keys; return keys;
} }
#ifdef __linux__
io::Result<size_t> LinuxWriteWrapper::WriteSome(const iovec* v, uint32_t len) { io::Result<size_t> LinuxWriteWrapper::WriteSome(const iovec* v, uint32_t len) {
io::Result<size_t> res = lf_->WriteSome(v, len, offset_, 0); io::Result<size_t> res = lf_->WriteSome(v, len, offset_, 0);
if (res) { if (res) {
@ -384,6 +385,7 @@ io::Result<size_t> LinuxWriteWrapper::WriteSome(const iovec* v, uint32_t len) {
return res; return res;
} }
#endif
void SubstituteFilenamePlaceholders(fs::path* filename, const FilenameSubstitutions& fns) { void SubstituteFilenamePlaceholders(fs::path* filename, const FilenameSubstitutions& fns) {
*filename = absl::StrReplaceAll( *filename = absl::StrReplaceAll(

View file

@ -105,6 +105,7 @@ class AwsS3SnapshotStorage : public SnapshotStorage {
// Returns bucket_name, obj_path for an s3 path. // Returns bucket_name, obj_path for an s3 path.
std::optional<std::pair<std::string, std::string>> GetBucketPath(std::string_view path); std::optional<std::pair<std::string, std::string>> GetBucketPath(std::string_view path);
#ifdef __linux__
// takes ownership over the file. // takes ownership over the file.
class LinuxWriteWrapper : public io::Sink { class LinuxWriteWrapper : public io::Sink {
public: public:
@ -121,6 +122,7 @@ class LinuxWriteWrapper : public io::Sink {
std::unique_ptr<util::fb2::LinuxFile> lf_; std::unique_ptr<util::fb2::LinuxFile> lf_;
off_t offset_ = 0; off_t offset_ = 0;
}; };
#endif
struct FilenameSubstitutions { struct FilenameSubstitutions {
std::string_view ts; std::string_view ts;

View file

@ -22,10 +22,6 @@ extern "C" {
#include "server/server_state.h" #include "server/server_state.h"
#include "server/transaction.h" #include "server/transaction.h"
/* List related stuff */
#define LIST_HEAD 0
#define LIST_TAIL 1
/** /**
* The number of entries allowed per internal list node can be specified * The number of entries allowed per internal list node can be specified
* as a fixed maximum size or a maximum number of elements. * as a fixed maximum size or a maximum number of elements.
@ -80,6 +76,8 @@ void* listPopSaver(unsigned char* data, size_t sz) {
return new string((char*)data, sz); return new string((char*)data, sz);
} }
enum InsertParam { INSERT_BEFORE, INSERT_AFTER };
string ListPop(ListDir dir, quicklist* ql) { string ListPop(ListDir dir, quicklist* ql) {
long long vlong; long long vlong;
string* pop_str = nullptr; string* pop_str = nullptr;
@ -544,7 +542,7 @@ OpResult<vector<uint32_t>> OpPos(const OpArgs& op_args, std::string_view key,
} }
OpResult<int> OpInsert(const OpArgs& op_args, string_view key, string_view pivot, string_view elem, OpResult<int> OpInsert(const OpArgs& op_args, string_view key, string_view pivot, string_view elem,
int insert_param) { InsertParam insert_param) {
auto& db_slice = op_args.shard->db_slice(); auto& db_slice = op_args.shard->db_slice();
auto it_res = db_slice.FindMutable(op_args.db_cntx, key, OBJ_LIST); auto it_res = db_slice.FindMutable(op_args.db_cntx, key, OBJ_LIST);
if (!it_res) if (!it_res)
@ -564,10 +562,10 @@ OpResult<int> OpInsert(const OpArgs& op_args, string_view key, string_view pivot
int res = -1; int res = -1;
if (found) { if (found) {
if (insert_param == LIST_TAIL) { if (insert_param == INSERT_AFTER) {
quicklistInsertAfter(qiter, &entry, elem.data(), elem.size()); quicklistInsertAfter(qiter, &entry, elem.data(), elem.size());
} else { } else {
DCHECK_EQ(LIST_HEAD, insert_param); DCHECK_EQ(INSERT_BEFORE, insert_param);
quicklistInsertBefore(qiter, &entry, elem.data(), elem.size()); quicklistInsertBefore(qiter, &entry, elem.data(), elem.size());
} }
res = quicklistCount(ql); res = quicklistCount(ql);
@ -1042,13 +1040,13 @@ void ListFamily::LInsert(CmdArgList args, ConnectionContext* cntx) {
string_view param = ArgS(args, 1); string_view param = ArgS(args, 1);
string_view pivot = ArgS(args, 2); string_view pivot = ArgS(args, 2);
string_view elem = ArgS(args, 3); string_view elem = ArgS(args, 3);
int where; InsertParam where;
ToUpper(&args[1]); ToUpper(&args[1]);
if (param == "AFTER") { if (param == "AFTER") {
where = LIST_TAIL; where = INSERT_AFTER;
} else if (param == "BEFORE") { } else if (param == "BEFORE") {
where = LIST_HEAD; where = INSERT_BEFORE;
} else { } else {
return cntx->SendError(kSyntaxErr); return cntx->SendError(kSyntaxErr);
} }

View file

@ -114,11 +114,15 @@ class TieredStorage {
public: public:
static constexpr size_t kMinBlobLen = size_t(-1); // infinity. static constexpr size_t kMinBlobLen = size_t(-1); // infinity.
explicit TieredStorage(DbSlice* db_slice) { TieredStorage(DbSlice* db_slice, size_t max_file_size) {
} }
~TieredStorage() { ~TieredStorage() {
} }
static bool CanExternalizeEntry(PrimeIterator it) {
return false;
}
std::error_code Open(const std::string& path) { std::error_code Open(const std::string& path) {
return {}; return {};
} }
@ -127,19 +131,35 @@ class TieredStorage {
return {}; return {};
} }
PrimeIterator Load(DbIndex db_index, PrimeIterator it, std::string_view key) {
return {};
}
// Schedules unloading of the item, pointed by the iterator. // Schedules unloading of the item, pointed by the iterator.
std::error_code ScheduleOffload(DbIndex db_index, PrimeIterator it) { std::error_code ScheduleOffload(DbIndex db_index, PrimeIterator it) {
return {}; return {};
} }
IoMgrStats GetDiskStats() const {
return IoMgrStats{};
}
void CancelAllIos(DbIndex db_index) {
}
void CancelIo(DbIndex db_index, PrimeIterator it) { void CancelIo(DbIndex db_index, PrimeIterator it) {
} }
static bool EligibleForOffload(std::string_view val) { static bool EligibleForOffload(size_t) {
return false; return false;
} }
void Free(size_t offset, size_t len) { std::error_code ScheduleOffloadWithThrottle(DbIndex db_index, PrimeIterator it,
std::string_view key) {
return {};
}
void Free(PrimeIterator it, DbTableStats* stats) {
} }
void Shutdown() { void Shutdown() {