Mirror of https://github.com/dragonflydb/dragonfly.git
chore: update jsoncons version to 0.178 (#4368)
Signed-off-by: Roman Gershman <roman@dragonflydb.io>
parent 5adb976207
commit f3426bd559

4 changed files with 37 additions and 44 deletions
@@ -74,10 +74,8 @@ set(REFLEX "${THIRD_PARTY_LIB_DIR}/reflex/bin/reflex")
 add_third_party(
   jsoncons
   GIT_REPOSITORY https://github.com/dragonflydb/jsoncons
-  # URL https://github.com/danielaparker/jsoncons/archive/refs/tags/v0.171.1.tar.gz
-  GIT_TAG Dragonfly
+  GIT_TAG Dragonfly.178
   GIT_SHALLOW 1
-  # PATCH_COMMAND patch -p1 -i "${CMAKE_SOURCE_DIR}/patches/jsoncons-v0.171.0.patch"
   CMAKE_PASS_FLAGS "-DJSONCONS_BUILD_TESTS=OFF -DJSONCONS_HAS_POLYMORPHIC_ALLOCATOR=ON"
   LIB "none"
 )

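The flag worth noting here is -DJSONCONS_HAS_POLYMORPHIC_ALLOCATOR=ON: it builds jsoncons with std::pmr support, which is what lets the CompactObj changes below back JSON values with Dragonfly's own memory resources. A minimal sketch of what the flag enables, assuming the jsoncons::pmr::json alias the library ships under this option:

#include <memory_resource>
#include <jsoncons/json.hpp>

int main() {
  // Every heap block behind this value comes from `pool`, not the global heap.
  std::pmr::monotonic_buffer_resource pool;
  std::pmr::polymorphic_allocator<char> alloc{&pool};

  jsoncons::pmr::json obj{jsoncons::json_object_arg, jsoncons::semantic_tag::none, alloc};
  obj.insert_or_assign("engine", "dragonfly");
  return 0;
}
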
@@ -878,6 +878,9 @@ void CompactObj::SetJson(JsonType&& j) {
   if (taglen_ == JSON_TAG && JsonEnconding() == kEncodingJsonCons) {
     DCHECK(u_.json_obj.cons.json_ptr != nullptr);  // must be allocated
     u_.json_obj.cons.json_ptr->swap(j);
+    DCHECK(jsoncons::is_trivial_storage(u_.json_obj.cons.json_ptr->storage_kind()) ||
+           u_.json_obj.cons.json_ptr->get_allocator().resource() == tl.local_mr);
+
     // We do not set bytes_used as this is needed. Consider the two following cases:
     // 1. old json contains 50 bytes. The delta for new one is 50, so the total bytes
     //    the new json occupies is 100.

@@ -889,6 +892,10 @@ void CompactObj::SetJson(JsonType&& j) {
 
   SetMeta(JSON_TAG);
   u_.json_obj.cons.json_ptr = AllocateMR<JsonType>(std::move(j));
+
+  // With trivial storage json_ptr->get_allocator() throws an exception.
+  DCHECK(jsoncons::is_trivial_storage(u_.json_obj.cons.json_ptr->storage_kind()) ||
+         u_.json_obj.cons.json_ptr->get_allocator().resource() == tl.local_mr);
   u_.json_obj.cons.bytes_used = 0;
 }

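Both new DCHECKs follow the same pattern, and the added comment explains why: jsoncons keeps small scalars (null, bools, numbers, short strings) in trivial inline storage that carries no allocator, so get_allocator() throws for them. A hypothetical helper (names ours, not Dragonfly's) that mirrors the check's logic:

#include <memory_resource>
#include <jsoncons/json.hpp>

// Returns the memory resource backing v, or nullptr when v uses trivial
// storage and therefore owns no heap allocation at all.
std::pmr::memory_resource* ResourceOf(const jsoncons::pmr::json& v) {
  if (jsoncons::is_trivial_storage(v.storage_kind())) {
    return nullptr;  // get_allocator() would throw on this value
  }
  return v.get_allocator().resource();
}
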
@@ -319,9 +319,18 @@ bool JsonAreEquals(const JsonType& lhs, const JsonType& rhs) {
   }
 }
 
-template <typename Callback>
+// Use this method on the coordinator thread
+std::optional<JsonType> JsonFromString(std::string_view input) {
+  return dfly::JsonFromString(input, PMR_NS::get_default_resource());
+}
+
+// Use this method on the shard thread
+std::optional<JsonType> ShardJsonFromString(std::string_view input) {
+  return dfly::JsonFromString(input, CompactObj::memory_resource());
+}
+
 OpResult<DbSlice::AddOrFindResult> SetJson(const OpArgs& op_args, string_view key,
-                                           Callback parse_json_value) {
+                                           string_view json_str) {
   auto& db_slice = op_args.GetDbSlice();
 
   auto op_res = db_slice.AddOrFind(op_args.db_cntx, key);

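The two helpers differ only in the memory resource they hand to dfly::JsonFromString: the coordinator parses into the default resource, while shard threads parse directly into CompactObj::memory_resource() so the resulting value can be stored without a cross-resource copy. The real dfly::JsonFromString is not part of this diff; a plausible sketch, under the assumption that it feeds jsoncons' json_decoder a result allocator:

#include <optional>
#include <string_view>
#include <system_error>
#include <memory_resource>
#include <jsoncons/json.hpp>
#include <jsoncons/json_reader.hpp>

std::optional<jsoncons::pmr::json> ParseWith(std::string_view input,
                                             std::pmr::memory_resource* mr) {
  std::pmr::polymorphic_allocator<char> alloc{mr};
  jsoncons::json_decoder<jsoncons::pmr::json> decoder{alloc};  // result built on mr
  jsoncons::json_string_reader reader{input, decoder};
  std::error_code ec;
  reader.read(ec);
  if (ec || !decoder.is_valid()) {
    return std::nullopt;  // malformed JSON
  }
  return decoder.get_result();
}
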
@@ -331,17 +340,20 @@ OpResult<DbSlice::AddOrFindResult> SetJson(const OpArgs& op_args, string_view ke
 
   op_args.shard->search_indices()->RemoveDoc(key, op_args.db_cntx, res.it->second);
 
-  OpResult<JsonType> json_value = parse_json_value();
-  RETURN_ON_BAD_STATUS(json_value);
+  std::optional<JsonType> parsed_json = ShardJsonFromString(json_str);
+  if (!parsed_json) {
+    VLOG(1) << "got invalid JSON string '" << json_str << "' cannot be saved";
+    return OpStatus::INVALID_VALUE;
+  }
 
   if (JsonEnconding() == kEncodingJsonFlat) {
     flexbuffers::Builder fbb;
-    json::FromJsonType(json_value.value(), &fbb);
+    json::FromJsonType(*parsed_json, &fbb);
     fbb.Finish();
     const auto& buf = fbb.GetBuffer();
     res.it->second.SetJson(buf.data(), buf.size());
   } else {
-    res.it->second.SetJson(*std::move(json_value));
+    res.it->second.SetJson(std::move(*parsed_json));
   }
   op_args.shard->search_indices()->AddDoc(key, op_args.db_cntx, res.it->second);
   return std::move(res);

@@ -388,16 +400,6 @@ string JsonTypeToName(const JsonType& val) {
   return std::string{};
 }
 
-// Use this method on the coordinator thread
-std::optional<JsonType> JsonFromString(std::string_view input) {
-  return dfly::JsonFromString(input, PMR_NS::get_default_resource());
-}
-
-// Use this method on the shard thread
-std::optional<JsonType> ShardJsonFromString(std::string_view input) {
-  return dfly::JsonFromString(input, CompactObj::memory_resource());
-}
-
 OpResult<JsonType*> GetJson(const OpArgs& op_args, string_view key) {
   auto it_res = op_args.GetDbSlice().FindReadOnly(op_args.db_cntx, key, OBJ_JSON);
   if (!it_res.ok())

@@ -953,8 +955,8 @@ OpResult<long> OpDel(const OpArgs& op_args, string_view key, string_view path,
 
   if (json_path.HoldsJsonPath()) {
     const json::Path& path = json_path.AsJsonPath();
-    long deletions = json::MutatePath(
-        path, [](optional<string_view>, JsonType* val) { return true; }, json_val);
+    long deletions =
+        json::MutatePath(path, [](optional<string_view>, JsonType* val) { return true; }, json_val);
     return deletions;
   }

@@ -1308,15 +1310,6 @@ auto OpResp(const OpArgs& op_args, string_view key, const WrappedJsonPath& json_
 OpResult<bool> OpSet(const OpArgs& op_args, string_view key, string_view path,
                      const WrappedJsonPath& json_path, std::string_view json_str,
                      bool is_nx_condition, bool is_xx_condition) {
-  auto parse_json = [&json_str]() -> OpResult<JsonType> {
-    std::optional<JsonType> parsed_json = ShardJsonFromString(json_str);
-    if (!parsed_json) {
-      VLOG(1) << "got invalid JSON string '" << json_str << "' cannot be saved";
-      return OpStatus::SYNTAX_ERR;
-    }
-    return parsed_json.value();
-  };
-
   // The whole key should be replaced.
   // NOTE: unlike in Redis, we are overriding the value when the path is "$"
   // this is regardless of the current key type. In redis if the key exists

@@ -1335,13 +1328,7 @@ OpResult<bool> OpSet(const OpArgs& op_args, string_view key, string_view path,
   }
 
   JsonMemTracker mem_tracker;
-  // We need to deep copy parsed_json.value() and not use move! The reason is that otherwise
-  // it's really difficult to properly track memory deltas because even if we move below,
-  // the deallocation of parsed_json won't happen in the scope of SetJson but in the scope
-  // of this function. Because of this, the memory tracking will be off. Another solution here,
-  // is to use absl::Cleanup and dispatch another Find() but that's too complicated because then
-  // you need to take into account the order of destructors.
-  OpResult<DbSlice::AddOrFindResult> st = SetJson(op_args, key, std::move(parse_json));
+  OpResult<DbSlice::AddOrFindResult> st = SetJson(op_args, key, json_str);
   RETURN_ON_BAD_STATUS(st);
   mem_tracker.SetJsonSize(st->it->second, st->is_new);
   return true;

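Dropping the long comment is sound because the problem it described is gone: SetJson now receives the raw string, so parsing, allocation, and destruction of the temporary all happen inside the scope that JsonMemTracker measures, and the deep copy is no longer needed. A toy counting resource (illustrative names, not Dragonfly's) shows why a temporary outliving the measured scope skews the delta:

#include <cstddef>
#include <memory_resource>

class CountingResource : public std::pmr::memory_resource {
 public:
  std::size_t used() const { return used_; }

 private:
  void* do_allocate(std::size_t bytes, std::size_t align) override {
    used_ += bytes;
    return std::pmr::new_delete_resource()->allocate(bytes, align);
  }
  void do_deallocate(void* p, std::size_t bytes, std::size_t align) override {
    // This decrement is only observed if the temporary dies inside the
    // measured scope; one destroyed after measurement inflates used().
    used_ -= bytes;
    std::pmr::new_delete_resource()->deallocate(p, bytes, align);
  }
  bool do_is_equal(const std::pmr::memory_resource& other) const noexcept override {
    return this == &other;
  }

  std::size_t used_ = 0;
};
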
@@ -1355,18 +1342,19 @@ OpResult<bool> OpSet(const OpArgs& op_args, string_view key, string_view path,
   bool path_exists = false;
   bool operation_result = false;
 
-  OpResult<JsonType> parsed_json = parse_json();
-  RETURN_ON_BAD_STATUS(parsed_json);
+  optional<JsonType> parsed_json = ShardJsonFromString(json_str);
+  if (!parsed_json) {
+    VLOG(1) << "got invalid JSON string '" << json_str << "' cannot be saved";
+    return OpStatus::INVALID_VALUE;
+  }
   const JsonType& new_json = parsed_json.value();
 
   auto cb = [&](std::optional<std::string_view>, JsonType* val) -> MutateCallbackResult<> {
     path_exists = true;
     if (!is_nx_condition) {
       operation_result = true;
-      static_assert(
-          std::is_same_v<std::allocator_traits<JsonType>::propagate_on_container_copy_assignment,
-                         std::false_type>);
-      *val = new_json;
+      *val =
+          JsonType(new_json, std::pmr::polymorphic_allocator<char>{CompactObj::memory_resource()});
     }
     return {};
   };

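The deleted static_assert pinned down that JsonType's allocator does not propagate on copy assignment; rather than keep relying on that subtlety, the new code pins the copy to the shard's resource explicitly by copy-constructing with an allocator and move-assigning the result into *val. The same behavior demonstrated with std::pmr::string, purely for illustration:

#include <cassert>
#include <memory_resource>
#include <string>

int main() {
  std::pmr::monotonic_buffer_resource pool;
  std::pmr::string dst{"long enough to defeat small-string optimization", &pool};
  std::pmr::string src{"source text on the default memory resource here"};

  dst = src;  // propagate_on_container_copy_assignment is false: dst keeps &pool
  assert(dst.get_allocator().resource() == &pool);

  // Placing a copy on a chosen resource must be said explicitly, mirroring
  // JsonType(new_json, polymorphic_allocator{...}) in the hunk above.
  std::pmr::string pinned{src, &pool};
  assert(pinned.get_allocator().resource() == &pool);
  return 0;
}
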
@@ -43,7 +43,7 @@ async def test_rss_used_mem_gap(df_factory, type, keys, val_size, elements):
     await asyncio.sleep(1)  # Wait for another RSS heartbeat update in Dragonfly
 
     cmd = f"DEBUG POPULATE {keys} k {val_size} RAND TYPE {type} ELEMENTS {elements}"
-    print(f"Running {cmd}")
+    logging.info(f"Running {cmd}")
     await client.execute_command(cmd)
 
     await asyncio.sleep(2)  # Wait for another RSS heartbeat update in Dragonfly