fix: potential OOM when first request sent in small bits (#4325)

Before: if socket data arrived in small bits, then CheckForHttpProto would grow
io_buf_ capacity exponentially with each iteration. For example, test_match_http test
easily causes OOM.

This PR ensures that a read buffer is always available, while its capacity grows only linearly with the input size.
Currently, the total input in CheckForHttpProto is limited to 1024.

Signed-off-by: Roman Gershman <roman@dragonflydb.io>
This commit is contained in:
Roman Gershman 2024-12-17 13:26:33 +02:00 committed by GitHub
parent 0fe5e86a1a
commit 19164badf9
No known key found for this signature in database
GPG key ID: B5690EEEBB952194
2 changed files with 10 additions and 5 deletions

View file

@ -928,7 +928,7 @@ io::Result<bool> Connection::CheckForHttpProto() {
return MatchHttp11Line(ib); return MatchHttp11Line(ib);
} }
last_len = io_buf_.InputLen(); last_len = io_buf_.InputLen();
UpdateIoBufCapacity(io_buf_, stats_, [&]() { io_buf_.EnsureCapacity(io_buf_.Capacity()); }); UpdateIoBufCapacity(io_buf_, stats_, [&]() { io_buf_.EnsureCapacity(128); });
} while (last_len < 1024); } while (last_len < 1024);
return false; return false;
@ -959,10 +959,7 @@ void Connection::ConnectionFlow() {
// Main loop. // Main loop.
if (parse_status != ERROR && !ec) { if (parse_status != ERROR && !ec) {
if (io_buf_.AppendLen() < 64) { UpdateIoBufCapacity(io_buf_, stats_, [&]() { io_buf_.EnsureCapacity(64); });
UpdateIoBufCapacity(io_buf_, stats_,
[&]() { io_buf_.EnsureCapacity(io_buf_.Capacity() * 2); });
}
auto res = IoLoop(); auto res = IoLoop();
if (holds_alternative<error_code>(res)) { if (holds_alternative<error_code>(res)) {

View file

@ -606,6 +606,14 @@ async def test_subscribe_in_pipeline(async_client: aioredis.Redis):
assert res == ["one", ["subscribe", "ch1", 1], "two", ["subscribe", "ch2", 2], "three"] assert res == ["one", ["subscribe", "ch1", 1], "two", ["subscribe", "ch2", 2], "three"]
async def test_match_http(df_server: DflyInstance):
    """Regression test: feeding the server many tiny non-RESP writes must not
    cause unbounded io_buf_ growth while CheckForHttpProto sniffs the protocol.

    The payload never completes an HTTP request line, so the server keeps
    accumulating input; before the fix this grew the buffer exponentially.
    """
    client = df_server.client()
    reader, writer = await asyncio.open_connection("localhost", df_server.port)
    try:
        # 2000 small writes, drained one by one, so the server receives the
        # data in many tiny chunks rather than one large read.
        for _ in range(2000):
            writer.write(b"foo bar ")
            await writer.drain()
    finally:
        # Close the raw connection so the test does not leak a socket.
        writer.close()
        await writer.wait_closed()
""" """
This test makes sure that Dragonfly can receive blocks of pipelined commands even This test makes sure that Dragonfly can receive blocks of pipelined commands even
while a script is still executing. This is a dangerous scenario because both the dispatch fiber while a script is still executing. This is a dangerous scenario because both the dispatch fiber