Clayton Casciato
2025-11-04 08:57:59 -07:00
committed by Scott Murray
parent 029cf84f6b
commit afbbe28cee
30 changed files with 780 additions and 10261 deletions

View File

@@ -0,0 +1,40 @@
From a59708a9300df8116867ac77f7829f7fd647325e Mon Sep 17 00:00:00 2001
From: Clayton Casciato <ccasciato@21sw.us>
Date: Mon, 3 Nov 2025 10:30:26 -0700
Subject: [PATCH] Skip pkg Makefile from using its own rust steps
Upstream-Status: Inappropriate [OE Specific]
Signed-off-by: Armin Kuster <akuster808@gmail.com>
Signed-off-by: Clayton Casciato <majortomtosourcecontrol@gmail.com>
---
Makefile.am | 2 +-
Makefile.in | 2 +-
2 files changed, 2 insertions(+), 2 deletions(-)
diff --git a/Makefile.am b/Makefile.am
index d0d3d09..a572912 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE suricata.yaml.in \
scripts/generate-images.sh \
scripts/docs-almalinux9-minimal-build.sh \
scripts/docs-ubuntu-debian-minimal-build.sh
-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
$(SURICATA_UPDATE_DIR)
CLEANFILES = stamp-h[0-9]*
diff --git a/Makefile.in b/Makefile.in
index 7a89353..3864613 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -428,7 +428,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE suricata.yaml.in \
scripts/docs-almalinux9-minimal-build.sh \
scripts/docs-ubuntu-debian-minimal-build.sh
-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
$(SURICATA_UPDATE_DIR)
CLEANFILES = stamp-h[0-9]*

View File

@@ -1,294 +0,0 @@
From e68ec4b227d19498f364a41eb25d3182f0383ca5 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Wed, 27 Mar 2024 14:33:54 +0100
Subject: [PATCH] http2: use a reference counter for headers
Ticket: 6892
As HTTP hpack header compression allows one single byte to
express a previously seen arbitrary-size header block (name+value)
we should avoid to copy the vectors data, but just point
to the same data, while remaining memory safe, even in the case
of later headers eviction from the dynamic table.
Rust std solution is Rc, and the use of clone, so long as the
data is accessed by only one thread.
(cherry picked from commit 390f09692eb99809c679d3f350c7cc185d163e1a)
CVE: CVE-2024-32663
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/e68ec4b227d19498f364a41eb25d3182f0383ca5]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
rust/src/http2/detect.rs | 19 +++++++------
rust/src/http2/http2.rs | 2 +-
rust/src/http2/parser.rs | 61 +++++++++++++++++++++-------------------
3 files changed, 43 insertions(+), 39 deletions(-)
diff --git a/rust/src/http2/detect.rs b/rust/src/http2/detect.rs
index 99261ad..904b9ad 100644
--- a/rust/src/http2/detect.rs
+++ b/rust/src/http2/detect.rs
@@ -23,6 +23,7 @@ use crate::core::Direction;
use crate::detect::uint::{detect_match_uint, DetectUintData};
use std::ffi::CStr;
use std::str::FromStr;
+use std::rc::Rc;
fn http2_tx_has_frametype(
tx: &mut HTTP2Transaction, direction: Direction, value: u8,
@@ -404,7 +405,7 @@ fn http2_frames_get_header_firstvalue<'a>(
for frame in frames {
if let Some(blocks) = http2_header_blocks(frame) {
for block in blocks.iter() {
- if block.name == name.as_bytes() {
+ if block.name.as_ref() == name.as_bytes() {
return Ok(&block.value);
}
}
@@ -428,7 +429,7 @@ pub fn http2_frames_get_header_value_vec(
for frame in frames {
if let Some(blocks) = http2_header_blocks(frame) {
for block in blocks.iter() {
- if block.name == name.as_bytes() {
+ if block.name.as_ref() == name.as_bytes() {
if found == 0 {
vec.extend_from_slice(&block.value);
found = 1;
@@ -465,7 +466,7 @@ fn http2_frames_get_header_value<'a>(
for frame in frames {
if let Some(blocks) = http2_header_blocks(frame) {
for block in blocks.iter() {
- if block.name == name.as_bytes() {
+ if block.name.as_ref() == name.as_bytes() {
if found == 0 {
single = Ok(&block.value);
found = 1;
@@ -905,8 +906,8 @@ fn http2_tx_set_header(state: &mut HTTP2State, name: &[u8], input: &[u8]) {
};
let mut blocks = Vec::new();
let b = parser::HTTP2FrameHeaderBlock {
- name: name.to_vec(),
- value: input.to_vec(),
+ name: Rc::new(name.to_vec()),
+ value: Rc::new(input.to_vec()),
error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
sizeupdate: 0,
};
@@ -1061,15 +1062,15 @@ mod tests {
};
let mut blocks = Vec::new();
let b = parser::HTTP2FrameHeaderBlock {
- name: "Host".as_bytes().to_vec(),
- value: "abc.com".as_bytes().to_vec(),
+ name: "Host".as_bytes().to_vec().into(),
+ value: "abc.com".as_bytes().to_vec().into(),
error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
sizeupdate: 0,
};
blocks.push(b);
let b2 = parser::HTTP2FrameHeaderBlock {
- name: "Host".as_bytes().to_vec(),
- value: "efg.net".as_bytes().to_vec(),
+ name: "Host".as_bytes().to_vec().into(),
+ value: "efg.net".as_bytes().to_vec().into(),
error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
sizeupdate: 0,
};
diff --git a/rust/src/http2/http2.rs b/rust/src/http2/http2.rs
index 326030f..d14ca06 100644
--- a/rust/src/http2/http2.rs
+++ b/rust/src/http2/http2.rs
@@ -204,7 +204,7 @@ impl HTTP2Transaction {
fn handle_headers(&mut self, blocks: &[parser::HTTP2FrameHeaderBlock], dir: Direction) {
for block in blocks {
- if block.name == b"content-encoding" {
+ if block.name.as_ref() == b"content-encoding" {
self.decoder.http2_encoding_fromvec(&block.value, dir);
}
}
diff --git a/rust/src/http2/parser.rs b/rust/src/http2/parser.rs
index adabeb2..1a46437 100644
--- a/rust/src/http2/parser.rs
+++ b/rust/src/http2/parser.rs
@@ -30,6 +30,7 @@ use nom7::sequence::tuple;
use nom7::{Err, IResult};
use std::fmt;
use std::str::FromStr;
+use std::rc::Rc;
#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq, FromPrimitive, Debug)]
@@ -295,8 +296,8 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option<HTTP
};
if !name.is_empty() {
return Some(HTTP2FrameHeaderBlock {
- name: name.as_bytes().to_vec(),
- value: value.as_bytes().to_vec(),
+ name: Rc::new(name.as_bytes().to_vec()),
+ value: Rc::new(value.as_bytes().to_vec()),
error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
sizeupdate: 0,
});
@@ -304,23 +305,23 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option<HTTP
//use dynamic table
if n == 0 {
return Some(HTTP2FrameHeaderBlock {
- name: Vec::new(),
- value: Vec::new(),
+ name: Rc::new(Vec::new()),
+ value: Rc::new(Vec::new()),
error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIndex0,
sizeupdate: 0,
});
} else if dyn_headers.table.len() + HTTP2_STATIC_HEADERS_NUMBER < n as usize {
return Some(HTTP2FrameHeaderBlock {
- name: Vec::new(),
- value: Vec::new(),
+ name: Rc::new(Vec::new()),
+ value: Rc::new(Vec::new()),
error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed,
sizeupdate: 0,
});
} else {
let indyn = dyn_headers.table.len() - (n as usize - HTTP2_STATIC_HEADERS_NUMBER);
let headcopy = HTTP2FrameHeaderBlock {
- name: dyn_headers.table[indyn].name.to_vec(),
- value: dyn_headers.table[indyn].value.to_vec(),
+ name: dyn_headers.table[indyn].name.clone(),
+ value: dyn_headers.table[indyn].value.clone(),
error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
sizeupdate: 0,
};
@@ -348,8 +349,10 @@ impl fmt::Display for HTTP2HeaderDecodeStatus {
#[derive(Clone, Debug)]
pub struct HTTP2FrameHeaderBlock {
- pub name: Vec<u8>,
- pub value: Vec<u8>,
+ // Use Rc reference counted so that indexed headers do not get copied.
+ // Otherwise, this leads to quadratic complexity in memory occupation.
+ pub name: Rc<Vec<u8>>,
+ pub value: Rc<Vec<u8>>,
pub error: HTTP2HeaderDecodeStatus,
pub sizeupdate: u64,
}
@@ -391,7 +394,7 @@ fn http2_parse_headers_block_literal_common<'a>(
) -> IResult<&'a [u8], HTTP2FrameHeaderBlock> {
let (i3, name, error) = if index == 0 {
match http2_parse_headers_block_string(input) {
- Ok((r, n)) => Ok((r, n, HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)),
+ Ok((r, n)) => Ok((r, Rc::new(n), HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)),
Err(e) => Err(e),
}
} else {
@@ -403,7 +406,7 @@ fn http2_parse_headers_block_literal_common<'a>(
)),
None => Ok((
input,
- Vec::new(),
+ Rc::new(Vec::new()),
HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed,
)),
}
@@ -413,7 +416,7 @@ fn http2_parse_headers_block_literal_common<'a>(
i4,
HTTP2FrameHeaderBlock {
name,
- value,
+ value: Rc::new(value),
error,
sizeupdate: 0,
},
@@ -435,8 +438,8 @@ fn http2_parse_headers_block_literal_incindex<'a>(
match r {
Ok((r, head)) => {
let headcopy = HTTP2FrameHeaderBlock {
- name: head.name.to_vec(),
- value: head.value.to_vec(),
+ name: head.name.clone(),
+ value: head.value.clone(),
error: head.error,
sizeupdate: 0,
};
@@ -556,8 +559,8 @@ fn http2_parse_headers_block_dynamic_size<'a>(
return Ok((
i3,
HTTP2FrameHeaderBlock {
- name: Vec::new(),
- value: Vec::new(),
+ name: Rc::new(Vec::new()),
+ value: Rc::new(Vec::new()),
error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSizeUpdate,
sizeupdate: maxsize2,
},
@@ -614,8 +617,8 @@ fn http2_parse_headers_blocks<'a>(
// if we error from http2_parse_var_uint, we keep the first parsed headers
if err.code == ErrorKind::LengthValue {
blocks.push(HTTP2FrameHeaderBlock {
- name: Vec::new(),
- value: Vec::new(),
+ name: Rc::new(Vec::new()),
+ value: Rc::new(Vec::new()),
error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIntegerOverflow,
sizeupdate: 0,
});
@@ -765,8 +768,8 @@ mod tests {
match r0 {
Ok((remainder, hd)) => {
// Check the first message.
- assert_eq!(hd.name, ":method".as_bytes().to_vec());
- assert_eq!(hd.value, "GET".as_bytes().to_vec());
+ assert_eq!(hd.name, ":method".as_bytes().to_vec().into());
+ assert_eq!(hd.value, "GET".as_bytes().to_vec().into());
// And we should have no bytes left.
assert_eq!(remainder.len(), 0);
}
@@ -782,8 +785,8 @@ mod tests {
match r1 {
Ok((remainder, hd)) => {
// Check the first message.
- assert_eq!(hd.name, "accept".as_bytes().to_vec());
- assert_eq!(hd.value, "*/*".as_bytes().to_vec());
+ assert_eq!(hd.name, "accept".as_bytes().to_vec().into());
+ assert_eq!(hd.value, "*/*".as_bytes().to_vec().into());
// And we should have no bytes left.
assert_eq!(remainder.len(), 0);
assert_eq!(dynh.table.len(), 1);
@@ -802,8 +805,8 @@ mod tests {
match result {
Ok((remainder, hd)) => {
// Check the first message.
- assert_eq!(hd.name, ":authority".as_bytes().to_vec());
- assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec());
+ assert_eq!(hd.name, ":authority".as_bytes().to_vec().into());
+ assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into());
// And we should have no bytes left.
assert_eq!(remainder.len(), 0);
assert_eq!(dynh.table.len(), 2);
@@ -820,8 +823,8 @@ mod tests {
match r3 {
Ok((remainder, hd)) => {
// same as before
- assert_eq!(hd.name, ":authority".as_bytes().to_vec());
- assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec());
+ assert_eq!(hd.name, ":authority".as_bytes().to_vec().into());
+ assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into());
// And we should have no bytes left.
assert_eq!(remainder.len(), 0);
assert_eq!(dynh.table.len(), 2);
@@ -856,8 +859,8 @@ mod tests {
match r2 {
Ok((remainder, hd)) => {
// Check the first message.
- assert_eq!(hd.name, ":path".as_bytes().to_vec());
- assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec());
+ assert_eq!(hd.name, ":path".as_bytes().to_vec().into());
+ assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec().into());
// And we should have no bytes left.
assert_eq!(remainder.len(), 0);
assert_eq!(dynh.table.len(), 2);
--
2.50.1

View File

@@ -1,70 +0,0 @@
From c0af92295e833d1db29b184d63cd3b829451d7fd Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 28 Mar 2024 11:15:51 +0100
Subject: [PATCH] http2: do not log duplicate headers
Ticket: 6900
And thus avoid DOS by logging a request using a compressed
header block repeated many times and having a long value...
(cherry picked from commit 03442c9071b8d863d26b609d54c6eacf4de9e340)
CVE: CVE-2024-32663
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/c0af92295e833d1db29b184d63cd3b829451d7fd]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
rust/src/http2/logger.rs | 17 +++++++++++++++--
1 file changed, 15 insertions(+), 2 deletions(-)
diff --git a/rust/src/http2/logger.rs b/rust/src/http2/logger.rs
index d25f852..a117a54 100644
--- a/rust/src/http2/logger.rs
+++ b/rust/src/http2/logger.rs
@@ -19,7 +19,8 @@ use super::http2::{HTTP2Frame, HTTP2FrameTypeData, HTTP2Transaction};
use super::parser;
use crate::jsonbuilder::{JsonBuilder, JsonError};
use std;
-use std::collections::HashMap;
+use std::collections::{HashMap, HashSet};
+use std::rc::Rc;
#[derive(Hash, PartialEq, Eq, Debug)]
enum HeaderName {
@@ -35,10 +36,20 @@ fn log_http2_headers<'a>(
blocks: &'a [parser::HTTP2FrameHeaderBlock], js: &mut JsonBuilder,
common: &mut HashMap<HeaderName, &'a Vec<u8>>,
) -> Result<(), JsonError> {
+ let mut logged_headers = HashSet::new();
for block in blocks {
- js.start_object()?;
+ // delay js.start_object() because we skip suplicate headers
match block.error {
parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess => {
+ if Rc::strong_count(&block.name) > 2 {
+ // more than one reference in headers table + current headers
+ let ptr = Rc::as_ptr(&block.name) as usize;
+ if !logged_headers.insert(ptr) {
+ // only log once
+ continue;
+ }
+ }
+ js.start_object()?;
js.set_string_from_bytes("name", &block.name)?;
js.set_string_from_bytes("value", &block.value)?;
if let Ok(name) = std::str::from_utf8(&block.name) {
@@ -66,9 +77,11 @@ fn log_http2_headers<'a>(
}
}
parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSizeUpdate => {
+ js.start_object()?;
js.set_uint("table_size_update", block.sizeupdate)?;
}
_ => {
+ js.start_object()?;
js.set_string("error", &block.error.to_string())?;
}
}
--
2.50.1

View File

@@ -1,53 +0,0 @@
From d5ffecf11ad2c6fe89265e518f5d7443caf26ba4 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 28 Mar 2024 14:00:02 +0100
Subject: [PATCH] util/base64: fix buffer overflow
Ticket: 6902
In case the caller of DecodeBase64 does not supply a big enough
output buffer.
(cherry picked from commit fd47e67dc65f9111895c88fb406c938b1f857325)
CVE: CVE-2024-32664
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/d5ffecf11ad2c6fe89265e518f5d7443caf26ba4]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/util-base64.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/util-base64.c b/src/util-base64.c
index 4a4a5d1..d973f0e 100644
--- a/src/util-base64.c
+++ b/src/util-base64.c
@@ -156,6 +156,8 @@ Base64Ecode DecodeBase64(uint8_t *dest, uint32_t dest_size, const uint8_t *src,
ecode = BASE64_ECODE_BUF;
break;
}
+ if (dest_size - *decoded_bytes < ASCII_BLOCK)
+ return BASE64_ECODE_BUF;
/* Decode base-64 block into ascii block and move pointer */
DecodeBase64Block(dptr, b64);
@@ -183,7 +185,7 @@ Base64Ecode DecodeBase64(uint8_t *dest, uint32_t dest_size, const uint8_t *src,
/* if the destination size is not at least 3 Bytes long, it'll give a dynamic
* buffer overflow while decoding, so, return and let the caller take care of the
* remaining bytes to be decoded which should always be < 4 at this stage */
- if (dest_size - *decoded_bytes < 3)
+ if (dest_size - *decoded_bytes < ASCII_BLOCK)
return BASE64_ECODE_BUF;
*decoded_bytes += numDecoded_blk;
DecodeBase64Block(dptr, b64);
@@ -193,6 +195,8 @@ Base64Ecode DecodeBase64(uint8_t *dest, uint32_t dest_size, const uint8_t *src,
/* Finish remaining b64 bytes by padding */
if (valid && bbidx > 0 && (mode != BASE64_MODE_RFC2045)) {
/* Decode remaining */
+ if (dest_size - *decoded_bytes < ASCII_BLOCK)
+ return BASE64_ECODE_BUF;
*decoded_bytes += ASCII_BLOCK - (B64_BLOCK - bbidx);
DecodeBase64Block(dptr, b64);
}
--
2.50.1

View File

@@ -1,235 +0,0 @@
From 2f39ba75f153ba9bdf8eedc2a839cc973dbaea66 Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Tue, 28 Nov 2023 12:35:26 -0600
Subject: [PATCH] defrag: check next fragment for overlap before stopping
re-assembly
Instead of breaking the loop when the current fragment does not have
any more fragments, set a flag and continue to the next fragment as
the next fragment may have data that occurs before this fragment, but
overlaps it.
Then break if the next fragment does not overlap the previous.
Bug: #6668
(cherry picked from commit d0fd0782505d837e691ceef1b801776f0db82726)
CVE: CVE-2024-32867
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/2f39ba75f153ba9bdf8eedc2a839cc973dbaea66]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/defrag.c | 145 ++++++++++++++++++++++++++++++++++++++++++++++++---
1 file changed, 139 insertions(+), 6 deletions(-)
diff --git a/src/defrag.c b/src/defrag.c
index 38704c9..e154899 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -295,10 +295,20 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
uint16_t hlen = 0;
int ip_hdr_offset = 0;
+ /* Assume more frags. */
+ uint16_t prev_offset = 0;
+ bool more_frags = 1;
+
RB_FOREACH(frag, IP_FRAGMENTS, &tracker->fragment_tree) {
SCLogDebug("frag %p, data_len %u, offset %u, pcap_cnt %"PRIu64,
frag, frag->data_len, frag->offset, frag->pcap_cnt);
+ /* Previous fragment has no more fragments, and this packet
+ * doesn't overlap. We're done. */
+ if (!more_frags && frag->offset > prev_offset) {
+ break;
+ }
+
if (frag->skip)
continue;
if (frag->ltrim >= frag->data_len)
@@ -339,9 +349,16 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
fragmentable_len = frag->offset + frag->data_len;
}
- if (!frag->more_frags) {
- break;
- }
+ /* Even if this fragment is flagged as having no more
+ * fragments, still continue. The next fragment may have the
+ * same offset with data that is preferred.
+ *
+ * For example, DefragBsdFragmentAfterNoMfIpv{4,6}Test
+ *
+ * This is due to not all fragments being completely trimmed,
+ * but relying on the copy ordering. */
+ more_frags = frag->more_frags;
+ prev_offset = frag->offset;
}
SCLogDebug("ip_hdr_offset %u, hlen %" PRIu16 ", fragmentable_len %" PRIu16, ip_hdr_offset, hlen,
@@ -436,7 +453,15 @@ Defrag6Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
uint16_t fragmentable_len = 0;
int ip_hdr_offset = 0;
uint8_t next_hdr = 0;
+
+ /* Assume more frags. */
+ uint16_t prev_offset = 0;
+ bool more_frags = 1;
+
RB_FOREACH(frag, IP_FRAGMENTS, &tracker->fragment_tree) {
+ if (!more_frags && frag->offset > prev_offset) {
+ break;
+ }
if (frag->skip)
continue;
if (frag->data_len - frag->ltrim <= 0)
@@ -481,9 +506,16 @@ Defrag6Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
fragmentable_len = frag->offset + frag->data_len;
}
- if (!frag->more_frags) {
- break;
- }
+ /* Even if this fragment is flagged as having no more
+ * fragments, still continue. The next fragment may have the
+ * same offset with data that is preferred.
+ *
+ * For example, DefragBsdFragmentAfterNoMfIpv{4,6}Test
+ *
+ * This is due to not all fragments being completely trimmed,
+ * but relying on the copy ordering. */
+ more_frags = frag->more_frags;
+ prev_offset = frag->offset;
}
rp->ip6h = (IPV6Hdr *)(GET_PKT_DATA(rp) + ip_hdr_offset);
@@ -2374,6 +2406,10 @@ static int DefragMfIpv4Test(void)
* fragments should be in the re-assembled packet. */
FAIL_IF(IPV4_GET_IPLEN(p) != 36);
+ /* Verify the payload of the IPv4 packet. */
+ uint8_t expected_payload[] = "AAAAAAAABBBBBBBB";
+ FAIL_IF(memcmp(GET_PKT_DATA(p) + sizeof(IPV4Hdr), expected_payload, sizeof(expected_payload)));
+
SCFree(p1);
SCFree(p2);
SCFree(p3);
@@ -2417,6 +2453,10 @@ static int DefragMfIpv6Test(void)
* of 2 fragments, so 16. */
FAIL_IF(IPV6_GET_PLEN(p) != 16);
+ /* Verify the payload of the IPv4 packet. */
+ uint8_t expected_payload[] = "AAAAAAAABBBBBBBB";
+ FAIL_IF(memcmp(GET_PKT_DATA(p) + sizeof(IPV6Hdr), expected_payload, sizeof(expected_payload)));
+
SCFree(p1);
SCFree(p2);
SCFree(p3);
@@ -2510,6 +2550,96 @@ static int DefragTestJeremyLinux(void)
PASS;
}
+static int DefragBsdFragmentAfterNoMfIpv4Test(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[4];
+
+ packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 24 >> 3, 0, 'A', 16);
+ packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 8 >> 3, 1, 'B', 16);
+ packets[2] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 16 >> 3, 1, 'C', 16);
+ packets[3] = BuildIpv4TestPacket(IPPROTO_ICMP, 0x96, 0, 1, 'D', 8);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[2]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[3]);
+ FAIL_IF_NULL(r);
+
+ // clang-format off
+ uint8_t expected[] = {
+ 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D',
+ 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
+ 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
+ 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C',
+ 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A',
+ };
+ // clang-format on
+
+ if (memcmp(expected, GET_PKT_DATA(r) + 20, sizeof(expected)) != 0) {
+ printf("Expected:\n");
+ PrintRawDataFp(stdout, expected, sizeof(expected));
+ printf("Got:\n");
+ PrintRawDataFp(stdout, GET_PKT_DATA(r) + 20, GET_PKT_LEN(r) - 20);
+ FAIL;
+ }
+
+ DefragDestroy();
+ PASS;
+}
+
+static int DefragBsdFragmentAfterNoMfIpv6Test(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[4];
+
+ packets[0] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 24 >> 3, 0, 'A', 16);
+ packets[1] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 8 >> 3, 1, 'B', 16);
+ packets[2] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 16 >> 3, 1, 'C', 16);
+ packets[3] = BuildIpv6TestPacket(IPPROTO_ICMP, 0x96, 0, 1, 'D', 8);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[2]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[3]);
+ FAIL_IF_NULL(r);
+
+ // clang-format off
+ uint8_t expected[] = {
+ 'D', 'D', 'D', 'D', 'D', 'D', 'D', 'D',
+ 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
+ 'B', 'B', 'B', 'B', 'B', 'B', 'B', 'B',
+ 'C', 'C', 'C', 'C', 'C', 'C', 'C', 'C',
+ 'A', 'A', 'A', 'A', 'A', 'A', 'A', 'A',
+ };
+ // clang-format on
+
+ if (memcmp(expected, GET_PKT_DATA(r) + 40, sizeof(expected)) != 0) {
+ printf("Expected:\n");
+ PrintRawDataFp(stdout, expected, sizeof(expected));
+ printf("Got:\n");
+ PrintRawDataFp(stdout, GET_PKT_DATA(r) + 40, GET_PKT_LEN(r) - 40);
+ FAIL;
+ }
+
+ DefragDestroy();
+ PASS;
+}
+
#endif /* UNITTESTS */
void DefragRegisterTests(void)
@@ -2555,5 +2685,8 @@ void DefragRegisterTests(void)
UtRegisterTest("DefragTestBadProto", DefragTestBadProto);
UtRegisterTest("DefragTestJeremyLinux", DefragTestJeremyLinux);
+
+ UtRegisterTest("DefragBsdFragmentAfterNoMfIpv4Test", DefragBsdFragmentAfterNoMfIpv4Test);
+ UtRegisterTest("DefragBsdFragmentAfterNoMfIpv6Test", DefragBsdFragmentAfterNoMfIpv6Test);
#endif /* UNITTESTS */
}
--
2.50.1

View File

@@ -1,591 +0,0 @@
From 7137d5e7ab5500f1b7f3391f8ab55a59f1e4cbd7 Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Mon, 27 Nov 2023 16:27:27 -0600
Subject: [PATCH] defrag: consistent unit test naming
Use a more consistent naming scheme between ipv4 and ipv6.
(cherry picked from commit 2f00b5870abc6053fca8271a0a827babc03d56f0)
CVE: CVE-2024-32867
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/7137d5e7ab5500f1b7f3391f8ab55a59f1e4cbd7]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/defrag.c | 217 ++++++++++++++++++++++++---------------------------
1 file changed, 102 insertions(+), 115 deletions(-)
diff --git a/src/defrag.c b/src/defrag.c
index e154899..99fbab3 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -1125,8 +1125,8 @@ void DefragDestroy(void)
* Allocate a test packet. Nothing to fancy, just a simple IP packet
* with some payload of no particular protocol.
*/
-static Packet *BuildTestPacket(uint8_t proto, uint16_t id, uint16_t off, int mf,
- const char content, int content_len)
+static Packet *BuildIpv4TestPacket(
+ uint8_t proto, uint16_t id, uint16_t off, int mf, const char content, int content_len)
{
Packet *p = NULL;
int hlen = 20;
@@ -1199,8 +1199,8 @@ error:
return NULL;
}
-static Packet *IPV6BuildTestPacket(uint8_t proto, uint32_t id, uint16_t off,
- int mf, const char content, int content_len)
+static Packet *BuildIpv6TestPacket(
+ uint8_t proto, uint32_t id, uint16_t off, int mf, const char content, int content_len)
{
Packet *p = NULL;
uint8_t *pcontent;
@@ -1283,11 +1283,11 @@ static int DefragInOrderSimpleTest(void)
DefragInit();
- p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8);
+ p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = BuildTestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8);
+ p2 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8);
FAIL_IF_NULL(p2);
- p3 = BuildTestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3);
+ p3 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3);
FAIL_IF_NULL(p3);
FAIL_IF(Defrag(NULL, NULL, p1) != NULL);
@@ -1335,11 +1335,11 @@ static int DefragReverseSimpleTest(void)
DefragInit();
- p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8);
+ p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = BuildTestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8);
+ p2 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 1, 1, 'B', 8);
FAIL_IF_NULL(p2);
- p3 = BuildTestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3);
+ p3 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3);
FAIL_IF_NULL(p3);
FAIL_IF(Defrag(NULL, NULL, p3) != NULL);
@@ -1379,7 +1379,7 @@ static int DefragReverseSimpleTest(void)
* Test the simplest possible re-assembly scenario. All packet in
* order and no overlaps.
*/
-static int IPV6DefragInOrderSimpleTest(void)
+static int DefragInOrderSimpleIpv6Test(void)
{
Packet *p1 = NULL, *p2 = NULL, *p3 = NULL;
Packet *reassembled = NULL;
@@ -1388,11 +1388,11 @@ static int IPV6DefragInOrderSimpleTest(void)
DefragInit();
- p1 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8);
+ p1 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8);
+ p2 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8);
FAIL_IF_NULL(p2);
- p3 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3);
+ p3 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3);
FAIL_IF_NULL(p3);
FAIL_IF(Defrag(NULL, NULL, p1) != NULL);
@@ -1426,7 +1426,7 @@ static int IPV6DefragInOrderSimpleTest(void)
PASS;
}
-static int IPV6DefragReverseSimpleTest(void)
+static int DefragReverseSimpleIpv6Test(void)
{
DefragContext *dc = NULL;
Packet *p1 = NULL, *p2 = NULL, *p3 = NULL;
@@ -1439,11 +1439,11 @@ static int IPV6DefragReverseSimpleTest(void)
dc = DefragContextNew();
FAIL_IF_NULL(dc);
- p1 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8);
+ p1 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8);
+ p2 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 1, 1, 'B', 8);
FAIL_IF_NULL(p2);
- p3 = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3);
+ p3 = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 2, 0, 'C', 3);
FAIL_IF_NULL(p3);
FAIL_IF(Defrag(NULL, NULL, p3) != NULL);
@@ -1496,59 +1496,59 @@ static int DefragDoSturgesNovakTest(int policy, u_char *expected,
*/
/* A*24 at 0. */
- packets[0] = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24);
+ packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24);
/* B*15 at 32. */
- packets[1] = BuildTestPacket(IPPROTO_ICMP, id, 32 >> 3, 1, 'B', 16);
+ packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 32 >> 3, 1, 'B', 16);
/* C*24 at 48. */
- packets[2] = BuildTestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'C', 24);
+ packets[2] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'C', 24);
/* D*8 at 80. */
- packets[3] = BuildTestPacket(IPPROTO_ICMP, id, 80 >> 3, 1, 'D', 8);
+ packets[3] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 80 >> 3, 1, 'D', 8);
/* E*16 at 104. */
- packets[4] = BuildTestPacket(IPPROTO_ICMP, id, 104 >> 3, 1, 'E', 16);
+ packets[4] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 104 >> 3, 1, 'E', 16);
/* F*24 at 120. */
- packets[5] = BuildTestPacket(IPPROTO_ICMP, id, 120 >> 3, 1, 'F', 24);
+ packets[5] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 120 >> 3, 1, 'F', 24);
/* G*16 at 144. */
- packets[6] = BuildTestPacket(IPPROTO_ICMP, id, 144 >> 3, 1, 'G', 16);
+ packets[6] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 144 >> 3, 1, 'G', 16);
/* H*16 at 160. */
- packets[7] = BuildTestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'H', 16);
+ packets[7] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'H', 16);
/* I*8 at 176. */
- packets[8] = BuildTestPacket(IPPROTO_ICMP, id, 176 >> 3, 1, 'I', 8);
+ packets[8] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 176 >> 3, 1, 'I', 8);
/*
* Overlapping subsequent fragments.
*/
/* J*32 at 8. */
- packets[9] = BuildTestPacket(IPPROTO_ICMP, id, 8 >> 3, 1, 'J', 32);
+ packets[9] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 8 >> 3, 1, 'J', 32);
/* K*24 at 48. */
- packets[10] = BuildTestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'K', 24);
+ packets[10] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 48 >> 3, 1, 'K', 24);
/* L*24 at 72. */
- packets[11] = BuildTestPacket(IPPROTO_ICMP, id, 72 >> 3, 1, 'L', 24);
+ packets[11] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 72 >> 3, 1, 'L', 24);
/* M*24 at 96. */
- packets[12] = BuildTestPacket(IPPROTO_ICMP, id, 96 >> 3, 1, 'M', 24);
+ packets[12] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 96 >> 3, 1, 'M', 24);
/* N*8 at 128. */
- packets[13] = BuildTestPacket(IPPROTO_ICMP, id, 128 >> 3, 1, 'N', 8);
+ packets[13] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 128 >> 3, 1, 'N', 8);
/* O*8 at 152. */
- packets[14] = BuildTestPacket(IPPROTO_ICMP, id, 152 >> 3, 1, 'O', 8);
+ packets[14] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 152 >> 3, 1, 'O', 8);
/* P*8 at 160. */
- packets[15] = BuildTestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'P', 8);
+ packets[15] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 160 >> 3, 1, 'P', 8);
/* Q*16 at 176. */
- packets[16] = BuildTestPacket(IPPROTO_ICMP, id, 176 >> 3, 0, 'Q', 16);
+ packets[16] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 176 >> 3, 0, 'Q', 16);
default_policy = policy;
@@ -1588,8 +1588,7 @@ static int DefragDoSturgesNovakTest(int policy, u_char *expected,
PASS;
}
-static int IPV6DefragDoSturgesNovakTest(int policy, u_char *expected,
- size_t expected_len)
+static int DefragDoSturgesNovakIpv6Test(int policy, u_char *expected, size_t expected_len)
{
int i;
@@ -1608,59 +1607,59 @@ static int IPV6DefragDoSturgesNovakTest(int policy, u_char *expected,
*/
/* A*24 at 0. */
- packets[0] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 24);
+ packets[0] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 0, 1, 'A', 24);
/* B*15 at 32. */
- packets[1] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 32 >> 3, 1, 'B', 16);
+ packets[1] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 32 >> 3, 1, 'B', 16);
/* C*24 at 48. */
- packets[2] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'C', 24);
+ packets[2] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'C', 24);
/* D*8 at 80. */
- packets[3] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 80 >> 3, 1, 'D', 8);
+ packets[3] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 80 >> 3, 1, 'D', 8);
/* E*16 at 104. */
- packets[4] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 104 >> 3, 1, 'E', 16);
+ packets[4] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 104 >> 3, 1, 'E', 16);
/* F*24 at 120. */
- packets[5] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 120 >> 3, 1, 'F', 24);
+ packets[5] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 120 >> 3, 1, 'F', 24);
/* G*16 at 144. */
- packets[6] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 144 >> 3, 1, 'G', 16);
+ packets[6] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 144 >> 3, 1, 'G', 16);
/* H*16 at 160. */
- packets[7] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'H', 16);
+ packets[7] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'H', 16);
/* I*8 at 176. */
- packets[8] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 1, 'I', 8);
+ packets[8] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 1, 'I', 8);
/*
* Overlapping subsequent fragments.
*/
/* J*32 at 8. */
- packets[9] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 8 >> 3, 1, 'J', 32);
+ packets[9] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 8 >> 3, 1, 'J', 32);
/* K*24 at 48. */
- packets[10] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'K', 24);
+ packets[10] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 48 >> 3, 1, 'K', 24);
/* L*24 at 72. */
- packets[11] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 72 >> 3, 1, 'L', 24);
+ packets[11] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 72 >> 3, 1, 'L', 24);
/* M*24 at 96. */
- packets[12] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 96 >> 3, 1, 'M', 24);
+ packets[12] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 96 >> 3, 1, 'M', 24);
/* N*8 at 128. */
- packets[13] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 128 >> 3, 1, 'N', 8);
+ packets[13] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 128 >> 3, 1, 'N', 8);
/* O*8 at 152. */
- packets[14] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 152 >> 3, 1, 'O', 8);
+ packets[14] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 152 >> 3, 1, 'O', 8);
/* P*8 at 160. */
- packets[15] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'P', 8);
+ packets[15] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 160 >> 3, 1, 'P', 8);
/* Q*16 at 176. */
- packets[16] = IPV6BuildTestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 0, 'Q', 16);
+ packets[16] = BuildIpv6TestPacket(IPPROTO_ICMPV6, id, 176 >> 3, 0, 'Q', 16);
default_policy = policy;
@@ -1735,7 +1734,7 @@ DefragSturgesNovakBsdTest(void)
PASS;
}
-static int IPV6DefragSturgesNovakBsdTest(void)
+static int DefragSturgesNovakBsdIpv6Test(void)
{
/* Expected data. */
u_char expected[] = {
@@ -1765,8 +1764,7 @@ static int IPV6DefragSturgesNovakBsdTest(void)
"QQQQQQQQ"
};
- FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_BSD, expected,
- sizeof(expected)));
+ FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_BSD, expected, sizeof(expected)));
PASS;
}
@@ -1805,7 +1803,7 @@ static int DefragSturgesNovakLinuxIpv4Test(void)
PASS;
}
-static int IPV6DefragSturgesNovakLinuxTest(void)
+static int DefragSturgesNovakLinuxIpv6Test(void)
{
/* Expected data. */
u_char expected[] = {
@@ -1835,8 +1833,7 @@ static int IPV6DefragSturgesNovakLinuxTest(void)
"QQQQQQQQ"
};
- FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_LINUX, expected,
- sizeof(expected)));
+ FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_LINUX, expected, sizeof(expected)));
PASS;
}
@@ -1875,7 +1872,7 @@ static int DefragSturgesNovakWindowsIpv4Test(void)
PASS;
}
-static int IPV6DefragSturgesNovakWindowsTest(void)
+static int DefragSturgesNovakWindowsIpv6Test(void)
{
/* Expected data. */
u_char expected[] = {
@@ -1905,8 +1902,7 @@ static int IPV6DefragSturgesNovakWindowsTest(void)
"QQQQQQQQ"
};
- FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_WINDOWS, expected,
- sizeof(expected)));
+ FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_WINDOWS, expected, sizeof(expected)));
PASS;
}
@@ -1945,7 +1941,7 @@ static int DefragSturgesNovakSolarisTest(void)
PASS;
}
-static int IPV6DefragSturgesNovakSolarisTest(void)
+static int DefragSturgesNovakSolarisIpv6Test(void)
{
/* Expected data. */
u_char expected[] = {
@@ -1975,8 +1971,7 @@ static int IPV6DefragSturgesNovakSolarisTest(void)
"QQQQQQQQ"
};
- FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_SOLARIS, expected,
- sizeof(expected)));
+ FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_SOLARIS, expected, sizeof(expected)));
PASS;
}
@@ -2015,7 +2010,7 @@ static int DefragSturgesNovakFirstTest(void)
PASS;
}
-static int IPV6DefragSturgesNovakFirstTest(void)
+static int DefragSturgesNovakFirstIpv6Test(void)
{
/* Expected data. */
u_char expected[] = {
@@ -2045,8 +2040,7 @@ static int IPV6DefragSturgesNovakFirstTest(void)
"QQQQQQQQ"
};
- return IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_FIRST, expected,
- sizeof(expected));
+ return DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_FIRST, expected, sizeof(expected));
}
static int
@@ -2085,7 +2079,7 @@ DefragSturgesNovakLastTest(void)
PASS;
}
-static int IPV6DefragSturgesNovakLastTest(void)
+static int DefragSturgesNovakLastIpv6Test(void)
{
/* Expected data. */
u_char expected[] = {
@@ -2115,8 +2109,7 @@ static int IPV6DefragSturgesNovakLastTest(void)
"QQQQQQQQ"
};
- FAIL_IF_NOT(IPV6DefragDoSturgesNovakTest(DEFRAG_POLICY_LAST, expected,
- sizeof(expected)));
+ FAIL_IF_NOT(DefragDoSturgesNovakIpv6Test(DEFRAG_POLICY_LAST, expected, sizeof(expected)));
PASS;
}
@@ -2131,7 +2124,7 @@ static int DefragTimeoutTest(void)
/* Load in 16 packets. */
for (i = 0; i < 16; i++) {
- Packet *p = BuildTestPacket(IPPROTO_ICMP,i, 0, 1, 'A' + i, 16);
+ Packet *p = BuildIpv4TestPacket(IPPROTO_ICMP, i, 0, 1, 'A' + i, 16);
FAIL_IF_NULL(p);
Packet *tp = Defrag(NULL, NULL, p);
@@ -2141,7 +2134,7 @@ static int DefragTimeoutTest(void)
/* Build a new packet but push the timestamp out by our timeout.
* This should force our previous fragments to be timed out. */
- Packet *p = BuildTestPacket(IPPROTO_ICMP, 99, 0, 1, 'A' + i, 16);
+ Packet *p = BuildIpv4TestPacket(IPPROTO_ICMP, 99, 0, 1, 'A' + i, 16);
FAIL_IF_NULL(p);
p->ts = SCTIME_ADD_SECS(p->ts, defrag_context->timeout + 1);
@@ -2166,7 +2159,7 @@ static int DefragTimeoutTest(void)
* fail. The fix was simple, but this unit test is just to make sure
* its not introduced.
*/
-static int DefragIPv4NoDataTest(void)
+static int DefragNoDataIpv4Test(void)
{
DefragContext *dc = NULL;
Packet *p = NULL;
@@ -2178,7 +2171,7 @@ static int DefragIPv4NoDataTest(void)
FAIL_IF_NULL(dc);
/* This packet has an offset > 0, more frags set to 0 and no data. */
- p = BuildTestPacket(IPPROTO_ICMP, id, 1, 0, 'A', 0);
+ p = BuildIpv4TestPacket(IPPROTO_ICMP, id, 1, 0, 'A', 0);
FAIL_IF_NULL(p);
/* We do not expect a packet returned. */
@@ -2195,7 +2188,7 @@ static int DefragIPv4NoDataTest(void)
PASS;
}
-static int DefragIPv4TooLargeTest(void)
+static int DefragTooLargeIpv4Test(void)
{
DefragContext *dc = NULL;
Packet *p = NULL;
@@ -2207,7 +2200,7 @@ static int DefragIPv4TooLargeTest(void)
/* Create a fragment that would extend past the max allowable size
* for an IPv4 packet. */
- p = BuildTestPacket(IPPROTO_ICMP, 1, 8183, 0, 'A', 71);
+ p = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 8183, 0, 'A', 71);
FAIL_IF_NULL(p);
/* We do not expect a packet returned. */
@@ -2238,9 +2231,9 @@ static int DefragVlanTest(void)
DefragInit();
- p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8);
+ p1 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8);
+ p2 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8);
FAIL_IF_NULL(p2);
/* With no VLAN IDs set, packets should re-assemble. */
@@ -2270,9 +2263,9 @@ static int DefragVlanQinQTest(void)
DefragInit();
- p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8);
+ p1 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8);
+ p2 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8);
FAIL_IF_NULL(p2);
/* With no VLAN IDs set, packets should re-assemble. */
@@ -2304,9 +2297,9 @@ static int DefragVlanQinQinQTest(void)
DefragInit();
- Packet *p1 = BuildTestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8);
+ Packet *p1 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- Packet *p2 = BuildTestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8);
+ Packet *p2 = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 1, 0, 'B', 8);
FAIL_IF_NULL(p2);
/* With no VLAN IDs set, packets should re-assemble. */
@@ -2340,7 +2333,7 @@ static int DefragTrackerReuseTest(void)
/* Build a packet, its not a fragment but shouldn't matter for
* this test. */
- p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 0, 'A', 8);
+ p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 0, 'A', 8);
FAIL_IF_NULL(p1);
/* Get a tracker. It shouldn't look like its already in use. */
@@ -2387,9 +2380,9 @@ static int DefragMfIpv4Test(void)
DefragInit();
- Packet *p1 = BuildTestPacket(IPPROTO_ICMP, ip_id, 2, 1, 'C', 8);
- Packet *p2 = BuildTestPacket(IPPROTO_ICMP, ip_id, 0, 1, 'A', 8);
- Packet *p3 = BuildTestPacket(IPPROTO_ICMP, ip_id, 1, 0, 'B', 8);
+ Packet *p1 = BuildIpv4TestPacket(IPPROTO_ICMP, ip_id, 2, 1, 'C', 8);
+ Packet *p2 = BuildIpv4TestPacket(IPPROTO_ICMP, ip_id, 0, 1, 'A', 8);
+ Packet *p3 = BuildIpv4TestPacket(IPPROTO_ICMP, ip_id, 1, 0, 'B', 8);
FAIL_IF(p1 == NULL || p2 == NULL || p3 == NULL);
p = Defrag(NULL, NULL, p1);
@@ -2434,9 +2427,9 @@ static int DefragMfIpv6Test(void)
DefragInit();
- Packet *p1 = IPV6BuildTestPacket(IPPROTO_ICMPV6, ip_id, 2, 1, 'C', 8);
- Packet *p2 = IPV6BuildTestPacket(IPPROTO_ICMPV6, ip_id, 0, 1, 'A', 8);
- Packet *p3 = IPV6BuildTestPacket(IPPROTO_ICMPV6, ip_id, 1, 0, 'B', 8);
+ Packet *p1 = BuildIpv6TestPacket(IPPROTO_ICMPV6, ip_id, 2, 1, 'C', 8);
+ Packet *p2 = BuildIpv6TestPacket(IPPROTO_ICMPV6, ip_id, 0, 1, 'A', 8);
+ Packet *p3 = BuildIpv6TestPacket(IPPROTO_ICMPV6, ip_id, 1, 0, 'B', 8);
FAIL_IF(p1 == NULL || p2 == NULL || p3 == NULL);
p = Defrag(NULL, NULL, p1);
@@ -2476,11 +2469,11 @@ static int DefragTestBadProto(void)
DefragInit();
- p1 = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8);
+ p1 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 8);
FAIL_IF_NULL(p1);
- p2 = BuildTestPacket(IPPROTO_UDP, id, 1, 1, 'B', 8);
+ p2 = BuildIpv4TestPacket(IPPROTO_UDP, id, 1, 1, 'B', 8);
FAIL_IF_NULL(p2);
- p3 = BuildTestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3);
+ p3 = BuildIpv4TestPacket(IPPROTO_ICMP, id, 2, 0, 'C', 3);
FAIL_IF_NULL(p3);
FAIL_IF_NOT_NULL(Defrag(NULL, NULL, p1));
@@ -2522,10 +2515,10 @@ static int DefragTestJeremyLinux(void)
Packet *packets[4];
int i = 0;
- packets[0] = BuildTestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24);
- packets[1] = BuildTestPacket(IPPROTO_ICMP, id, 40 >> 3, 1, 'B', 48);
- packets[2] = BuildTestPacket(IPPROTO_ICMP, id, 24 >> 3, 1, 'C', 48);
- packets[3] = BuildTestPacket(IPPROTO_ICMP, id, 88 >> 3, 0, 'D', 14);
+ packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 0, 1, 'A', 24);
+ packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 40 >> 3, 1, 'B', 48);
+ packets[2] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 24 >> 3, 1, 'C', 48);
+ packets[3] = BuildIpv4TestPacket(IPPROTO_ICMP, id, 88 >> 3, 0, 'D', 14);
Packet *r = Defrag(NULL, NULL, packets[0]);
FAIL_IF_NOT_NULL(r);
@@ -2657,23 +2650,17 @@ void DefragRegisterTests(void)
UtRegisterTest("DefragSturgesNovakFirstTest", DefragSturgesNovakFirstTest);
UtRegisterTest("DefragSturgesNovakLastTest", DefragSturgesNovakLastTest);
- UtRegisterTest("DefragIPv4NoDataTest", DefragIPv4NoDataTest);
- UtRegisterTest("DefragIPv4TooLargeTest", DefragIPv4TooLargeTest);
-
- UtRegisterTest("IPV6DefragInOrderSimpleTest", IPV6DefragInOrderSimpleTest);
- UtRegisterTest("IPV6DefragReverseSimpleTest", IPV6DefragReverseSimpleTest);
- UtRegisterTest("IPV6DefragSturgesNovakBsdTest",
- IPV6DefragSturgesNovakBsdTest);
- UtRegisterTest("IPV6DefragSturgesNovakLinuxTest",
- IPV6DefragSturgesNovakLinuxTest);
- UtRegisterTest("IPV6DefragSturgesNovakWindowsTest",
- IPV6DefragSturgesNovakWindowsTest);
- UtRegisterTest("IPV6DefragSturgesNovakSolarisTest",
- IPV6DefragSturgesNovakSolarisTest);
- UtRegisterTest("IPV6DefragSturgesNovakFirstTest",
- IPV6DefragSturgesNovakFirstTest);
- UtRegisterTest("IPV6DefragSturgesNovakLastTest",
- IPV6DefragSturgesNovakLastTest);
+ UtRegisterTest("DefragNoDataIpv4Test", DefragNoDataIpv4Test);
+ UtRegisterTest("DefragTooLargeIpv4Test", DefragTooLargeIpv4Test);
+
+ UtRegisterTest("DefragInOrderSimpleIpv6Test", DefragInOrderSimpleIpv6Test);
+ UtRegisterTest("DefragReverseSimpleIpv6Test", DefragReverseSimpleIpv6Test);
+ UtRegisterTest("DefragSturgesNovakBsdIpv6Test", DefragSturgesNovakBsdIpv6Test);
+ UtRegisterTest("DefragSturgesNovakLinuxIpv6Test", DefragSturgesNovakLinuxIpv6Test);
+ UtRegisterTest("DefragSturgesNovakWindowsIpv6Test", DefragSturgesNovakWindowsIpv6Test);
+ UtRegisterTest("DefragSturgesNovakSolarisIpv6Test", DefragSturgesNovakSolarisIpv6Test);
+ UtRegisterTest("DefragSturgesNovakFirstIpv6Test", DefragSturgesNovakFirstIpv6Test);
+ UtRegisterTest("DefragSturgesNovakLastIpv6Test", DefragSturgesNovakLastIpv6Test);
UtRegisterTest("DefragVlanTest", DefragVlanTest);
UtRegisterTest("DefragVlanQinQTest", DefragVlanQinQTest);
--
2.50.1

View File

@@ -1,472 +0,0 @@
From 1e110d0a71db46571040b937e17a4bc9f91d6de9 Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Thu, 7 Dec 2023 16:44:56 -0600
Subject: [PATCH] defrag: fix subsequent overlap of start of original (bsd)
Fix the BSD policy case where a subsequent fragment starts before an
original fragment and overlaps the beginning of the original
fragment. In this case the overlapping data from the new fragment is
preferred.
Suricata was preferring the data from the original fragment, but it
should only do that when the original fragment has an offset <= to the
new fragment.
- Adds test for this case
Bug: #6669
(cherry picked from commit f1709ea551124e1a64fdc509993ad022ab27aa77)
CVE: CVE-2024-32867
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/1e110d0a71db46571040b937e17a4bc9f91d6de9]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/defrag.c | 387 ++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 380 insertions(+), 7 deletions(-)
diff --git a/src/defrag.c b/src/defrag.c
index 99fbab3..28d085d 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -692,16 +692,45 @@ DefragInsertFrag(ThreadVars *tv, DecodeThreadVars *dtv, DefragTracker *tracker,
switch (tracker->policy) {
case DEFRAG_POLICY_BSD:
if (frag_offset < prev->offset + prev->data_len) {
- if (frag_offset >= prev->offset) {
- ltrim = prev->offset + prev->data_len - frag_offset;
+ if (prev->offset <= frag_offset) {
+ /* We prefer the data from the previous
+ * fragment, so trim off the data in the new
+ * fragment that exists in the previous
+ * fragment. */
+ uint16_t prev_end = prev->offset + prev->data_len;
+ if (prev_end > frag_end) {
+ /* Just skip. */
+ /* TODO: Set overlap flag. */
+ goto done;
+ }
+ ltrim = prev_end - frag_offset;
+
+ if ((next != NULL) && (frag_end > next->offset)) {
+ next->ltrim = frag_end - next->offset;
+ }
+
+ goto insert;
}
+
+ /* If the end of this fragment overlaps the start
+ * of the previous fragment, then trim up the
+ * start of previous fragment so this fragment is
+ * used.
+ *
+ * See:
+ * DefragBsdSubsequentOverlapsStartOfOriginal.
+ */
+ if (frag_offset <= prev->offset && frag_end > prev->offset + prev->ltrim) {
+ uint16_t prev_ltrim = frag_end - prev->offset;
+ if (prev_ltrim > prev->ltrim) {
+ prev->ltrim = prev_ltrim;
+ }
+ }
+
if ((next != NULL) && (frag_end > next->offset)) {
next->ltrim = frag_end - next->offset;
}
- if ((frag_offset < prev->offset) &&
- (frag_end >= prev->offset + prev->data_len)) {
- prev->skip = 1;
- }
+
goto insert;
}
break;
@@ -1199,6 +1228,77 @@ error:
return NULL;
}
+/**
+ * Allocate a test packet, much like BuildIpv4TestPacket, but with
+ * the full content provided by the caller.
+ */
+static Packet *BuildIpv4TestPacketWithContent(
+ uint8_t proto, uint16_t id, uint16_t off, int mf, const uint8_t *content, int content_len)
+{
+ Packet *p = NULL;
+ int hlen = 20;
+ int ttl = 64;
+ IPV4Hdr ip4h;
+
+ p = SCCalloc(1, sizeof(*p) + default_packet_size);
+ if (unlikely(p == NULL))
+ return NULL;
+
+ PacketInit(p);
+
+ struct timeval tval;
+ gettimeofday(&tval, NULL);
+ p->ts = SCTIME_FROM_TIMEVAL(&tval);
+ ip4h.ip_verhl = 4 << 4;
+ ip4h.ip_verhl |= hlen >> 2;
+ ip4h.ip_len = htons(hlen + content_len);
+ ip4h.ip_id = htons(id);
+ if (mf)
+ ip4h.ip_off = htons(IP_MF | off);
+ else
+ ip4h.ip_off = htons(off);
+ ip4h.ip_ttl = ttl;
+ ip4h.ip_proto = proto;
+
+ ip4h.s_ip_src.s_addr = 0x01010101; /* 1.1.1.1 */
+ ip4h.s_ip_dst.s_addr = 0x02020202; /* 2.2.2.2 */
+
+ /* copy content_len crap, we need full length */
+ PacketCopyData(p, (uint8_t *)&ip4h, sizeof(ip4h));
+ p->ip4h = (IPV4Hdr *)GET_PKT_DATA(p);
+ SET_IPV4_SRC_ADDR(p, &p->src);
+ SET_IPV4_DST_ADDR(p, &p->dst);
+
+ PacketCopyDataOffset(p, hlen, content, content_len);
+ SET_PKT_LEN(p, hlen + content_len);
+
+ p->ip4h->ip_csum = IPV4Checksum((uint16_t *)GET_PKT_DATA(p), hlen, 0);
+
+ /* Self test. */
+ if (IPV4_GET_VER(p) != 4)
+ goto error;
+ if (IPV4_GET_HLEN(p) != hlen)
+ goto error;
+ if (IPV4_GET_IPLEN(p) != hlen + content_len)
+ goto error;
+ if (IPV4_GET_IPID(p) != id)
+ goto error;
+ if (IPV4_GET_IPOFFSET(p) != off)
+ goto error;
+ if (IPV4_GET_MF(p) != mf)
+ goto error;
+ if (IPV4_GET_IPTTL(p) != ttl)
+ goto error;
+ if (IPV4_GET_IPPROTO(p) != proto)
+ goto error;
+
+ return p;
+error:
+ if (p != NULL)
+ SCFree(p);
+ return NULL;
+}
+
static Packet *BuildIpv6TestPacket(
uint8_t proto, uint32_t id, uint16_t off, int mf, const char content, int content_len)
{
@@ -1270,6 +1370,71 @@ error:
return NULL;
}
+static Packet *BuildIpv6TestPacketWithContent(
+ uint8_t proto, uint32_t id, uint16_t off, int mf, const uint8_t *content, int content_len)
+{
+ Packet *p = NULL;
+ IPV6Hdr ip6h;
+
+ p = SCCalloc(1, sizeof(*p) + default_packet_size);
+ if (unlikely(p == NULL))
+ return NULL;
+
+ PacketInit(p);
+
+ struct timeval tval;
+ gettimeofday(&tval, NULL);
+ p->ts = SCTIME_FROM_TIMEVAL(&tval);
+
+ ip6h.s_ip6_nxt = 44;
+ ip6h.s_ip6_hlim = 2;
+
+ /* Source and dest address - very bogus addresses. */
+ ip6h.s_ip6_src[0] = 0x01010101;
+ ip6h.s_ip6_src[1] = 0x01010101;
+ ip6h.s_ip6_src[2] = 0x01010101;
+ ip6h.s_ip6_src[3] = 0x01010101;
+ ip6h.s_ip6_dst[0] = 0x02020202;
+ ip6h.s_ip6_dst[1] = 0x02020202;
+ ip6h.s_ip6_dst[2] = 0x02020202;
+ ip6h.s_ip6_dst[3] = 0x02020202;
+
+ /* copy content_len crap, we need full length */
+ PacketCopyData(p, (uint8_t *)&ip6h, sizeof(IPV6Hdr));
+
+ p->ip6h = (IPV6Hdr *)GET_PKT_DATA(p);
+ IPV6_SET_RAW_VER(p->ip6h, 6);
+ /* Fragmentation header. */
+ IPV6FragHdr *fh = (IPV6FragHdr *)(GET_PKT_DATA(p) + sizeof(IPV6Hdr));
+ fh->ip6fh_nxt = proto;
+ fh->ip6fh_ident = htonl(id);
+ fh->ip6fh_offlg = htons((off << 3) | mf);
+
+ DecodeIPV6FragHeader(p, (uint8_t *)fh, 8, 8 + content_len, 0);
+
+ PacketCopyDataOffset(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr), content, content_len);
+ SET_PKT_LEN(p, sizeof(IPV6Hdr) + sizeof(IPV6FragHdr) + content_len);
+
+ p->ip6h->s_ip6_plen = htons(sizeof(IPV6FragHdr) + content_len);
+
+ SET_IPV6_SRC_ADDR(p, &p->src);
+ SET_IPV6_DST_ADDR(p, &p->dst);
+
+ /* Self test. */
+ if (IPV6_GET_VER(p) != 6)
+ goto error;
+ if (IPV6_GET_NH(p) != 44)
+ goto error;
+ if (IPV6_GET_PLEN(p) != sizeof(IPV6FragHdr) + content_len)
+ goto error;
+
+ return p;
+error:
+ if (p != NULL)
+ SCFree(p);
+ return NULL;
+}
+
/**
* Test the simplest possible re-assembly scenario. All packet in
* order and no overlaps.
@@ -1575,7 +1740,13 @@ static int DefragDoSturgesNovakTest(int policy, u_char *expected,
FAIL_IF(IPV4_GET_HLEN(reassembled) != 20);
FAIL_IF(IPV4_GET_IPLEN(reassembled) != 20 + 192);
- FAIL_IF(memcmp(GET_PKT_DATA(reassembled) + 20, expected, expected_len) != 0);
+ if (memcmp(expected, GET_PKT_DATA(reassembled) + 20, expected_len) != 0) {
+ printf("Expected:\n");
+ PrintRawDataFp(stdout, expected, expected_len);
+ printf("Got:\n");
+ PrintRawDataFp(stdout, GET_PKT_DATA(reassembled) + 20, GET_PKT_LEN(reassembled) - 20);
+ FAIL;
+ }
SCFree(reassembled);
/* Make sure all frags were returned back to the pool. */
@@ -2543,6 +2714,16 @@ static int DefragTestJeremyLinux(void)
PASS;
}
+/**
+ * | 0 | 8 | 16 | 24 | 32 |
+ * |----------|----------|----------|----------|----------|
+ * | AAAAAAAA | AAAAAAAA |
+ * | | BBBBBBBB | BBBBBBBB | | |
+ * | | | CCCCCCCC | CCCCCCCC | |
+ * | DDDDDDDD | | | | |
+ *
+ * | DDDDDDDD | BBBBBBBB | BBBBBBBB | CCCCCCCC | AAAAAAAA |
+ */
static int DefragBsdFragmentAfterNoMfIpv4Test(void)
{
DefragInit();
@@ -2633,6 +2814,192 @@ static int DefragBsdFragmentAfterNoMfIpv6Test(void)
PASS;
}
+static int DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[4];
+
+ /* Packet 1: off=16, mf=1 */
+ packets[0] = BuildIpv4TestPacketWithContent(
+ IPPROTO_ICMP, 6, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16);
+
+ /* Packet 2: off=8, mf=1 */
+ packets[1] = BuildIpv4TestPacketWithContent(
+ IPPROTO_ICMP, 6, 8 >> 3, 1, (uint8_t *)"AACCBBDDAACCDDBB", 16);
+
+ /* Packet 3: off=0, mf=1: IP and ICMP header. */
+ packets[2] = BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 6, 0, 1, (uint8_t *)"ZZZZZZZZ", 8);
+
+ /* Packet 4: off=8, mf=1 */
+ packets[3] =
+ BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 6, 32 >> 3, 0, (uint8_t *)"DDCCBBAA", 8);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[2]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[3]);
+ FAIL_IF_NULL(r);
+
+ // clang-format off
+ const uint8_t expected[] = {
+ // AACCBBDD
+ // AACCDDBB
+ // AABBDDCC
+ // DDCCBBAA
+ 'A', 'A', 'C', 'C', 'B', 'B', 'D', 'D',
+ 'A', 'A', 'C', 'C', 'D', 'D', 'B', 'B',
+ 'A', 'A', 'B', 'B', 'D', 'D', 'C', 'C',
+ 'D', 'D', 'C', 'C', 'B', 'B', 'A', 'A',
+ };
+ // clang-format on
+
+ FAIL_IF(memcmp(expected, GET_PKT_DATA(r) + 20 + 8, sizeof(expected)) != 0);
+
+ DefragDestroy();
+ PASS;
+}
+
+static int DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[4];
+
+ /* Packet 1: off=16, mf=1 */
+ packets[0] = BuildIpv6TestPacketWithContent(
+ IPPROTO_ICMP, 6, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16);
+
+ /* Packet 2: off=8, mf=1 */
+ packets[1] = BuildIpv6TestPacketWithContent(
+ IPPROTO_ICMP, 6, 8 >> 3, 1, (uint8_t *)"AACCBBDDAACCDDBB", 16);
+
+ /* Packet 3: off=0, mf=1: IP and ICMP header. */
+ packets[2] = BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 6, 0, 1, (uint8_t *)"ZZZZZZZZ", 8);
+
+ /* Packet 4: off=8, mf=1 */
+ packets[3] =
+ BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 6, 32 >> 3, 0, (uint8_t *)"DDCCBBAA", 8);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[2]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[3]);
+ FAIL_IF_NULL(r);
+
+ // clang-format off
+ const uint8_t expected[] = {
+ // AACCBBDD
+ // AACCDDBB
+ // AABBDDCC
+ // DDCCBBAA
+ 'A', 'A', 'C', 'C', 'B', 'B', 'D', 'D',
+ 'A', 'A', 'C', 'C', 'D', 'D', 'B', 'B',
+ 'A', 'A', 'B', 'B', 'D', 'D', 'C', 'C',
+ 'D', 'D', 'C', 'C', 'B', 'B', 'A', 'A',
+ };
+ // clang-format on
+
+ FAIL_IF(memcmp(expected, GET_PKT_DATA(r) + 40 + 8, sizeof(expected)) != 0);
+
+ DefragDestroy();
+ PASS;
+}
+
+/**
+ * #### Input
+ *
+ * | 96 (0) | 104 (8) | 112 (16) | 120 (24) |
+ * |----------|----------|----------|----------|
+ * | | EEEEEEEE | EEEEEEEE | EEEEEEEE |
+ * | MMMMMMMM | MMMMMMMM | MMMMMMMM | |
+ *
+ * #### Expected Output
+ *
+ * | MMMMMMMM | MMMMMMMM | MMMMMMMM | EEEEEEEE |
+ */
+static int DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[2];
+
+ packets[0] = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 8 >> 3, 0, 'E', 24);
+ packets[1] = BuildIpv4TestPacket(IPPROTO_ICMP, 1, 0, 1, 'M', 24);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NULL(r);
+
+ // clang-format off
+ const uint8_t expected[] = {
+ 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M',
+ 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M',
+ 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M',
+ 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E',
+ };
+ // clang-format on
+
+ if (memcmp(expected, GET_PKT_DATA(r) + 20, sizeof(expected)) != 0) {
+ printf("Expected:\n");
+ PrintRawDataFp(stdout, expected, sizeof(expected));
+ printf("Got:\n");
+ PrintRawDataFp(stdout, GET_PKT_DATA(r) + 20, GET_PKT_LEN(r) - 20);
+ FAIL;
+ }
+
+ PASS;
+}
+
+static int DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[2];
+
+ packets[0] = BuildIpv6TestPacket(IPPROTO_ICMP, 1, 8 >> 3, 0, 'E', 24);
+ packets[1] = BuildIpv6TestPacket(IPPROTO_ICMP, 1, 0, 1, 'M', 24);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NULL(r);
+
+ // clang-format off
+ const uint8_t expected[] = {
+ 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M',
+ 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M',
+ 'M', 'M', 'M', 'M', 'M', 'M', 'M', 'M',
+ 'E', 'E', 'E', 'E', 'E', 'E', 'E', 'E',
+ };
+ // clang-format on
+
+ if (memcmp(expected, GET_PKT_DATA(r) + 40, sizeof(expected)) != 0) {
+ printf("Expected:\n");
+ PrintRawDataFp(stdout, expected, sizeof(expected));
+ printf("Got:\n");
+ PrintRawDataFp(stdout, GET_PKT_DATA(r) + 40, GET_PKT_LEN(r) - 40);
+ FAIL;
+ }
+
+ PASS;
+}
+
#endif /* UNITTESTS */
void DefragRegisterTests(void)
@@ -2675,5 +3042,11 @@ void DefragRegisterTests(void)
UtRegisterTest("DefragBsdFragmentAfterNoMfIpv4Test", DefragBsdFragmentAfterNoMfIpv4Test);
UtRegisterTest("DefragBsdFragmentAfterNoMfIpv6Test", DefragBsdFragmentAfterNoMfIpv6Test);
+ UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test",
+ DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test);
+ UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test",
+ DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test);
+ UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2", DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2);
+ UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2", DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2);
#endif /* UNITTESTS */
}
--
2.50.1

View File

@@ -1,169 +0,0 @@
From e6267758ed5da27f804f0c1c07f9423bdf4d72b8 Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Fri, 12 Jan 2024 11:09:59 -0600
Subject: [PATCH] defrag: fix check for complete packet
The list of fragments may still contain overlaps, so adding up the
fragment lengths is flawed. Instead track the largest size of
contiguous data that can be re-assembled.
Bug: #6675
(cherry picked from commit d226d0a3fce8837936e1bdfaee496c80d417e0a5)
CVE: CVE-2024-32867
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/e6267758ed5da27f804f0c1c07f9423bdf4d72b8]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/defrag.c | 116 ++++++++++++++++++++++++++++++++++++++++++++++++++-
1 file changed, 114 insertions(+), 2 deletions(-)
diff --git a/src/defrag.c b/src/defrag.c
index 28d085d..fc46411 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -276,7 +276,8 @@ Defrag4Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
goto done;
}
else {
- len += frag->data_len;
+ /* Update the packet length to the largest known data offset. */
+ len = MAX(len, frag->offset + frag->data_len);
}
}
@@ -434,7 +435,7 @@ Defrag6Reassemble(ThreadVars *tv, DefragTracker *tracker, Packet *p)
goto done;
}
else {
- len += frag->data_len;
+ len = MAX(len, frag->offset + frag->data_len);
}
}
}
@@ -3000,6 +3001,115 @@ static int DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test(void)
PASS;
}
+/**
+ * Reassembly should fail.
+ *
+ * |0 |8 |16 |24 |32 |40 |48 |
+ * |========|========|========|========|========|========|========|
+ * | | |AABBCCDD|AABBDDCC| | | |
+ * | | | | | |AACCBBDD| |
+ * | |AACCDDBB|AADDBBCC| | | | |
+ * |ZZZZZZZZ| | | | | | |
+ * | | | | | | |DDCCBBAA|
+ */
+static int DefragBsdMissingFragmentIpv4Test(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[5];
+
+ packets[0] = BuildIpv4TestPacketWithContent(
+ IPPROTO_ICMP, 189, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16);
+
+ packets[1] =
+ BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 189, 40 >> 3, 1, (uint8_t *)"AACCBBDD", 8);
+
+ packets[2] = BuildIpv4TestPacketWithContent(
+ IPPROTO_ICMP, 189, 8 >> 3, 1, (uint8_t *)"AACCDDBBAADDBBCC", 16);
+
+ /* ICMP header. */
+ packets[3] = BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 189, 0, 1, (uint8_t *)"ZZZZZZZZ", 8);
+
+ packets[4] =
+ BuildIpv4TestPacketWithContent(IPPROTO_ICMP, 189, 48 >> 3, 0, (uint8_t *)"DDCCBBAA", 8);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[2]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[3]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[4]);
+ FAIL_IF_NOT_NULL(r);
+
+#if 0
+ PrintRawDataFp(stdout, GET_PKT_DATA(r) + 20, GET_PKT_LEN(r) - 20);
+#endif
+
+ for (int i = 0; i < 5; i++) {
+ SCFree(packets[i]);
+ }
+
+ DefragDestroy();
+
+ PASS;
+}
+
+static int DefragBsdMissingFragmentIpv6Test(void)
+{
+ DefragInit();
+ default_policy = DEFRAG_POLICY_BSD;
+ Packet *packets[5];
+
+ packets[0] = BuildIpv6TestPacketWithContent(
+ IPPROTO_ICMP, 189, 16 >> 3, 1, (uint8_t *)"AABBCCDDAABBDDCC", 16);
+
+ packets[1] =
+ BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 189, 40 >> 3, 1, (uint8_t *)"AACCBBDD", 8);
+
+ packets[2] = BuildIpv6TestPacketWithContent(
+ IPPROTO_ICMP, 189, 8 >> 3, 1, (uint8_t *)"AACCDDBBAADDBBCC", 16);
+
+ /* ICMP header. */
+ packets[3] = BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 189, 0, 1, (uint8_t *)"ZZZZZZZZ", 8);
+
+ packets[4] =
+ BuildIpv6TestPacketWithContent(IPPROTO_ICMP, 189, 48 >> 3, 0, (uint8_t *)"DDCCBBAA", 8);
+
+ Packet *r = Defrag(NULL, NULL, packets[0]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[1]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[2]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[3]);
+ FAIL_IF_NOT_NULL(r);
+
+ r = Defrag(NULL, NULL, packets[4]);
+ FAIL_IF_NOT_NULL(r);
+
+#if 0
+ PrintRawDataFp(stdout, GET_PKT_DATA(r) + 40, GET_PKT_LEN(r) - 40);
+#endif
+
+ for (int i = 0; i < 5; i++) {
+ SCFree(packets[i]);
+ }
+
+ DefragDestroy();
+
+ PASS;
+}
+
#endif /* UNITTESTS */
void DefragRegisterTests(void)
@@ -3048,5 +3158,7 @@ void DefragRegisterTests(void)
DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test);
UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2", DefragBsdSubsequentOverlapsStartOfOriginalIpv4Test_2);
UtRegisterTest("DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2", DefragBsdSubsequentOverlapsStartOfOriginalIpv6Test_2);
+ UtRegisterTest("DefragBsdMissingFragmentIpv4Test", DefragBsdMissingFragmentIpv4Test);
+ UtRegisterTest("DefragBsdMissingFragmentIpv6Test", DefragBsdMissingFragmentIpv6Test);
#endif /* UNITTESTS */
}
--
2.50.1

View File

@@ -1,123 +0,0 @@
From 72456d359bf3064306b62024c809bb30b162f18c Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Mon, 12 Aug 2024 09:54:43 +0200
Subject: [PATCH] detect/datasets: implement unset command
Ticket: 7195
Otherwise, Suricata aborted on such a rule
(cherry picked from commit e47598110a557bb9f87ea498d85ba91a45bb0cb6)
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/72456d359bf3064306b62024c809bb30b162f18c && https://github.com/OISF/suricata/commit/96d5c81aed01f2bc0cd3e2e60057d0deb38caa99]
CVE: CVE-2024-45795
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
doc/userguide/rules/datasets.rst | 2 +-
src/datasets.c | 20 ++++++++++++++++++++
src/datasets.h | 1 +
src/detect-dataset.c | 11 +++++++++++
src/detect-dataset.h | 5 -----
5 files changed, 33 insertions(+), 6 deletions(-)
diff --git a/doc/userguide/rules/datasets.rst b/doc/userguide/rules/datasets.rst
index 647b12e..dd9ecd2 100644
--- a/doc/userguide/rules/datasets.rst
+++ b/doc/userguide/rules/datasets.rst
@@ -78,7 +78,7 @@ Syntax::
dataset:<cmd>,<name>,<options>;
- dataset:<set|isset|isnotset>,<name> \
+ dataset:<set|unset|isset|isnotset>,<name> \
[, type <string|md5|sha256|ipv4|ip>, save <file name>, load <file name>, state <file name>, memcap <size>, hashsize <size>];
type <type>
diff --git a/src/datasets.c b/src/datasets.c
index d89ed8d..32bcf6e 100644
--- a/src/datasets.c
+++ b/src/datasets.c
@@ -1741,3 +1741,23 @@ int DatasetRemoveSerialized(Dataset *set, const char *string)
return DatasetOpSerialized(set, string, DatasetRemoveString, DatasetRemoveMd5,
DatasetRemoveSha256, DatasetRemoveIPv4, DatasetRemoveIPv6);
}
+
+int DatasetRemove(Dataset *set, const uint8_t *data, const uint32_t data_len)
+{
+ if (set == NULL)
+ return -1;
+
+ switch (set->type) {
+ case DATASET_TYPE_STRING:
+ return DatasetRemoveString(set, data, data_len);
+ case DATASET_TYPE_MD5:
+ return DatasetRemoveMd5(set, data, data_len);
+ case DATASET_TYPE_SHA256:
+ return DatasetRemoveSha256(set, data, data_len);
+ case DATASET_TYPE_IPV4:
+ return DatasetRemoveIPv4(set, data, data_len);
+ case DATASET_TYPE_IPV6:
+ return DatasetRemoveIPv6(set, data, data_len);
+ }
+ return -1;
+}
diff --git a/src/datasets.h b/src/datasets.h
index af4fc17..0f28a9f 100644
--- a/src/datasets.h
+++ b/src/datasets.h
@@ -56,6 +56,7 @@ Dataset *DatasetFind(const char *name, enum DatasetTypes type);
Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save, const char *load,
uint64_t memcap, uint32_t hashsize);
int DatasetAdd(Dataset *set, const uint8_t *data, const uint32_t data_len);
+int DatasetRemove(Dataset *set, const uint8_t *data, const uint32_t data_len);
int DatasetLookup(Dataset *set, const uint8_t *data, const uint32_t data_len);
DataRepResultType DatasetLookupwRep(Dataset *set, const uint8_t *data, const uint32_t data_len,
const DataRepType *rep);
diff --git a/src/detect-dataset.c b/src/detect-dataset.c
index 3d29646..aad5cf0 100644
--- a/src/detect-dataset.c
+++ b/src/detect-dataset.c
@@ -41,6 +41,11 @@
#include "util-path.h"
#include "util-conf.h"
+#define DETECT_DATASET_CMD_SET 0
+#define DETECT_DATASET_CMD_UNSET 1
+#define DETECT_DATASET_CMD_ISNOTSET 2
+#define DETECT_DATASET_CMD_ISSET 3
+
int DetectDatasetMatch (ThreadVars *, DetectEngineThreadCtx *, Packet *,
const Signature *, const SigMatchCtx *);
static int DetectDatasetSetup (DetectEngineCtx *, Signature *, const char *);
@@ -91,6 +96,12 @@ int DetectDatasetBufferMatch(DetectEngineThreadCtx *det_ctx,
return 1;
break;
}
+ case DETECT_DATASET_CMD_UNSET: {
+ int r = DatasetRemove(sd->set, data, data_len);
+ if (r == 1)
+ return 1;
+ break;
+ }
default:
abort();
}
diff --git a/src/detect-dataset.h b/src/detect-dataset.h
index ca83267..d243552 100644
--- a/src/detect-dataset.h
+++ b/src/detect-dataset.h
@@ -26,11 +26,6 @@
#include "datasets.h"
-#define DETECT_DATASET_CMD_SET 0
-#define DETECT_DATASET_CMD_UNSET 1
-#define DETECT_DATASET_CMD_ISNOTSET 2
-#define DETECT_DATASET_CMD_ISSET 3
-
typedef struct DetectDatasetData_ {
Dataset *set;
uint8_t cmd;
--
2.25.1

View File

@@ -1,33 +0,0 @@
From 9203656496c4081260817cce018a0d8fd57869b5 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Mon, 15 Jul 2024 09:52:00 +0200
Subject: [PATCH] defrag: fix off by one
Ticket: 7067
This off by one could lead to an empty fragment being inserted
in the rb tree, which led to integer underflow.
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/9203656496c4081260817cce018a0d8fd57869b5]
CVE: CVE-2024-45796
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/defrag.c | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/defrag.c b/src/defrag.c
index 71cf420..38704c9 100644
--- a/src/defrag.c
+++ b/src/defrag.c
@@ -808,7 +808,7 @@ DefragInsertFrag(ThreadVars *tv, DecodeThreadVars *dtv, DefragTracker *tracker,
}
}
- if (ltrim > data_len) {
+ if (ltrim >= data_len) {
/* Full packet has been trimmed due to the overlap policy. Overlap
* already set. */
goto done;
--
2.25.1

View File

@@ -1,148 +0,0 @@
From 0d550de551b91d5e57ba23e2b1e2c6430fad6818 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <contact@catenacyber.fr>
Date: Mon, 12 Aug 2024 14:06:40 +0200
Subject: [PATCH] headers: put a configurable limit on their numbers
So as to avoid quadratic complexity
Ticket: 7191
Upstream-Status: Backport [https://github.com/OISF/libhtp/commit/0d550de551b91d5e57ba23e2b1e2c6430fad6818]
CVE: CVE-2024-45797
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
htp/htp_config.c | 8 ++++++++
htp/htp_config.h | 8 ++++++++
htp/htp_config_private.h | 6 ++++++
htp/htp_core.h | 1 +
htp/htp_request_generic.c | 11 +++++++++++
htp/htp_response_generic.c | 10 ++++++++++
6 files changed, 44 insertions(+)
diff --git a/htp/htp_config.c b/htp/htp_config.c
index 767458f..9e0eee3 100644
--- a/htp/htp_config.c
+++ b/htp/htp_config.c
@@ -145,6 +145,8 @@ static unsigned char bestfit_1252[] = {
0xff, 0x5d, 0x7d, 0xff, 0x5e, 0x7e, 0x00, 0x00, 0x00
};
+#define HTP_HEADERS_LIMIT 1024
+
htp_cfg_t *htp_config_create(void) {
htp_cfg_t *cfg = calloc(1, sizeof (htp_cfg_t));
if (cfg == NULL) return NULL;
@@ -163,6 +165,7 @@ htp_cfg_t *htp_config_create(void) {
cfg->response_lzma_layer_limit = 1; // default is only one layer
cfg->compression_bomb_limit = HTP_COMPRESSION_BOMB_LIMIT;
cfg->compression_time_limit = HTP_COMPRESSION_TIME_LIMIT_USEC;
+ cfg->number_headers_limit = HTP_HEADERS_LIMIT;
cfg->allow_space_uri = 0;
// Default settings for URL-encoded data.
@@ -542,6 +545,11 @@ void htp_config_set_compression_time_limit(htp_cfg_t *cfg, size_t useclimit) {
}
}
+void htp_config_set_number_headers_limit(htp_cfg_t *cfg, uint32_t limit) {
+ if (cfg == NULL) return;
+ cfg->number_headers_limit = limit;
+}
+
void htp_config_set_log_level(htp_cfg_t *cfg, enum htp_log_level_t log_level) {
if (cfg == NULL) return;
cfg->log_level = log_level;
diff --git a/htp/htp_config.h b/htp/htp_config.h
index d1365dc..ed0eaeb 100644
--- a/htp/htp_config.h
+++ b/htp/htp_config.h
@@ -466,6 +466,14 @@ void htp_config_set_compression_time_limit(htp_cfg_t *cfg, size_t useclimit);
*/
void htp_config_set_log_level(htp_cfg_t *cfg, enum htp_log_level_t log_level);
+/**
+ * Configures the maximum number of headers LibHTP will accept per request or response.
+ *
+ * @param[in] cfg
+ * @param[in] limit
+ */
+void htp_config_set_number_headers_limit(htp_cfg_t *cfg, uint32_t limit);
+
/**
* Configures how the server reacts to encoded NUL bytes. Some servers will stop at
* at NUL, while some will respond with 400 or 404. When the termination option is not
diff --git a/htp/htp_config_private.h b/htp/htp_config_private.h
index 5f1d60d..ecc8717 100644
--- a/htp/htp_config_private.h
+++ b/htp/htp_config_private.h
@@ -360,6 +360,12 @@ struct htp_cfg_t {
/** Whether to decompress compressed request bodies. */
int request_decompression_enabled;
+
+ /** Maximum number of transactions. */
+ uint32_t max_tx;
+
+ /** Maximum number of headers. */
+ uint32_t number_headers_limit;
};
#ifdef __cplusplus
diff --git a/htp/htp_core.h b/htp/htp_core.h
index e4c933e..7c23212 100644
--- a/htp/htp_core.h
+++ b/htp/htp_core.h
@@ -235,6 +235,7 @@ enum htp_file_source_t {
#define HTP_REQUEST_INVALID 0x100000000ULL
#define HTP_REQUEST_INVALID_C_L 0x200000000ULL
#define HTP_AUTH_INVALID 0x400000000ULL
+#define HTP_HEADERS_TOO_MANY 0x800000000ULL
#define HTP_MAX_HEADERS_REPETITIONS 64
diff --git a/htp/htp_request_generic.c b/htp/htp_request_generic.c
index 435cf0a..1350e57 100644
--- a/htp/htp_request_generic.c
+++ b/htp/htp_request_generic.c
@@ -120,6 +120,17 @@ htp_status_t htp_process_request_header_generic(htp_connp_t *connp, unsigned cha
bstr_free(h->value);
free(h);
} else {
+ if (htp_table_size(connp->in_tx->request_headers) > connp->cfg->number_headers_limit) {
+ if (!(connp->in_tx->flags & HTP_HEADERS_TOO_MANY)) {
+ connp->in_tx->flags |= HTP_HEADERS_TOO_MANY;
+ htp_log(connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "Too many request headers");
+ }
+ bstr_free(h->name);
+ bstr_free(h->value);
+ free(h);
+ // give up on what comes next
+ return HTP_ERROR;
+ }
// Add as a new header.
if (htp_table_add(connp->in_tx->request_headers, h->name, h) != HTP_OK) {
bstr_free(h->name);
diff --git a/htp/htp_response_generic.c b/htp/htp_response_generic.c
index f5fa59e..69da625 100644
--- a/htp/htp_response_generic.c
+++ b/htp/htp_response_generic.c
@@ -321,6 +321,16 @@ htp_status_t htp_process_response_header_generic(htp_connp_t *connp, unsigned ch
bstr_free(h->value);
free(h);
} else {
+ if (htp_table_size(connp->out_tx->response_headers) > connp->cfg->number_headers_limit) {
+ if (!(connp->out_tx->flags & HTP_HEADERS_TOO_MANY)) {
+ connp->out_tx->flags |= HTP_HEADERS_TOO_MANY;
+ htp_log(connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "Too many response headers");
+ }
+ bstr_free(h->name);
+ bstr_free(h->value);
+ free(h);
+ return HTP_ERROR;
+ }
// Add as a new header.
if (htp_table_add(connp->out_tx->response_headers, h->name, h) != HTP_OK) {
bstr_free(h->name);
--
2.25.1

View File

@@ -1,205 +0,0 @@
From f80ebd5a30b02db5915f749f0c067c7adefbbe76 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 7 Nov 2024 17:49:45 +0100
Subject: [PATCH] detect/transforms: write directly in inspect buffer
instead of writing to a temporary buffer and then copying,
to save the cost of copying.
Ticket: 7229
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/f80ebd5a30b02db5915f749f0c067c7adefbbe76 && https://github.com/OISF/suricata/commit/c3a6abf60134c2993ee3802ee52206e9fdbf55ba]
CVE: CVE-2024-55605
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/detect-engine.c | 23 ++++++++++++++++++++--
src/detect-engine.h | 3 ++-
src/detect-transform-compress-whitespace.c | 8 ++++++--
src/detect-transform-dotprefix.c | 10 +++++++---
src/detect-transform-strip-whitespace.c | 8 ++++++--
src/detect-transform-urldecode.c | 8 ++++++--
src/detect-transform-xor.c | 7 +++++--
7 files changed, 53 insertions(+), 14 deletions(-)
diff --git a/src/detect-engine.c b/src/detect-engine.c
index 141b48a..cdb24d8 100644
--- a/src/detect-engine.c
+++ b/src/detect-engine.c
@@ -1647,11 +1647,13 @@ void InspectionBufferFree(InspectionBuffer *buffer)
/**
* \brief make sure that the buffer has at least 'min_size' bytes
* Expand the buffer if necessary
+ *
+ * \retval pointer to inner buffer to use, or NULL if realloc failed
*/
-void InspectionBufferCheckAndExpand(InspectionBuffer *buffer, uint32_t min_size)
+uint8_t *InspectionBufferCheckAndExpand(InspectionBuffer *buffer, uint32_t min_size)
{
if (likely(buffer->size >= min_size))
- return;
+ return buffer->buf;
uint32_t new_size = (buffer->size == 0) ? 4096 : buffer->size;
while (new_size < min_size) {
@@ -1662,7 +1664,24 @@ void InspectionBufferCheckAndExpand(InspectionBuffer *buffer, uint32_t min_size)
if (ptr != NULL) {
buffer->buf = ptr;
buffer->size = new_size;
+ } else {
+ return NULL;
}
+ return buffer->buf;
+}
+
+/**
+ * \brief set inspect length of inspect buffer
+ * The inspect buffer may have been overallocated (by strip_whitespace for example)
+ * so, this sets the final length
+ */
+void InspectionBufferTruncate(InspectionBuffer *buffer, uint32_t buf_len)
+{
+ DEBUG_VALIDATE_BUG_ON(buffer->buf == NULL);
+ DEBUG_VALIDATE_BUG_ON(buf_len > buffer->size);
+ buffer->inspect = buffer->buf;
+ buffer->inspect_len = buf_len;
+ buffer->initialized = true;
}
void InspectionBufferCopy(InspectionBuffer *buffer, uint8_t *buf, uint32_t buf_len)
diff --git a/src/detect-engine.h b/src/detect-engine.h
index 7617e66..04713a7 100644
--- a/src/detect-engine.h
+++ b/src/detect-engine.h
@@ -31,7 +31,8 @@ void InspectionBufferInit(InspectionBuffer *buffer, uint32_t initial_size);
void InspectionBufferSetup(DetectEngineThreadCtx *det_ctx, const int list_id,
InspectionBuffer *buffer, const uint8_t *data, const uint32_t data_len);
void InspectionBufferFree(InspectionBuffer *buffer);
-void InspectionBufferCheckAndExpand(InspectionBuffer *buffer, uint32_t min_size);
+uint8_t *InspectionBufferCheckAndExpand(InspectionBuffer *buffer, uint32_t min_size);
+void InspectionBufferTruncate(InspectionBuffer *buffer, uint32_t buf_len);
void InspectionBufferCopy(InspectionBuffer *buffer, uint8_t *buf, uint32_t buf_len);
void InspectionBufferApplyTransforms(InspectionBuffer *buffer,
const DetectEngineTransforms *transforms);
diff --git a/src/detect-transform-compress-whitespace.c b/src/detect-transform-compress-whitespace.c
index 5cbf0fd..cc78c7e 100644
--- a/src/detect-transform-compress-whitespace.c
+++ b/src/detect-transform-compress-whitespace.c
@@ -111,7 +111,11 @@ static void TransformCompressWhitespace(InspectionBuffer *buffer, void *options)
return;
}
- uint8_t output[input_len]; // we can only shrink
+ // we can only shrink
+ uint8_t *output = InspectionBufferCheckAndExpand(buffer, input_len);
+ if (output == NULL) {
+ return;
+ }
uint8_t *oi = output, *os = output;
//PrintRawDataFp(stdout, input, input_len);
@@ -132,7 +136,7 @@ static void TransformCompressWhitespace(InspectionBuffer *buffer, void *options)
uint32_t output_size = oi - os;
//PrintRawDataFp(stdout, output, output_size);
- InspectionBufferCopy(buffer, os, output_size);
+ InspectionBufferTruncate(buffer, output_size);
}
#ifdef UNITTESTS
diff --git a/src/detect-transform-dotprefix.c b/src/detect-transform-dotprefix.c
index 52a2633..d58e1d4 100644
--- a/src/detect-transform-dotprefix.c
+++ b/src/detect-transform-dotprefix.c
@@ -110,11 +110,15 @@ static void TransformDotPrefix(InspectionBuffer *buffer, void *options)
const size_t input_len = buffer->inspect_len;
if (input_len) {
- uint8_t output[input_len + 1]; // For the leading '.'
+ // For the leading '.'
+ uint8_t *output = InspectionBufferCheckAndExpand(buffer, input_len + 1);
+ if (output == NULL) {
+ return;
+ }
+ memmove(&output[1], buffer->inspect, input_len);
output[0] = '.';
- memcpy(&output[1], buffer->inspect, input_len);
- InspectionBufferCopy(buffer, output, input_len + 1);
+ InspectionBufferTruncate(buffer, input_len + 1);
}
}
diff --git a/src/detect-transform-strip-whitespace.c b/src/detect-transform-strip-whitespace.c
index 32fb96f..6040592 100644
--- a/src/detect-transform-strip-whitespace.c
+++ b/src/detect-transform-strip-whitespace.c
@@ -106,7 +106,11 @@ static void TransformStripWhitespace(InspectionBuffer *buffer, void *options)
if (input_len == 0) {
return;
}
- uint8_t output[input_len]; // we can only shrink
+ // we can only shrink
+ uint8_t *output = InspectionBufferCheckAndExpand(buffer, input_len);
+ if (output == NULL) {
+ return;
+ }
uint8_t *oi = output, *os = output;
//PrintRawDataFp(stdout, input, input_len);
@@ -119,7 +123,7 @@ static void TransformStripWhitespace(InspectionBuffer *buffer, void *options)
uint32_t output_size = oi - os;
//PrintRawDataFp(stdout, output, output_size);
- InspectionBufferCopy(buffer, os, output_size);
+ InspectionBufferTruncate(buffer, output_size);
}
#ifdef UNITTESTS
diff --git a/src/detect-transform-urldecode.c b/src/detect-transform-urldecode.c
index 13ef033..a4e9655 100644
--- a/src/detect-transform-urldecode.c
+++ b/src/detect-transform-urldecode.c
@@ -125,12 +125,16 @@ static void TransformUrlDecode(InspectionBuffer *buffer, void *options)
if (input_len == 0) {
return;
}
- uint8_t output[input_len]; // we can only shrink
+ // we can only shrink
+ uint8_t *output = InspectionBufferCheckAndExpand(buffer, input_len);
+ if (output == NULL) {
+ return;
+ }
changed = BufferUrlDecode(input, input_len, output, &output_size);
if (changed) {
- InspectionBufferCopy(buffer, output, output_size);
+ InspectionBufferTruncate(buffer, output_size);
}
}
diff --git a/src/detect-transform-xor.c b/src/detect-transform-xor.c
index e42700f..18f96df 100644
--- a/src/detect-transform-xor.c
+++ b/src/detect-transform-xor.c
@@ -133,12 +133,15 @@ static void DetectTransformXor(InspectionBuffer *buffer, void *options)
if (input_len == 0) {
return;
}
- uint8_t output[input_len];
+ uint8_t *output = InspectionBufferCheckAndExpand(buffer, input_len);
+ if (output == NULL) {
+ return;
+ }
for (uint32_t i = 0; i < input_len; i++) {
output[i] = input[i] ^ pxd->key[i % pxd->length];
}
- InspectionBufferCopy(buffer, output, input_len);
+ InspectionBufferTruncate(buffer, input_len);
}
#ifdef UNITTESTS
--
2.25.1

View File

@@ -1,59 +0,0 @@
From 0dc364aef2dec122fc0e7ee4c190864f4cc5f1bd Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 21 Nov 2024 14:55:32 +0100
Subject: [PATCH] util/streaming-buffer: fix regions intersection
This was not a problem for current callers in Suricata,
as RegionsIntersect is only called through StreamingBufferInsertAt
which is only used by TCP...
And TCP uses default region gap = 256kb, and only calls
StreamingBufferInsertAt with a u16, so TCP never inserts a new
data that will strictly contain an existing region augmented
with region gap, which was the only case where RegionsIntersect
returned the wrong result, which could later lead to a
buffer overflow.
Ticket: 7393
(cherry picked from commit 282509f70c4ce805098e59535af445362e3e9ebd)
CVE: CVE-2024-55627
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/0dc364aef2dec122fc0e7ee4c190864f4cc5f1bd]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/util-streaming-buffer.c | 19 ++++++++-----------
1 file changed, 8 insertions(+), 11 deletions(-)
diff --git a/src/util-streaming-buffer.c b/src/util-streaming-buffer.c
index 7608b50..d1d20e8 100644
--- a/src/util-streaming-buffer.c
+++ b/src/util-streaming-buffer.c
@@ -133,17 +133,14 @@ static inline bool RegionsIntersect(const StreamingBuffer *sb, const StreamingBu
SCLogDebug("r %p: %" PRIu64 "/%" PRIu64 " - adjusted %" PRIu64 "/%" PRIu64, r, r->stream_offset,
r->stream_offset + r->buf_size, reg_o, reg_re);
/* check if data range intersects with region range */
- if (offset >= reg_o && offset <= reg_re) {
- SCLogDebug("r %p is in-scope", r);
- return true;
- }
- if (re >= reg_o && re <= reg_re) {
- SCLogDebug("r %p is in-scope: %" PRIu64 " >= %" PRIu64 " && %" PRIu64 " <= %" PRIu64, r, re,
- reg_o, re, reg_re);
- return true;
- }
- SCLogDebug("r %p is out of scope: %" PRIu64 "/%" PRIu64, r, offset, re);
- return false;
+ /* [offset:re] and [reg_o:reg_re] do not intersect if and only if
+ * re < reg_o or if reg_re < offset (one segment is strictly before the other)
+ * trusting that offset<=re and reg_o<=reg_re
+ */
+ if (re < reg_o || reg_re < offset) {
+ return false;
+ }
+ return true;
}
/** \internal
--
2.50.1

View File

@@ -1,44 +0,0 @@
From 949bfeca0e5f92212dc3d79f4a87c7c482d376aa Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 21 Nov 2024 15:17:21 +0100
Subject: [PATCH] util/streaming-buffer: check need to grow region
Ticket: 7393
As it was possible before earlier patches to get here
with mem_size lesser than start->buf_size,
which caused then an unsigned underflow and a buffer overflow.
(cherry picked from commit 8900041405dbb5f9584edae994af2100733fb4be)
CVE: CVE-2024-55627
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/949bfeca0e5f92212dc3d79f4a87c7c482d376aa]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/util-streaming-buffer.c | 10 +++++++---
1 file changed, 7 insertions(+), 3 deletions(-)
diff --git a/src/util-streaming-buffer.c b/src/util-streaming-buffer.c
index d1d20e8..2625e49 100644
--- a/src/util-streaming-buffer.c
+++ b/src/util-streaming-buffer.c
@@ -931,9 +931,13 @@ static inline void StreamingBufferSlideToOffsetWithRegions(
goto done;
} else {
/* using "main", expand to include "next" */
- if (GrowRegionToSize(sb, cfg, start, mem_size) != 0) {
- new_mem_size = new_data_size;
- goto just_main;
+ if (mem_size > start->buf_size) {
+ // Check that start->buf_size is actually not big enough
+ // As mem_size computation and earlier checks do not make it clear.
+ if (GrowRegionToSize(sb, cfg, start, mem_size) != 0) {
+ new_mem_size = new_data_size;
+ goto just_main;
+ }
}
SCLogDebug("start->buf now size %u", mem_size);
--
2.50.1

View File

@@ -1,41 +0,0 @@
From 7d47fcf7f7fefacd2b0d8f482534a83b35a3c45e Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 21 Nov 2024 15:20:44 +0100
Subject: [PATCH] util/streaming-buffer: add extra safety check
Ticket: 7393
Check if GrowRegionToSize is called with an argument
trying to shrink the region size, and if so do nothing,
ie do not try to shrink, and just return ok.
This way, we avoid a buffer overflow from memeset using an
unsigned having underflowed.
(cherry picked from commit 9a53ec43b13f0039a083950511a18bf6f408e432)
CVE: CVE-2024-55627
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/7d47fcf7f7fefacd2b0d8f482534a83b35a3c45e]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/util-streaming-buffer.c | 4 ++++
1 file changed, 4 insertions(+)
diff --git a/src/util-streaming-buffer.c b/src/util-streaming-buffer.c
index 2625e49..077f8af 100644
--- a/src/util-streaming-buffer.c
+++ b/src/util-streaming-buffer.c
@@ -715,6 +715,10 @@ static inline int WARN_UNUSED GrowRegionToSize(StreamingBuffer *sb,
/* try to grow in multiples of cfg->buf_size */
const uint32_t grow = ToNextMultipleOf(size, cfg->buf_size);
SCLogDebug("grow %u", grow);
+ if (grow <= region->buf_size) {
+ // do not try to shrink, and do not memset with diff having unsigned underflow
+ return SC_OK;
+ }
void *ptr = REALLOC(cfg, region->buf, region->buf_size, grow);
if (ptr == NULL) {
--
2.50.1

View File

@@ -1,738 +0,0 @@
From 58c41a7fa99f62d9a8688e970ab1a9b09c79723a Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Thu, 31 Oct 2024 15:40:40 -0600
Subject: [PATCH] dns: truncate names larger than 1025 characters
Once a name has gone over 1025 chars it will be truncated to 1025
chars and no more labels will be added to it, however the name will
continue to be parsed up to the label limit in attempt to find the end
so parsing can continue.
This introduces a new struct, DNSName which contains the name and any
flags which indicate any name parsing errors which should not error
out parsing the complete message, for example, infinite recursion
after some labels are parsed can continue, or truncation of name where
compression was used so we know the start of the next data to be
parsed.
This limits the logged DNS messages from being over our maximum size
of 10Mb in the case of really long names.
Ticket: #7280
CVE: CVE-2024-55628
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/58c41a7fa99f62d9a8688e970ab1a9b09c79723a]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
rust/src/dns/dns.rs | 41 +++++---
rust/src/dns/log.rs | 41 ++++----
rust/src/dns/lua.rs | 36 ++++---
rust/src/dns/parser.rs | 231 ++++++++++++++++++++++++++++++++++++-----
4 files changed, 277 insertions(+), 72 deletions(-)
diff --git a/rust/src/dns/dns.rs b/rust/src/dns/dns.rs
index 382c76a..680bf7e 100644
--- a/rust/src/dns/dns.rs
+++ b/rust/src/dns/dns.rs
@@ -144,7 +144,7 @@ pub struct DNSHeader {
#[derive(Debug)]
pub struct DNSQueryEntry {
- pub name: Vec<u8>,
+ pub name: DNSName,
pub rrtype: u16,
pub rrclass: u16,
}
@@ -152,9 +152,9 @@ pub struct DNSQueryEntry {
#[derive(Debug, PartialEq, Eq)]
pub struct DNSRDataSOA {
/// Primary name server for this zone
- pub mname: Vec<u8>,
+ pub mname: DNSName,
/// Authority's mailbox
- pub rname: Vec<u8>,
+ pub rname: DNSName,
/// Serial version number
pub serial: u32,
/// Refresh interval (seconds)
@@ -186,7 +186,22 @@ pub struct DNSRDataSRV {
/// Port
pub port: u16,
/// Target
- pub target: Vec<u8>,
+ pub target: DNSName,
+}
+
+bitflags! {
+ #[derive(Default)]
+ pub struct DNSNameFlags: u8 {
+ const INFINITE_LOOP = 0b0000_0001;
+ const TRUNCATED = 0b0000_0010;
+ const LABEL_LIMIT = 0b0000_0100;
+ }
+}
+
+#[derive(Debug, Clone, PartialEq, Eq)]
+pub struct DNSName {
+ pub value: Vec<u8>,
+ pub flags: DNSNameFlags,
}
/// Represents RData of various formats
@@ -196,10 +211,10 @@ pub enum DNSRData {
A(Vec<u8>),
AAAA(Vec<u8>),
// RData is a domain name
- CNAME(Vec<u8>),
- PTR(Vec<u8>),
- MX(Vec<u8>),
- NS(Vec<u8>),
+ CNAME(DNSName),
+ PTR(DNSName),
+ MX(DNSName),
+ NS(DNSName),
// RData is text
TXT(Vec<u8>),
NULL(Vec<u8>),
@@ -213,7 +228,7 @@ pub enum DNSRData {
#[derive(Debug, PartialEq, Eq)]
pub struct DNSAnswerEntry {
- pub name: Vec<u8>,
+ pub name: DNSName,
pub rrtype: u16,
pub rrclass: u16,
pub ttl: u32,
@@ -871,9 +886,9 @@ pub unsafe extern "C" fn rs_dns_tx_get_query_name(
if let Some(request) = &tx.request {
if (i as usize) < request.queries.len() {
let query = &request.queries[i as usize];
- if !query.name.is_empty() {
- *len = query.name.len() as u32;
- *buf = query.name.as_ptr();
+ if !query.name.value.is_empty() {
+ *len = query.name.value.len() as u32;
+ *buf = query.name.value.as_ptr();
return 1;
}
}
@@ -904,7 +919,7 @@ pub unsafe extern "C" fn rs_dns_tx_get_query_rrtype(
if let Some(request) = &tx.request {
if (i as usize) < request.queries.len() {
let query = &request.queries[i as usize];
- if !query.name.is_empty() {
+ if !query.name.value.is_empty() {
*rrtype = query.rrtype;
return 1;
}
diff --git a/rust/src/dns/log.rs b/rust/src/dns/log.rs
index 5212b1a..6bf9589 100644
--- a/rust/src/dns/log.rs
+++ b/rust/src/dns/log.rs
@@ -398,8 +398,8 @@ pub fn dns_print_addr(addr: &Vec<u8>) -> std::string::String {
fn dns_log_soa(soa: &DNSRDataSOA) -> Result<JsonBuilder, JsonError> {
let mut js = JsonBuilder::try_new_object()?;
- js.set_string_from_bytes("mname", &soa.mname)?;
- js.set_string_from_bytes("rname", &soa.rname)?;
+ js.set_string_from_bytes("mname", &soa.mname.value)?;
+ js.set_string_from_bytes("rname", &soa.rname.value)?;
js.set_uint("serial", soa.serial as u64)?;
js.set_uint("refresh", soa.refresh as u64)?;
js.set_uint("retry", soa.retry as u64)?;
@@ -434,7 +434,7 @@ fn dns_log_srv(srv: &DNSRDataSRV) -> Result<JsonBuilder, JsonError> {
js.set_uint("priority", srv.priority as u64)?;
js.set_uint("weight", srv.weight as u64)?;
js.set_uint("port", srv.port as u64)?;
- js.set_string_from_bytes("name", &srv.target)?;
+ js.set_string_from_bytes("name", &srv.target.value)?;
js.close()?;
return Ok(js);
@@ -443,7 +443,7 @@ fn dns_log_srv(srv: &DNSRDataSRV) -> Result<JsonBuilder, JsonError> {
fn dns_log_json_answer_detail(answer: &DNSAnswerEntry) -> Result<JsonBuilder, JsonError> {
let mut jsa = JsonBuilder::try_new_object()?;
- jsa.set_string_from_bytes("rrname", &answer.name)?;
+ jsa.set_string_from_bytes("rrname", &answer.name.value)?;
jsa.set_string("rrtype", &dns_rrtype_string(answer.rrtype))?;
jsa.set_uint("ttl", answer.ttl as u64)?;
@@ -451,12 +451,10 @@ fn dns_log_json_answer_detail(answer: &DNSAnswerEntry) -> Result<JsonBuilder, Js
DNSRData::A(addr) | DNSRData::AAAA(addr) => {
jsa.set_string("rdata", &dns_print_addr(addr))?;
}
- DNSRData::CNAME(bytes)
- | DNSRData::MX(bytes)
- | DNSRData::NS(bytes)
- | DNSRData::TXT(bytes)
- | DNSRData::NULL(bytes)
- | DNSRData::PTR(bytes) => {
+ DNSRData::CNAME(name) | DNSRData::MX(name) | DNSRData::NS(name) | DNSRData::PTR(name) => {
+ jsa.set_string_from_bytes("rdata", &name.value)?;
+ }
+ DNSRData::TXT(bytes) | DNSRData::NULL(bytes) => {
jsa.set_string_from_bytes("rdata", bytes)?;
}
DNSRData::SOA(soa) => {
@@ -507,7 +505,7 @@ fn dns_log_json_answer(
js.set_uint("opcode", opcode as u64)?;
if let Some(query) = response.queries.first() {
- js.set_string_from_bytes("rrname", &query.name)?;
+ js.set_string_from_bytes("rrname", &query.name.value)?;
js.set_string("rrtype", &dns_rrtype_string(query.rrtype))?;
}
js.set_string("rcode", &dns_rcode_string(header.flags))?;
@@ -530,12 +528,19 @@ fn dns_log_json_answer(
a.append_string(&dns_print_addr(addr))?;
}
}
- DNSRData::CNAME(bytes)
- | DNSRData::MX(bytes)
- | DNSRData::NS(bytes)
- | DNSRData::TXT(bytes)
- | DNSRData::NULL(bytes)
- | DNSRData::PTR(bytes) => {
+ DNSRData::CNAME(name)
+ | DNSRData::MX(name)
+ | DNSRData::NS(name)
+ | DNSRData::PTR(name) => {
+ if !answer_types.contains_key(&type_string) {
+ answer_types
+ .insert(type_string.to_string(), JsonBuilder::try_new_array()?);
+ }
+ if let Some(a) = answer_types.get_mut(&type_string) {
+ a.append_string_from_bytes(&name.value)?;
+ }
+ }
+ DNSRData::TXT(bytes) | DNSRData::NULL(bytes) => {
if !answer_types.contains_key(&type_string) {
answer_types.insert(type_string.to_string(), JsonBuilder::try_new_array()?);
}
@@ -614,7 +619,7 @@ fn dns_log_query(
if dns_log_rrtype_enabled(query.rrtype, flags) {
jb.set_string("type", "query")?;
jb.set_uint("id", request.header.tx_id as u64)?;
- jb.set_string_from_bytes("rrname", &query.name)?;
+ jb.set_string_from_bytes("rrname", &query.name.value)?;
jb.set_string("rrtype", &dns_rrtype_string(query.rrtype))?;
jb.set_uint("tx_id", tx.id - 1)?;
if request.header.flags & 0x0040 != 0 {
diff --git a/rust/src/dns/lua.rs b/rust/src/dns/lua.rs
index b9935f8..f7b0c15 100644
--- a/rust/src/dns/lua.rs
+++ b/rust/src/dns/lua.rs
@@ -34,12 +34,12 @@ pub extern "C" fn rs_dns_lua_get_rrname(clua: &mut CLuaState, tx: &mut DNSTransa
if let Some(request) = &tx.request {
if let Some(query) = request.queries.first() {
- lua.pushstring(&String::from_utf8_lossy(&query.name));
+ lua.pushstring(&String::from_utf8_lossy(&query.name.value));
return 1;
}
} else if let Some(response) = &tx.response {
if let Some(query) = response.queries.first() {
- lua.pushstring(&String::from_utf8_lossy(&query.name));
+ lua.pushstring(&String::from_utf8_lossy(&query.name.value));
return 1;
}
}
@@ -86,7 +86,7 @@ pub extern "C" fn rs_dns_lua_get_query_table(
lua.settable(-3);
lua.pushstring("rrname");
- lua.pushstring(&String::from_utf8_lossy(&query.name));
+ lua.pushstring(&String::from_utf8_lossy(&query.name.value));
lua.settable(-3);
lua.settable(-3);
@@ -103,7 +103,7 @@ pub extern "C" fn rs_dns_lua_get_query_table(
lua.settable(-3);
lua.pushstring("rrname");
- lua.pushstring(&String::from_utf8_lossy(&query.name));
+ lua.pushstring(&String::from_utf8_lossy(&query.name.value));
lua.settable(-3);
lua.settable(-3);
@@ -142,11 +142,11 @@ pub extern "C" fn rs_dns_lua_get_answer_table(
lua.settable(-3);
lua.pushstring("rrname");
- lua.pushstring(&String::from_utf8_lossy(&answer.name));
+ lua.pushstring(&String::from_utf8_lossy(&answer.name.value));
lua.settable(-3);
// All rdata types are pushed to "addr" for backwards compatibility
- match answer.data {
+ match &answer.data {
DNSRData::A(ref bytes) | DNSRData::AAAA(ref bytes) => {
if !bytes.is_empty() {
lua.pushstring("addr");
@@ -154,12 +154,18 @@ pub extern "C" fn rs_dns_lua_get_answer_table(
lua.settable(-3);
}
}
- DNSRData::CNAME(ref bytes)
- | DNSRData::MX(ref bytes)
- | DNSRData::NS(ref bytes)
- | DNSRData::TXT(ref bytes)
+ DNSRData::CNAME(name)
+ | DNSRData::MX(name)
+ | DNSRData::NS(name)
+ | DNSRData::PTR(name) => {
+ if !name.value.is_empty() {
+ lua.pushstring("addr");
+ lua.pushstring(&String::from_utf8_lossy(&name.value));
+ lua.settable(-3);
+ }
+ }
+ DNSRData::TXT(ref bytes)
| DNSRData::NULL(ref bytes)
- | DNSRData::PTR(ref bytes)
| DNSRData::Unknown(ref bytes) => {
if !bytes.is_empty() {
lua.pushstring("addr");
@@ -168,9 +174,9 @@ pub extern "C" fn rs_dns_lua_get_answer_table(
}
}
DNSRData::SOA(ref soa) => {
- if !soa.mname.is_empty() {
+ if !soa.mname.value.is_empty() {
lua.pushstring("addr");
- lua.pushstring(&String::from_utf8_lossy(&soa.mname));
+ lua.pushstring(&String::from_utf8_lossy(&soa.mname.value));
lua.settable(-3);
}
}
@@ -181,7 +187,7 @@ pub extern "C" fn rs_dns_lua_get_answer_table(
}
DNSRData::SRV(ref srv) => {
lua.pushstring("addr");
- lua.pushstring(&String::from_utf8_lossy(&srv.target));
+ lua.pushstring(&String::from_utf8_lossy(&srv.target.value));
lua.settable(-3);
}
}
@@ -221,7 +227,7 @@ pub extern "C" fn rs_dns_lua_get_authority_table(
lua.settable(-3);
lua.pushstring("rrname");
- lua.pushstring(&String::from_utf8_lossy(&answer.name));
+ lua.pushstring(&String::from_utf8_lossy(&answer.name.value));
lua.settable(-3);
lua.settable(-3);
diff --git a/rust/src/dns/parser.rs b/rust/src/dns/parser.rs
index a1d97a5..12929bc 100644
--- a/rust/src/dns/parser.rs
+++ b/rust/src/dns/parser.rs
@@ -45,16 +45,48 @@ pub fn dns_parse_header(i: &[u8]) -> IResult<&[u8], DNSHeader> {
))
}
+// Set a maximum assembled hostname length of 1025, this value was
+// chosen as its what DNSMasq uses, a popular DNS server, even if most
+// tooling limits names to 256 chars without special options.
+static MAX_NAME_LEN: usize = 1025;
+
/// Parse a DNS name.
///
+/// Names are parsed with the following restrictions:
+///
+/// - Only 255 segments will be processed, if more the parser may
+/// error out. This is also our safeguard against an infinite loop. If
+/// a pointer had been followed a truncated name will be
+/// returned. However if pointer has been processed we error out as we
+/// don't know where the next data point starts without more
+/// iterations.
+///
+/// - The maximum name parsed in representation format is MAX_NAME_LEN
+/// characters. Once larger, the truncated name will be returned with
+/// a flag specifying the name was truncated. Note that parsing
+/// continues if no pointer has been used as we still need to find the
+/// start of the next protocol unit.
+///
+/// As some error in parsing the name are recoverable, a DNSName
+/// object is returned with flags signifying a recoverable
+/// error. These errors include:
+///
+/// - infinite loop: as we know the end of the name in the input
+/// stream, we can return what we've parsed with the remain data.
+///
+/// - maximum number of segments/labels parsed
+///
+/// - truncation of name when too long
+///
/// Parameters:
/// start: the start of the name
/// message: the complete message that start is a part of with the DNS header
-pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8], Vec<u8>> {
+pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8], DNSName> {
let mut pos = start;
let mut pivot = start;
let mut name: Vec<u8> = Vec::with_capacity(32);
let mut count = 0;
+ let mut flags = DNSNameFlags::default();
loop {
if pos.is_empty() {
@@ -68,10 +100,12 @@ pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8
break;
} else if len & 0b1100_0000 == 0 {
let (rem, label) = length_data(be_u8)(pos)?;
- if !name.is_empty() {
- name.push(b'.');
+ if !flags.contains(DNSNameFlags::TRUNCATED) {
+ if !name.is_empty() {
+ name.push(b'.');
+ }
+ name.extend(label);
}
- name.extend(label);
pos = rem;
} else if len & 0b1100_0000 == 0b1100_0000 {
let (rem, leader) = be_u16(pos)?;
@@ -79,6 +113,21 @@ pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8
if offset > message.len() {
return Err(Err::Error(error_position!(pos, ErrorKind::OctDigit)));
}
+
+ if &message[offset..] == pos {
+ // Self reference, immedate infinite loop.
+ flags.insert(DNSNameFlags::INFINITE_LOOP);
+
+ // If we have followed a pointer, we can just break as
+ // we've already found the end of the input. But if we
+ // have not followed a pointer yet return a parse
+ // error.
+ if pivot != start {
+ break;
+ }
+ return Err(Err::Error(error_position!(pos, ErrorKind::OctDigit)));
+ }
+
pos = &message[offset..];
if pivot == start {
pivot = rem;
@@ -89,19 +138,43 @@ pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8
// Return error if we've looped a certain number of times.
count += 1;
+
if count > 255 {
+ flags.insert(DNSNameFlags::LABEL_LIMIT);
+
+ // Our segment limit has been reached, if we have hit a
+ // pointer we can just return the truncated name. If we
+ // have not hit a pointer, we need to bail with an error.
+ if pivot != start {
+ flags.insert(DNSNameFlags::TRUNCATED);
+ break;
+ }
return Err(Err::Error(error_position!(pos, ErrorKind::OctDigit)));
}
+
+ if name.len() > MAX_NAME_LEN {
+ name.truncate(MAX_NAME_LEN);
+ flags.insert(DNSNameFlags::TRUNCATED);
+
+ // If we have pivoted due to a pointer we know where the
+ // end of the data is, so we can break early. Otherwise
+ // we'll keep parsing in hopes to find the end of the name
+ // so parsing can continue.
+ if pivot != start {
+ break;
+ }
+ }
}
// If we followed a pointer we return the position after the first
// pointer followed. Is there a better way to see if these slices
// diverged from each other? A straight up comparison would
// actually check the contents.
- if pivot.len() != start.len() {
- return Ok((pivot, name));
+ if pivot != start {
+ Ok((pivot, DNSName { value: name, flags }))
+ } else {
+ Ok((pos, DNSName { value: name, flags }))
}
- return Ok((pos, name));
}
/// Parse answer entries.
@@ -121,7 +194,7 @@ fn dns_parse_answer<'a>(
let mut input = slice;
struct Answer<'a> {
- name: Vec<u8>,
+ name: DNSName,
rrtype: u16,
rrclass: u16,
ttl: u32,
@@ -375,7 +448,7 @@ mod tests {
];
let expected_remainder: &[u8] = &[0x00, 0x01, 0x00];
let (remainder, name) = dns_parse_name(buf, buf).unwrap();
- assert_eq!("client-cf.dropbox.com".as_bytes(), &name[..]);
+ assert_eq!("client-cf.dropbox.com".as_bytes(), &name.value[..]);
assert_eq!(remainder, expected_remainder);
}
@@ -411,7 +484,13 @@ mod tests {
let res1 = dns_parse_name(start1, message);
assert_eq!(
res1,
- Ok((&start1[22..], "www.suricata-ids.org".as_bytes().to_vec()))
+ Ok((
+ &start1[22..],
+ DNSName {
+ value: "www.suricata-ids.org".as_bytes().to_vec(),
+ flags: DNSNameFlags::default(),
+ }
+ ))
);
// The second name starts at offset 80, but is just a pointer
@@ -420,7 +499,13 @@ mod tests {
let res2 = dns_parse_name(start2, message);
assert_eq!(
res2,
- Ok((&start2[2..], "www.suricata-ids.org".as_bytes().to_vec()))
+ Ok((
+ &start2[2..],
+ DNSName {
+ value: "www.suricata-ids.org".as_bytes().to_vec(),
+ flags: DNSNameFlags::default()
+ }
+ ))
);
// The third name starts at offset 94, but is a pointer to a
@@ -429,7 +514,13 @@ mod tests {
let res3 = dns_parse_name(start3, message);
assert_eq!(
res3,
- Ok((&start3[2..], "suricata-ids.org".as_bytes().to_vec()))
+ Ok((
+ &start3[2..],
+ DNSName {
+ value: "suricata-ids.org".as_bytes().to_vec(),
+ flags: DNSNameFlags::default()
+ }
+ ))
);
// The fourth name starts at offset 110, but is a pointer to a
@@ -438,7 +529,13 @@ mod tests {
let res4 = dns_parse_name(start4, message);
assert_eq!(
res4,
- Ok((&start4[2..], "suricata-ids.org".as_bytes().to_vec()))
+ Ok((
+ &start4[2..],
+ DNSName {
+ value: "suricata-ids.org".as_bytes().to_vec(),
+ flags: DNSNameFlags::default()
+ }
+ ))
);
}
@@ -473,7 +570,13 @@ mod tests {
let res = dns_parse_name(start, message);
assert_eq!(
res,
- Ok((&start[2..], "block.g1.dropbox.com".as_bytes().to_vec()))
+ Ok((
+ &start[2..],
+ DNSName {
+ value: "block.g1.dropbox.com".as_bytes().to_vec(),
+ flags: DNSNameFlags::default()
+ }
+ ))
);
}
@@ -512,7 +615,7 @@ mod tests {
assert_eq!(request.queries.len(), 1);
let query = &request.queries[0];
- assert_eq!(query.name, "www.suricata-ids.org".as_bytes().to_vec());
+ assert_eq!(query.name.value, "www.suricata-ids.org".as_bytes().to_vec());
assert_eq!(query.rrtype, 1);
assert_eq!(query.rrclass, 1);
}
@@ -569,20 +672,26 @@ mod tests {
assert_eq!(response.answers.len(), 3);
let answer1 = &response.answers[0];
- assert_eq!(answer1.name, "www.suricata-ids.org".as_bytes().to_vec());
+ assert_eq!(answer1.name.value, "www.suricata-ids.org".as_bytes().to_vec());
assert_eq!(answer1.rrtype, 5);
assert_eq!(answer1.rrclass, 1);
assert_eq!(answer1.ttl, 3544);
assert_eq!(
answer1.data,
- DNSRData::CNAME("suricata-ids.org".as_bytes().to_vec())
+ DNSRData::CNAME(DNSName {
+ value: "suricata-ids.org".as_bytes().to_vec(),
+ flags: Default::default(),
+ })
);
let answer2 = &response.answers[1];
assert_eq!(
answer2,
&DNSAnswerEntry {
- name: "suricata-ids.org".as_bytes().to_vec(),
+ name: DNSName {
+ value: "suricata-ids.org".as_bytes().to_vec(),
+ flags: Default::default(),
+ },
rrtype: 1,
rrclass: 1,
ttl: 244,
@@ -594,7 +703,10 @@ mod tests {
assert_eq!(
answer3,
&DNSAnswerEntry {
- name: "suricata-ids.org".as_bytes().to_vec(),
+ name: DNSName {
+ value: "suricata-ids.org".as_bytes().to_vec(),
+ flags: Default::default(),
+ },
rrtype: 1,
rrclass: 1,
ttl: 244,
@@ -653,15 +765,21 @@ mod tests {
assert_eq!(response.authorities.len(), 1);
let authority = &response.authorities[0];
- assert_eq!(authority.name, "oisf.net".as_bytes().to_vec());
+ assert_eq!(authority.name.value, "oisf.net".as_bytes().to_vec());
assert_eq!(authority.rrtype, 6);
assert_eq!(authority.rrclass, 1);
assert_eq!(authority.ttl, 899);
assert_eq!(
authority.data,
DNSRData::SOA(DNSRDataSOA {
- mname: "ns-110.awsdns-13.com".as_bytes().to_vec(),
- rname: "awsdns-hostmaster.amazon.com".as_bytes().to_vec(),
+ mname: DNSName {
+ value: "ns-110.awsdns-13.com".as_bytes().to_vec(),
+ flags: DNSNameFlags::default()
+ },
+ rname: DNSName {
+ value: "awsdns-hostmaster.amazon.com".as_bytes().to_vec(),
+ flags: DNSNameFlags::default()
+ },
serial: 1,
refresh: 7200,
retry: 900,
@@ -712,14 +830,14 @@ mod tests {
assert_eq!(response.queries.len(), 1);
let query = &response.queries[0];
- assert_eq!(query.name, "vaaaakardli.pirate.sea".as_bytes().to_vec());
+ assert_eq!(query.name.value, "vaaaakardli.pirate.sea".as_bytes().to_vec());
assert_eq!(query.rrtype, DNS_RECORD_TYPE_NULL);
assert_eq!(query.rrclass, 1);
assert_eq!(response.answers.len(), 1);
let answer = &response.answers[0];
- assert_eq!(answer.name, "vaaaakardli.pirate.sea".as_bytes().to_vec());
+ assert_eq!(answer.name.value, "vaaaakardli.pirate.sea".as_bytes().to_vec());
assert_eq!(answer.rrtype, DNS_RECORD_TYPE_NULL);
assert_eq!(answer.rrclass, 1);
assert_eq!(answer.ttl, 0);
@@ -819,7 +937,7 @@ mod tests {
assert_eq!(srv.weight, 1);
assert_eq!(srv.port, 5060);
assert_eq!(
- srv.target,
+ srv.target.value,
"sip-anycast-2.voice.google.com".as_bytes().to_vec()
);
}
@@ -834,7 +952,7 @@ mod tests {
assert_eq!(srv.weight, 1);
assert_eq!(srv.port, 5060);
assert_eq!(
- srv.target,
+ srv.target.value,
"sip-anycast-1.voice.google.com".as_bytes().to_vec()
);
}
@@ -848,4 +966,65 @@ mod tests {
}
}
}
+
+ #[test]
+ fn test_dns_parse_name_truncated() {
+ // Generate a non-compressed hostname over our maximum of 1024.
+ let mut buf: Vec<u8> = vec![];
+ for _ in 0..17 {
+ buf.push(0b0011_1111);
+ for _ in 0..63 {
+ buf.push(b'a');
+ }
+ }
+
+ let (rem, name) = dns_parse_name(&buf, &buf).unwrap();
+ assert_eq!(name.value.len(), MAX_NAME_LEN);
+ assert!(name.flags.contains(DNSNameFlags::TRUNCATED));
+ assert!(rem.is_empty());
+ }
+
+ #[test]
+ fn test_dns_parse_name_truncated_max_segments_no_pointer() {
+ let mut buf: Vec<u8> = vec![];
+ for _ in 0..256 {
+ buf.push(0b0000_0001);
+ buf.push(b'a');
+ }
+
+ // This should fail as we've hit the segment limit without a
+ // pointer, we'd need to keep parsing more segments to figure
+ // out where the next data point lies.
+ assert!(dns_parse_name(&buf, &buf).is_err());
+ }
+
+ #[test]
+ fn test_dns_parse_name_truncated_max_segments_with_pointer() {
+ let mut buf: Vec<u8> = vec![];
+
+ // "a" at the beginning of the buffer.
+ buf.push(0b0000_0001);
+ buf.push(b'a');
+
+ // Followed by a pointer back to the beginning.
+ buf.push(0b1100_0000);
+ buf.push(0b0000_0000);
+
+ // The start of the name, which is pointer to the beginning of
+ // the buffer.
+ buf.push(0b1100_0000);
+ buf.push(0b000_0000);
+
+ let (_rem, name) = dns_parse_name(&buf[4..], &buf).unwrap();
+ assert_eq!(name.value.len(), 255);
+ assert!(name.flags.contains(DNSNameFlags::TRUNCATED));
+ }
+
+ #[test]
+ fn test_dns_parse_name_self_reference() {
+ let mut buf = vec![];
+ buf.push(0b1100_0000);
+ buf.push(0b0000_0000);
+ assert!(dns_parse_name(&buf, &buf).is_err());
+ }
}
--
2.50.1

File diff suppressed because it is too large Load Diff

View File

@@ -1,114 +0,0 @@
From 5edb84fe234f47a0fedfbf9b10b49699152fe8cb Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Thu, 31 Oct 2024 15:46:35 -0600
Subject: [PATCH] eve/dns: add truncation flags for fields that are truncated
If rrname, rdata or mname are truncated, set a flag field like
'rrname_truncated: true' to indicate that the name is truncated.
Ticket: #7280
(cherry picked from commit 37f4c52b22fcdde4adf9b479cb5700f89d00768d)
CVE: CVE-2024-55628
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/5edb84fe234f47a0fedfbf9b10b49699152fe8cb]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
etc/schema.json | 7 +++++++
rust/src/dns/log.rs | 19 +++++++++++++++++++
2 files changed, 26 insertions(+)
diff --git a/etc/schema.json b/etc/schema.json
index 99f419f..422d77c 100644
--- a/etc/schema.json
+++ b/etc/schema.json
@@ -790,6 +790,9 @@
"rrname": {
"type": "string"
},
+ "rrname_truncated": {
+ "type": "boolean"
+ },
"rrtype": {
"type": "string"
},
@@ -2365,6 +2368,10 @@
"type": "array",
"items": {
"type": "integer"
+ },
+ "rrname_truncated": {
+ "description": "Set to true if the rrname was too long and truncated by Suricata",
+ "type": "boolean"
}
}
},
diff --git a/rust/src/dns/log.rs b/rust/src/dns/log.rs
index 6bf9589..d0e468d 100644
--- a/rust/src/dns/log.rs
+++ b/rust/src/dns/log.rs
@@ -399,7 +399,13 @@ fn dns_log_soa(soa: &DNSRDataSOA) -> Result<JsonBuilder, JsonError> {
let mut js = JsonBuilder::try_new_object()?;
js.set_string_from_bytes("mname", &soa.mname.value)?;
+ if soa.mname.flags.contains(DNSNameFlags::TRUNCATED) {
+ js.set_bool("mname_truncated", true)?;
+ }
js.set_string_from_bytes("rname", &soa.rname.value)?;
+ if soa.rname.flags.contains(DNSNameFlags::TRUNCATED) {
+ js.set_bool("rname_truncated", true)?;
+ }
js.set_uint("serial", soa.serial as u64)?;
js.set_uint("refresh", soa.refresh as u64)?;
js.set_uint("retry", soa.retry as u64)?;
@@ -444,6 +450,9 @@ fn dns_log_json_answer_detail(answer: &DNSAnswerEntry) -> Result<JsonBuilder, Js
let mut jsa = JsonBuilder::try_new_object()?;
jsa.set_string_from_bytes("rrname", &answer.name.value)?;
+ if answer.name.flags.contains(DNSNameFlags::TRUNCATED) {
+ jsa.set_bool("rrname_truncated", true)?;
+ }
jsa.set_string("rrtype", &dns_rrtype_string(answer.rrtype))?;
jsa.set_uint("ttl", answer.ttl as u64)?;
@@ -453,6 +462,9 @@ fn dns_log_json_answer_detail(answer: &DNSAnswerEntry) -> Result<JsonBuilder, Js
}
DNSRData::CNAME(name) | DNSRData::MX(name) | DNSRData::NS(name) | DNSRData::PTR(name) => {
jsa.set_string_from_bytes("rdata", &name.value)?;
+ if name.flags.contains(DNSNameFlags::TRUNCATED) {
+ jsa.set_bool("rdata_truncated", true)?;
+ }
}
DNSRData::TXT(bytes) | DNSRData::NULL(bytes) => {
jsa.set_string_from_bytes("rdata", bytes)?;
@@ -506,6 +518,9 @@ fn dns_log_json_answer(
if let Some(query) = response.queries.first() {
js.set_string_from_bytes("rrname", &query.name.value)?;
+ if query.name.flags.contains(DNSNameFlags::TRUNCATED) {
+ js.set_bool("rrname_truncated", true)?;
+ }
js.set_string("rrtype", &dns_rrtype_string(query.rrtype))?;
}
js.set_string("rcode", &dns_rcode_string(header.flags))?;
@@ -532,6 +547,7 @@ fn dns_log_json_answer(
| DNSRData::MX(name)
| DNSRData::NS(name)
| DNSRData::PTR(name) => {
+ // Flags like truncated not logged here as it would break the schema.
if !answer_types.contains_key(&type_string) {
answer_types
.insert(type_string.to_string(), JsonBuilder::try_new_array()?);
@@ -620,6 +636,9 @@ fn dns_log_query(
jb.set_string("type", "query")?;
jb.set_uint("id", request.header.tx_id as u64)?;
jb.set_string_from_bytes("rrname", &query.name.value)?;
+ if query.name.flags.contains(DNSNameFlags::TRUNCATED) {
+ jb.set_bool("rrname_truncated", true)?;
+ }
jb.set_string("rrtype", &dns_rrtype_string(query.rrtype))?;
jb.set_uint("tx_id", tx.id - 1)?;
if request.header.flags & 0x0040 != 0 {
--
2.50.1

View File

@@ -1,510 +0,0 @@
From 71212b78bd1b7b841c9d9a907d0b3eea71a54060 Mon Sep 17 00:00:00 2001
From: Jason Ish <jason.ish@oisf.net>
Date: Fri, 1 Nov 2024 11:39:23 -0600
Subject: [PATCH] dns: provide events for recoverable parse errors
Add events for the following resource name parsing issues:
- name truncated as its too long
- maximum number of labels reached
- infinite loop
Currently these events are only registered when encountered, but
recoverable. That is where we are able to return some of the name,
usually in a truncated state.
As name parsing has many code paths, we pass in a pointer to a flag
field that can be updated by the name parser, this is done in
addition to the flags being set on a specific name as when logging we
want to designate which fields are truncated, etc. But for alerts, we
just care that something happened during the parse. It also reduces
errors as it won't be forgotten to check for the flags and set the
event if some new parser is written that also parses names.
Ticket: #7280
(cherry picked from commit 19cf0f81335d9f787d587450f7105ad95a648951)
CVE: CVE-2024-55628
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/71212b78bd1b7b841c9d9a907d0b3eea71a54060]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
rules/dns-events.rules | 9 +++
rust/src/dns/dns.rs | 36 ++++++++++-
rust/src/dns/parser.rs | 136 +++++++++++++++++++++++++----------------
3 files changed, 124 insertions(+), 57 deletions(-)
diff --git a/rules/dns-events.rules b/rules/dns-events.rules
index d4c02b5..cc43629 100644
--- a/rules/dns-events.rules
+++ b/rules/dns-events.rules
@@ -8,3 +8,12 @@ alert dns any any -> any any (msg:"SURICATA DNS Not a response"; flow:to_client;
# Z flag (reserved) not 0
alert dns any any -> any any (msg:"SURICATA DNS Z flag set"; app-layer-event:dns.z_flag_set; classtype:protocol-command-decode; sid:2240006; rev:2;)
alert dns any any -> any any (msg:"SURICATA DNS Invalid opcode"; app-layer-event:dns.invalid_opcode; classtype:protocol-command-decode; sid:2240007; rev:1;)
+
+# A resource name was too long (over 1025 chars)
+alert dns any any -> any any (msg:"SURICATA DNS Name too long"; app-layer-event:dns.name_too_long; classtype:protocol-command-decode; sid:224008; rev:1;)
+
+# An infinite loop was found while decoding a DNS resource name.
+alert dns any any -> any any (msg:"SURICATA DNS Infinite loop"; app-layer-event:dns.infinite_loop; classtype:protocol-command-decode; sid:224009; rev:1;)
+
+# Suricata's maximum number of DNS name labels was reached while parsing a resource name.
+alert dns any any -> any any (msg:"SURICATA DNS Too many labels"; app-layer-event:dns.too_many_labels; classtype:protocol-command-decode; sid:224010; rev:1;)
diff --git a/rust/src/dns/dns.rs b/rust/src/dns/dns.rs
index 680bf7e..34406dc 100644
--- a/rust/src/dns/dns.rs
+++ b/rust/src/dns/dns.rs
@@ -129,6 +129,12 @@ pub enum DNSEvent {
NotResponse,
ZFlagSet,
InvalidOpcode,
+ /// A DNS resource name was exessively long and was truncated.
+ NameTooLong,
+ /// An infinite loop was found while parsing a name.
+ InfiniteLoop,
+ /// Too many labels were found.
+ TooManyLabels,
}
#[derive(Debug, PartialEq, Eq)]
@@ -418,7 +424,7 @@ impl DNSState {
};
match parser::dns_parse_request_body(body, input, header) {
- Ok((_, request)) => {
+ Ok((_, (request, parse_flags))) => {
if request.header.flags & 0x8000 != 0 {
SCLogDebug!("DNS message is not a request");
self.set_event(DNSEvent::NotRequest);
@@ -441,6 +447,18 @@ impl DNSState {
self.set_event(DNSEvent::InvalidOpcode);
}
+ if parse_flags.contains(DNSNameFlags::TRUNCATED) {
+ self.set_event(DNSEvent::NameTooLong);
+ }
+
+ if parse_flags.contains(DNSNameFlags::INFINITE_LOOP) {
+ self.set_event(DNSEvent::InfiniteLoop);
+ }
+
+ if parse_flags.contains(DNSNameFlags::LABEL_LIMIT) {
+ self.set_event(DNSEvent::TooManyLabels);
+ }
+
return true;
}
Err(Err::Incomplete(_)) => {
@@ -490,7 +508,7 @@ impl DNSState {
};
match parser::dns_parse_response_body(body, input, header) {
- Ok((_, response)) => {
+ Ok((_, (response, parse_flags))) => {
SCLogDebug!("Response header flags: {}", response.header.flags);
if response.header.flags & 0x8000 == 0 {
@@ -519,6 +537,18 @@ impl DNSState {
self.set_event(DNSEvent::InvalidOpcode);
}
+ if parse_flags.contains(DNSNameFlags::TRUNCATED) {
+ self.set_event(DNSEvent::NameTooLong);
+ }
+
+ if parse_flags.contains(DNSNameFlags::INFINITE_LOOP) {
+ self.set_event(DNSEvent::InfiniteLoop);
+ }
+
+ if parse_flags.contains(DNSNameFlags::LABEL_LIMIT) {
+ self.set_event(DNSEvent::TooManyLabels);
+ }
+
return true;
}
Err(Err::Incomplete(_)) => {
@@ -718,7 +748,7 @@ fn probe(input: &[u8], dlen: usize) -> (bool, bool, bool) {
}
match parser::dns_parse_request(input) {
- Ok((_, request)) => {
+ Ok((_, (request, _))) => {
return probe_header_validity(&request.header, dlen);
}
Err(Err::Incomplete(_)) => match parser::dns_parse_header(input) {
diff --git a/rust/src/dns/parser.rs b/rust/src/dns/parser.rs
index 12929bc..c98ba05 100644
--- a/rust/src/dns/parser.rs
+++ b/rust/src/dns/parser.rs
@@ -81,7 +81,7 @@ static MAX_NAME_LEN: usize = 1025;
/// Parameters:
/// start: the start of the name
/// message: the complete message that start is a part of with the DNS header
-pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8], DNSName> {
+pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8], parse_flags: &mut DNSNameFlags) -> IResult<&'b [u8], DNSName> {
let mut pos = start;
let mut pivot = start;
let mut name: Vec<u8> = Vec::with_capacity(32);
@@ -166,6 +166,8 @@ pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8
}
}
+ parse_flags.insert(flags);
+
// If we followed a pointer we return the position after the first
// pointer followed. Is there a better way to see if these slices
// diverged from each other? A straight up comparison would
@@ -188,7 +190,7 @@ pub fn dns_parse_name<'b>(start: &'b [u8], message: &'b [u8]) -> IResult<&'b [u8
/// multi-string TXT entry as a single quote string, similar to the
/// output of dig. Something to consider for a future version.
fn dns_parse_answer<'a>(
- slice: &'a [u8], message: &'a [u8], count: usize,
+ slice: &'a [u8], message: &'a [u8], count: usize, flags: &mut DNSNameFlags,
) -> IResult<&'a [u8], Vec<DNSAnswerEntry>> {
let mut answers = Vec::new();
let mut input = slice;
@@ -201,8 +203,10 @@ fn dns_parse_answer<'a>(
data: &'a [u8],
}
- fn subparser<'a>(i: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], Answer<'a>> {
- let (i, name) = dns_parse_name(i, message)?;
+ fn subparser<'a>(
+ i: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+ ) -> IResult<&'a [u8], Answer<'a>> {
+ let (i, name) = dns_parse_name(i, message, flags)?;
let (i, rrtype) = be_u16(i)?;
let (i, rrclass) = be_u16(i)?;
let (i, ttl) = be_u32(i)?;
@@ -218,7 +222,7 @@ fn dns_parse_answer<'a>(
}
for _ in 0..count {
- match subparser(input, message) {
+ match subparser(input, message, flags) {
Ok((rem, val)) => {
let n = match val.rrtype {
DNS_RECORD_TYPE_TXT => {
@@ -236,7 +240,7 @@ fn dns_parse_answer<'a>(
}
};
let result: IResult<&'a [u8], Vec<DNSRData>> =
- many_m_n(1, n, complete(|b| dns_parse_rdata(b, message, val.rrtype)))(val.data);
+ many_m_n(1, n, complete(|b| dns_parse_rdata(b, message, val.rrtype, flags)))(val.data);
match result {
Ok((_, rdatas)) => {
for rdata in rdatas {
@@ -266,18 +270,19 @@ fn dns_parse_answer<'a>(
pub fn dns_parse_response_body<'a>(
i: &'a [u8], message: &'a [u8], header: DNSHeader,
-) -> IResult<&'a [u8], DNSResponse> {
- let (i, queries) = count(|b| dns_parse_query(b, message), header.questions as usize)(i)?;
- let (i, answers) = dns_parse_answer(i, message, header.answer_rr as usize)?;
- let (i, authorities) = dns_parse_answer(i, message, header.authority_rr as usize)?;
+) -> IResult<&'a [u8], (DNSResponse, DNSNameFlags)> {
+ let mut flags = DNSNameFlags::default();
+ let (i, queries) = count(|b| dns_parse_query(b, message, &mut flags), header.questions as usize)(i)?;
+ let (i, answers) = dns_parse_answer(i, message, header.answer_rr as usize, &mut flags)?;
+ let (i, authorities) = dns_parse_answer(i, message, header.authority_rr as usize, &mut flags)?;
Ok((
i,
- DNSResponse {
+ (DNSResponse {
header,
queries,
answers,
authorities,
- },
+ }, flags),
))
}
@@ -286,9 +291,9 @@ pub fn dns_parse_response_body<'a>(
/// Arguments are suitable for using with call!:
///
/// call!(complete_dns_message_buffer)
-pub fn dns_parse_query<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSQueryEntry> {
+pub fn dns_parse_query<'a>(input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags) -> IResult<&'a [u8], DNSQueryEntry> {
let i = input;
- let (i, name) = dns_parse_name(i, message)?;
+ let (i, name) = dns_parse_name(i, message, flags)?;
let (i, rrtype) = be_u16(i)?;
let (i, rrclass) = be_u16(i)?;
Ok((
@@ -309,22 +314,30 @@ fn dns_parse_rdata_aaaa(input: &[u8]) -> IResult<&[u8], DNSRData> {
rest(input).map(|(input, data)| (input, DNSRData::AAAA(data.to_vec())))
}
-fn dns_parse_rdata_cname<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSRData> {
- dns_parse_name(input, message).map(|(input, name)| (input, DNSRData::CNAME(name)))
+fn dns_parse_rdata_cname<'a>(
+ input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+) -> IResult<&'a [u8], DNSRData> {
+ dns_parse_name(input, message, flags).map(|(input, name)| (input, DNSRData::CNAME(name)))
}
-fn dns_parse_rdata_ns<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSRData> {
- dns_parse_name(input, message).map(|(input, name)| (input, DNSRData::NS(name)))
+fn dns_parse_rdata_ns<'a>(
+ input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+) -> IResult<&'a [u8], DNSRData> {
+ dns_parse_name(input, message, flags).map(|(input, name)| (input, DNSRData::NS(name)))
}
-fn dns_parse_rdata_ptr<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSRData> {
- dns_parse_name(input, message).map(|(input, name)| (input, DNSRData::PTR(name)))
+fn dns_parse_rdata_ptr<'a>(
+ input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+) -> IResult<&'a [u8], DNSRData> {
+ dns_parse_name(input, message, flags).map(|(input, name)| (input, DNSRData::PTR(name)))
}
-fn dns_parse_rdata_soa<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSRData> {
+fn dns_parse_rdata_soa<'a>(
+ input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+) -> IResult<&'a [u8], DNSRData> {
let i = input;
- let (i, mname) = dns_parse_name(i, message)?;
- let (i, rname) = dns_parse_name(i, message)?;
+ let (i, mname) = dns_parse_name(i, message, flags)?;
+ let (i, rname) = dns_parse_name(i, message, flags)?;
let (i, serial) = be_u32(i)?;
let (i, refresh) = be_u32(i)?;
let (i, retry) = be_u32(i)?;
@@ -344,20 +357,24 @@ fn dns_parse_rdata_soa<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u
))
}
-fn dns_parse_rdata_mx<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSRData> {
+fn dns_parse_rdata_mx<'a>(
+ input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+) -> IResult<&'a [u8], DNSRData> {
// For MX we skip over the preference field before
// parsing out the name.
let (i, _) = be_u16(input)?;
- let (i, name) = dns_parse_name(i, message)?;
+ let (i, name) = dns_parse_name(i, message, flags)?;
Ok((i, DNSRData::MX(name)))
}
-fn dns_parse_rdata_srv<'a>(input: &'a [u8], message: &'a [u8]) -> IResult<&'a [u8], DNSRData> {
+fn dns_parse_rdata_srv<'a>(
+ input: &'a [u8], message: &'a [u8], flags: &mut DNSNameFlags,
+) -> IResult<&'a [u8], DNSRData> {
let i = input;
let (i, priority) = be_u16(i)?;
let (i, weight) = be_u16(i)?;
let (i, port) = be_u16(i)?;
- let (i, target) = dns_parse_name(i, message)?;
+ let (i, target) = dns_parse_name(i, message, flags)?;
Ok((
i,
DNSRData::SRV(DNSRDataSRV {
@@ -398,26 +415,26 @@ fn dns_parse_rdata_unknown(input: &[u8]) -> IResult<&[u8], DNSRData> {
}
pub fn dns_parse_rdata<'a>(
- input: &'a [u8], message: &'a [u8], rrtype: u16,
+ input: &'a [u8], message: &'a [u8], rrtype: u16, flags: &mut DNSNameFlags
) -> IResult<&'a [u8], DNSRData> {
match rrtype {
DNS_RECORD_TYPE_A => dns_parse_rdata_a(input),
DNS_RECORD_TYPE_AAAA => dns_parse_rdata_aaaa(input),
- DNS_RECORD_TYPE_CNAME => dns_parse_rdata_cname(input, message),
- DNS_RECORD_TYPE_PTR => dns_parse_rdata_ptr(input, message),
- DNS_RECORD_TYPE_SOA => dns_parse_rdata_soa(input, message),
- DNS_RECORD_TYPE_MX => dns_parse_rdata_mx(input, message),
- DNS_RECORD_TYPE_NS => dns_parse_rdata_ns(input, message),
+ DNS_RECORD_TYPE_CNAME => dns_parse_rdata_cname(input, message, flags),
+ DNS_RECORD_TYPE_PTR => dns_parse_rdata_ptr(input, message, flags),
+ DNS_RECORD_TYPE_SOA => dns_parse_rdata_soa(input, message, flags),
+ DNS_RECORD_TYPE_MX => dns_parse_rdata_mx(input, message, flags),
+ DNS_RECORD_TYPE_NS => dns_parse_rdata_ns(input, message, flags),
DNS_RECORD_TYPE_TXT => dns_parse_rdata_txt(input),
DNS_RECORD_TYPE_NULL => dns_parse_rdata_null(input),
DNS_RECORD_TYPE_SSHFP => dns_parse_rdata_sshfp(input),
- DNS_RECORD_TYPE_SRV => dns_parse_rdata_srv(input, message),
+ DNS_RECORD_TYPE_SRV => dns_parse_rdata_srv(input, message, flags),
_ => dns_parse_rdata_unknown(input),
}
}
/// Parse a DNS request.
-pub fn dns_parse_request(input: &[u8]) -> IResult<&[u8], DNSRequest> {
+pub fn dns_parse_request(input: &[u8]) -> IResult<&[u8], (DNSRequest, DNSNameFlags)> {
let i = input;
let (i, header) = dns_parse_header(i)?;
dns_parse_request_body(i, input, header)
@@ -425,10 +442,11 @@ pub fn dns_parse_request(input: &[u8]) -> IResult<&[u8], DNSRequest> {
pub fn dns_parse_request_body<'a>(
input: &'a [u8], message: &'a [u8], header: DNSHeader,
-) -> IResult<&'a [u8], DNSRequest> {
+) -> IResult<&'a [u8], (DNSRequest, DNSNameFlags)> {
+ let mut flags = DNSNameFlags::default();
let i = input;
- let (i, queries) = count(|b| dns_parse_query(b, message), header.questions as usize)(i)?;
- Ok((i, DNSRequest { header, queries }))
+ let (i, queries) = count(|b| dns_parse_query(b, message, &mut flags), header.questions as usize)(i)?;
+ Ok((i, (DNSRequest { header, queries }, flags)))
}
#[cfg(test)]
@@ -447,7 +465,8 @@ mod tests {
0x03, 0x63, 0x6f, 0x6d, 0x00, 0x00, 0x01, 0x00, /* .com.... */
];
let expected_remainder: &[u8] = &[0x00, 0x01, 0x00];
- let (remainder, name) = dns_parse_name(buf, buf).unwrap();
+ let mut flags = DNSNameFlags::default();
+ let (remainder, name) = dns_parse_name(buf, buf, &mut flags).unwrap();
assert_eq!("client-cf.dropbox.com".as_bytes(), &name.value[..]);
assert_eq!(remainder, expected_remainder);
}
@@ -481,7 +500,8 @@ mod tests {
// The name at offset 54 is the complete name.
let start1 = &buf[54..];
- let res1 = dns_parse_name(start1, message);
+ let mut flags = DNSNameFlags::default();
+ let res1 = dns_parse_name(start1, message, &mut flags);
assert_eq!(
res1,
Ok((
@@ -496,7 +516,8 @@ mod tests {
// The second name starts at offset 80, but is just a pointer
// to the first.
let start2 = &buf[80..];
- let res2 = dns_parse_name(start2, message);
+ let mut flags = DNSNameFlags::default();
+ let res2 = dns_parse_name(start2, message, &mut flags);
assert_eq!(
res2,
Ok((
@@ -511,7 +532,8 @@ mod tests {
// The third name starts at offset 94, but is a pointer to a
// portion of the first.
let start3 = &buf[94..];
- let res3 = dns_parse_name(start3, message);
+ let mut flags = DNSNameFlags::default();
+ let res3 = dns_parse_name(start3, message, &mut flags);
assert_eq!(
res3,
Ok((
@@ -526,7 +548,8 @@ mod tests {
// The fourth name starts at offset 110, but is a pointer to a
// portion of the first.
let start4 = &buf[110..];
- let res4 = dns_parse_name(start4, message);
+ let mut flags = DNSNameFlags::default();
+ let res4 = dns_parse_name(start4, message, &mut flags);
assert_eq!(
res4,
Ok((
@@ -567,7 +590,8 @@ mod tests {
// packet).
let start: &[u8] = &buf[100..];
- let res = dns_parse_name(start, message);
+ let mut flags = DNSNameFlags::default();
+ let res = dns_parse_name(start, message, &mut flags);
assert_eq!(
res,
Ok((
@@ -595,7 +619,7 @@ mod tests {
let res = dns_parse_request(pkt);
match res {
- Ok((rem, request)) => {
+ Ok((rem, (request, _flags))) => {
// For now we have some remainder data as there is an
// additional record type we don't parse yet.
assert!(!rem.is_empty());
@@ -626,7 +650,7 @@ mod tests {
}
/// Parse a DNS response.
- fn dns_parse_response(message: &[u8]) -> IResult<&[u8], DNSResponse> {
+ fn dns_parse_response(message: &[u8]) -> IResult<&[u8], (DNSResponse, DNSNameFlags)> {
let i = message;
let (i, header) = dns_parse_header(i)?;
dns_parse_response_body(i, message, header)
@@ -653,7 +677,7 @@ mod tests {
let res = dns_parse_response(pkt);
match res {
- Ok((rem, response)) => {
+ Ok((rem, (response, _flags))) => {
// The response should be full parsed.
assert_eq!(rem.len(), 0);
@@ -745,7 +769,7 @@ mod tests {
let res = dns_parse_response(pkt);
match res {
- Ok((rem, response)) => {
+ Ok((rem, (response, _flags))) => {
// For now we have some remainder data as there is an
// additional record type we don't parse yet.
assert!(!rem.is_empty());
@@ -812,7 +836,7 @@ mod tests {
let res = dns_parse_response(pkt);
match res {
- Ok((rem, response)) => {
+ Ok((rem, (response, _flags))) => {
// The response should be fully parsed.
assert_eq!(rem.len(), 0);
@@ -924,7 +948,7 @@ mod tests {
let res = dns_parse_response(pkt);
match res {
- Ok((rem, response)) => {
+ Ok((rem, (response, _flags))) => {
// The data should be fully parsed.
assert_eq!(rem.len(), 0);
@@ -978,7 +1002,8 @@ mod tests {
}
}
- let (rem, name) = dns_parse_name(&buf, &buf).unwrap();
+ let mut flags = DNSNameFlags::default();
+ let (rem, name) = dns_parse_name(&buf, &buf, &mut flags).unwrap();
assert_eq!(name.value.len(), MAX_NAME_LEN);
assert!(name.flags.contains(DNSNameFlags::TRUNCATED));
assert!(rem.is_empty());
@@ -995,7 +1020,8 @@ mod tests {
// This should fail as we've hit the segment limit without a
// pointer, we'd need to keep parsing more segments to figure
// out where the next data point lies.
- assert!(dns_parse_name(&buf, &buf).is_err());
+ let mut flags = DNSNameFlags::default();
+ assert!(dns_parse_name(&buf, &buf, &mut flags).is_err());
}
#[test]
@@ -1015,7 +1041,8 @@ mod tests {
buf.push(0b1100_0000);
buf.push(0b000_0000);
- let (_rem, name) = dns_parse_name(&buf[4..], &buf).unwrap();
+ let mut flags = DNSNameFlags::default();
+ let (_rem, name) = dns_parse_name(&buf[4..], &buf, &mut flags).unwrap();
assert_eq!(name.value.len(), 255);
assert!(name.flags.contains(DNSNameFlags::TRUNCATED));
}
@@ -1025,6 +1052,7 @@ mod tests {
let mut buf = vec![];
buf.push(0b1100_0000);
buf.push(0b0000_0000);
- assert!(dns_parse_name(&buf, &buf).is_err());
+ let mut flags = DNSNameFlags::default();
+ assert!(dns_parse_name(&buf, &buf, &mut flags).is_err());
}
}
--
2.50.1

View File

@@ -1,124 +0,0 @@
From 2f432c99a9734ea3a75c9218f35060e11a7a39ad Mon Sep 17 00:00:00 2001
From: Victor Julien <vjulien@oisf.net>
Date: Tue, 18 Mar 2025 10:55:39 +0100
Subject: [PATCH] datasets: improve default hashsize handling
Make hashsize default local to dataset code, instead of relying on the
thash code.
Use the same default value as before.
(cherry picked from commit d32a39ca4b53d7f659f4f0a2a5c162ef97dc4797)
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/2f432c99a9734ea3a75c9218f35060e11a7a39ad]
CVE: CVE-2025-29916
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/datasets.c | 37 +++++++++++++++++++++++--------------
1 file changed, 23 insertions(+), 14 deletions(-)
diff --git a/src/datasets.c b/src/datasets.c
index 32bcf6e..89e7899 100644
--- a/src/datasets.c
+++ b/src/datasets.c
@@ -677,6 +677,11 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
}
}
+ GetDefaultMemcap(&default_memcap, &default_hashsize);
+ if (hashsize == 0) {
+ hashsize = default_hashsize;
+ }
+
set = DatasetAlloc(name);
if (set == NULL) {
goto out_err;
@@ -696,12 +701,11 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
char cnf_name[128];
snprintf(cnf_name, sizeof(cnf_name), "datasets.%s.hash", name);
- GetDefaultMemcap(&default_memcap, &default_hashsize);
switch (type) {
case DATASET_TYPE_MD5:
set->hash = THashInit(cnf_name, sizeof(Md5Type), Md5StrSet, Md5StrFree, Md5StrHash,
Md5StrCompare, load != NULL ? 1 : 0, memcap > 0 ? memcap : default_memcap,
- hashsize > 0 ? hashsize : default_hashsize);
+ hashsize);
if (set->hash == NULL)
goto out_err;
if (DatasetLoadMd5(set) < 0)
@@ -710,7 +714,7 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
case DATASET_TYPE_STRING:
set->hash = THashInit(cnf_name, sizeof(StringType), StringSet, StringFree, StringHash,
StringCompare, load != NULL ? 1 : 0, memcap > 0 ? memcap : default_memcap,
- hashsize > 0 ? hashsize : default_hashsize);
+ hashsize);
if (set->hash == NULL)
goto out_err;
if (DatasetLoadString(set) < 0)
@@ -719,26 +723,25 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
case DATASET_TYPE_SHA256:
set->hash = THashInit(cnf_name, sizeof(Sha256Type), Sha256StrSet, Sha256StrFree,
Sha256StrHash, Sha256StrCompare, load != NULL ? 1 : 0,
- memcap > 0 ? memcap : default_memcap,
- hashsize > 0 ? hashsize : default_hashsize);
+ memcap > 0 ? memcap : default_memcap, hashsize);
if (set->hash == NULL)
goto out_err;
if (DatasetLoadSha256(set) < 0)
goto out_err;
break;
case DATASET_TYPE_IPV4:
- set->hash = THashInit(cnf_name, sizeof(IPv4Type), IPv4Set, IPv4Free, IPv4Hash,
- IPv4Compare, load != NULL ? 1 : 0, memcap > 0 ? memcap : default_memcap,
- hashsize > 0 ? hashsize : default_hashsize);
+ set->hash =
+ THashInit(cnf_name, sizeof(IPv4Type), IPv4Set, IPv4Free, IPv4Hash, IPv4Compare,
+ load != NULL ? 1 : 0, memcap > 0 ? memcap : default_memcap, hashsize);
if (set->hash == NULL)
goto out_err;
if (DatasetLoadIPv4(set) < 0)
goto out_err;
break;
case DATASET_TYPE_IPV6:
- set->hash = THashInit(cnf_name, sizeof(IPv6Type), IPv6Set, IPv6Free, IPv6Hash,
- IPv6Compare, load != NULL ? 1 : 0, memcap > 0 ? memcap : default_memcap,
- hashsize > 0 ? hashsize : default_hashsize);
+ set->hash =
+ THashInit(cnf_name, sizeof(IPv6Type), IPv6Set, IPv6Free, IPv6Hash, IPv6Compare,
+ load != NULL ? 1 : 0, memcap > 0 ? memcap : default_memcap, hashsize);
if (set->hash == NULL)
goto out_err;
if (DatasetLoadIPv6(set) < 0)
@@ -825,6 +828,10 @@ void DatasetPostReloadCleanup(void)
SCMutexUnlock(&sets_lock);
}
+/* Value reflects THASH_DEFAULT_HASHSIZE which is what the default was earlier,
+ * despite 2048 commented out in the default yaml. */
+#define DATASETS_HASHSIZE_DEFAULT 4096
+
static void GetDefaultMemcap(uint64_t *memcap, uint32_t *hashsize)
{
const char *str = NULL;
@@ -836,12 +843,14 @@ static void GetDefaultMemcap(uint64_t *memcap, uint32_t *hashsize)
*memcap = 0;
}
}
+
+ *hashsize = (uint32_t)DATASETS_HASHSIZE_DEFAULT;
if (ConfGet("datasets.defaults.hashsize", &str) == 1) {
if (ParseSizeStringU32(str, hashsize) < 0) {
+ *hashsize = (uint32_t)DATASETS_HASHSIZE_DEFAULT;
SCLogWarning("hashsize value cannot be deduced: %s,"
- " resetting to default",
- str);
- *hashsize = 0;
+ " resetting to default: %u",
+ str, *hashsize);
}
}
}
--
2.49.0

View File

@@ -1,197 +0,0 @@
From e28c8c655a324a18932655a2c2b8f0d5aa1c55d7 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Tue, 18 Mar 2025 10:55:39 +0100
Subject: [PATCH] detect: add configurable limits for datasets
Ticket: 7615
Avoids signatures setting extreme hash sizes, which would lead to very
high memory use.
Default to allowing:
- 65536 per dataset
- 16777216 total
To override these built-in defaults:
```yaml
datasets:
# Limits for per rule dataset instances to avoid rules using too many
# resources.
limits:
# Max value for per dataset `hashsize` setting
#single-hashsize: 65536
# Max combined hashsize values for all datasets.
#total-hashsizes: 16777216
```
(cherry picked from commit a7713db709b8a0be5fc5e5809ab58e9b14a16e85)
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/e28c8c655a324a18932655a2c2b8f0d5aa1c55d7]
CVE: CVE-2025-29916
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/datasets.c | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
src/util-thash.c | 5 ----
suricata.yaml.in | 8 ++++++
3 files changed, 73 insertions(+), 5 deletions(-)
diff --git a/src/datasets.c b/src/datasets.c
index 89e7899..0729894 100644
--- a/src/datasets.c
+++ b/src/datasets.c
@@ -39,11 +39,16 @@
#include "util-misc.h"
#include "util-path.h"
#include "util-debug.h"
+#include "util-validate.h"
SCMutex sets_lock = SCMUTEX_INITIALIZER;
static Dataset *sets = NULL;
static uint32_t set_ids = 0;
+uint32_t dataset_max_one_hashsize = 65536;
+uint32_t dataset_max_total_hashsize = 16777216;
+uint32_t dataset_used_hashsize = 0;
+
static int DatasetAddwRep(Dataset *set, const uint8_t *data, const uint32_t data_len,
DataRepType *rep);
@@ -629,6 +634,34 @@ Dataset *DatasetFind(const char *name, enum DatasetTypes type)
return set;
}
+static bool DatasetCheckHashsize(const char *name, uint32_t hash_size)
+{
+ if (dataset_max_one_hashsize > 0 && hash_size > dataset_max_one_hashsize) {
+ SCLogError("hashsize %u in dataset '%s' exceeds configured 'single-hashsize' limit (%u)",
+ hash_size, name, dataset_max_one_hashsize);
+ return false;
+ }
+ // we cannot underflow as we know from conf loading that
+ // dataset_max_total_hashsize >= dataset_max_one_hashsize if dataset_max_total_hashsize > 0
+ if (dataset_max_total_hashsize > 0 &&
+ dataset_max_total_hashsize - hash_size < dataset_used_hashsize) {
+ SCLogError("hashsize %u in dataset '%s' exceeds configured 'total-hashsizes' limit (%u, in "
+ "use %u)",
+ hash_size, name, dataset_max_total_hashsize, dataset_used_hashsize);
+ return false;
+ }
+
+ return true;
+}
+
+static void DatasetUpdateHashsize(const char *name, uint32_t hash_size)
+{
+ if (dataset_max_total_hashsize > 0) {
+ dataset_used_hashsize += hash_size;
+ SCLogDebug("set %s adding with hash_size %u", name, hash_size);
+ }
+}
+
Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save, const char *load,
uint64_t memcap, uint32_t hashsize)
{
@@ -682,6 +715,10 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
hashsize = default_hashsize;
}
+ if (!DatasetCheckHashsize(name, hashsize)) {
+ goto out_err;
+ }
+
set = DatasetAlloc(name);
if (set == NULL) {
goto out_err;
@@ -755,6 +792,10 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
set->next = sets;
sets = set;
+ /* hash size accounting */
+ DEBUG_VALIDATE_BUG_ON(set->hash->config.hash_size != hashsize);
+ DatasetUpdateHashsize(set->name, set->hash->config.hash_size);
+
SCMutexUnlock(&sets_lock);
return set;
out_err:
@@ -796,6 +837,9 @@ void DatasetReload(void)
continue;
}
set->hidden = true;
+ if (dataset_max_total_hashsize > 0) {
+ dataset_used_hashsize -= set->hash->config.hash_size;
+ }
SCLogDebug("Set %s at %p hidden successfully", set->name, set);
set = set->next;
}
@@ -863,6 +907,27 @@ int DatasetsInit(void)
uint32_t default_hashsize = 0;
GetDefaultMemcap(&default_memcap, &default_hashsize);
if (datasets != NULL) {
+ const char *str = NULL;
+ if (ConfGet("datasets.limits.total-hashsizes", &str) == 1) {
+ if (ParseSizeStringU32(str, &dataset_max_total_hashsize) < 0) {
+ FatalError("failed to parse datasets.limits.total-hashsizes value: %s", str);
+ }
+ }
+ if (ConfGet("datasets.limits.single-hashsize", &str) == 1) {
+ if (ParseSizeStringU32(str, &dataset_max_one_hashsize) < 0) {
+ FatalError("failed to parse datasets.limits.single-hashsize value: %s", str);
+ }
+ }
+ if (dataset_max_total_hashsize > 0 &&
+ dataset_max_total_hashsize < dataset_max_one_hashsize) {
+ FatalError("total-hashsizes (%u) cannot be smaller than single-hashsize (%u)",
+ dataset_max_total_hashsize, dataset_max_one_hashsize);
+ }
+ if (dataset_max_total_hashsize > 0 && dataset_max_one_hashsize == 0) {
+ // the total limit also applies for single limit
+ dataset_max_one_hashsize = dataset_max_total_hashsize;
+ }
+
int list_pos = 0;
ConfNode *iter = NULL;
TAILQ_FOREACH(iter, &datasets->head, next) {
diff --git a/src/util-thash.c b/src/util-thash.c
index 6443990..3fba3ef 100644
--- a/src/util-thash.c
+++ b/src/util-thash.c
@@ -310,16 +310,11 @@ THashTableContext *THashInit(const char *cnf_prefix, size_t data_size,
ctx->config.hash_size = hashsize > 0 ? hashsize : THASH_DEFAULT_HASHSIZE;
/* Reset memcap in case of loading from file to the highest possible value
unless defined by the rule keyword */
-#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- // limit memcap size to default when fuzzing
- ctx->config.memcap = THASH_DEFAULT_MEMCAP;
-#else
if (memcap > 0) {
ctx->config.memcap = memcap;
} else {
ctx->config.memcap = reset_memcap ? UINT64_MAX : THASH_DEFAULT_MEMCAP;
}
-#endif
ctx->config.prealloc = THASH_DEFAULT_PREALLOC;
SC_ATOMIC_INIT(ctx->counter);
diff --git a/suricata.yaml.in b/suricata.yaml.in
index 6303991..b218515 100644
--- a/suricata.yaml.in
+++ b/suricata.yaml.in
@@ -1167,6 +1167,14 @@ datasets:
#memcap: 100mb
#hashsize: 2048
+ # Limits for per rule dataset instances to avoid rules using too many
+ # resources.
+ limits:
+ # Max value for per dataset `hashsize` setting
+ #single-hashsize: 65536
+ # Max combined hashsize values for all datasets.
+ #total-hashsizes: 16777216
+
rules:
# Set to true to allow absolute filenames and filenames that use
# ".." components to reference parent directories in rules that specify
--
2.49.0

View File

@@ -1,55 +0,0 @@
From d86c5f9f0c75736d4fce93e27c0773fcb27e1047 Mon Sep 17 00:00:00 2001
From: Victor Julien <vjulien@oisf.net>
Date: Mon, 17 Mar 2025 21:19:13 +0100
Subject: [PATCH] datasets: set higher hashsize limits
To avoid possible upgrade issues, allow higher defaults than in the
master branch. Add some upgrade guidance and a note that defaults will
probably be further reduced.
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/d86c5f9f0c75736d4fce93e27c0773fcb27e1047]
CVE: CVE-2025-29916
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/datasets.c | 5 +++--
suricata.yaml.in | 5 +++--
2 files changed, 6 insertions(+), 4 deletions(-)
diff --git a/src/datasets.c b/src/datasets.c
index 0729894..f99f63c 100644
--- a/src/datasets.c
+++ b/src/datasets.c
@@ -45,8 +45,9 @@ SCMutex sets_lock = SCMUTEX_INITIALIZER;
static Dataset *sets = NULL;
static uint32_t set_ids = 0;
-uint32_t dataset_max_one_hashsize = 65536;
-uint32_t dataset_max_total_hashsize = 16777216;
+/* 4x what we set in master to allow a smoother upgrade path */
+uint32_t dataset_max_one_hashsize = 262144;
+uint32_t dataset_max_total_hashsize = 67108864;
uint32_t dataset_used_hashsize = 0;
static int DatasetAddwRep(Dataset *set, const uint8_t *data, const uint32_t data_len,
diff --git a/suricata.yaml.in b/suricata.yaml.in
index b218515..59db9ef 100644
--- a/suricata.yaml.in
+++ b/suricata.yaml.in
@@ -1169,11 +1169,12 @@ datasets:
# Limits for per rule dataset instances to avoid rules using too many
# resources.
+ # Note: in Suricata 8 the built-in default will be set to lower values.
limits:
# Max value for per dataset `hashsize` setting
- #single-hashsize: 65536
+ #single-hashsize: 262144
# Max combined hashsize values for all datasets.
- #total-hashsizes: 16777216
+ #total-hashsizes: 67108864
rules:
# Set to true to allow absolute filenames and filenames that use
--
2.49.0

View File

@@ -1,115 +0,0 @@
From bab716776ba3561cfbfd1a57fc18ff1f6859f019 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Tue, 17 Dec 2024 15:06:25 +0100
Subject: [PATCH] detect: limit base64_decode `bytes` to 64KiB
Ticket: 7613
Avoids potential large per-thread memory allocation. A buffer with the
size of the largest decode_base64 buffer size setting would be allocated
per thread. As this was a u32, it could mean a per-thread 4GiB memory
allocation.
64KiB was already the built-in default for cases where bytes size wasn't
specified.
(cherry picked from commit 32d0bd2bbb4d486623dec85a94952fde2515f2f0)
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/bab716776ba3561cfbfd1a57fc18ff1f6859f019]
CVE: CVE-2025-29917
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
doc/userguide/rules/base64-keywords.rst | 1 +
src/detect-base64-decode.c | 15 ++++++---------
src/detect.h | 2 +-
3 files changed, 8 insertions(+), 10 deletions(-)
diff --git a/doc/userguide/rules/base64-keywords.rst b/doc/userguide/rules/base64-keywords.rst
index 7daf0c2..cf4e679 100644
--- a/doc/userguide/rules/base64-keywords.rst
+++ b/doc/userguide/rules/base64-keywords.rst
@@ -15,6 +15,7 @@ Syntax::
base64_decode:bytes <value>, offset <value>, relative;
The ``bytes`` option specifies how many bytes Suricata should decode and make available for base64_data.
+This number is limited to 64KiB.
The decoding will stop at the end of the buffer.
The ``offset`` option specifies how many bytes Suricata should skip before decoding.
diff --git a/src/detect-base64-decode.c b/src/detect-base64-decode.c
index 25fdf10..5ae38c5 100644
--- a/src/detect-base64-decode.c
+++ b/src/detect-base64-decode.c
@@ -28,7 +28,7 @@
#define BASE64_DECODE_MAX 65535
typedef struct DetectBase64Decode_ {
- uint32_t bytes;
+ uint16_t bytes;
uint32_t offset;
uint8_t relative;
} DetectBase64Decode;
@@ -111,8 +111,8 @@ int DetectBase64DecodeDoMatch(DetectEngineThreadCtx *det_ctx, const Signature *s
return det_ctx->base64_decoded_len > 0;
}
-static int DetectBase64DecodeParse(const char *str, uint32_t *bytes,
- uint32_t *offset, uint8_t *relative)
+static int DetectBase64DecodeParse(
+ const char *str, uint16_t *bytes, uint32_t *offset, uint8_t *relative)
{
const char *bytes_str = NULL;
const char *offset_str = NULL;
@@ -132,7 +132,7 @@ static int DetectBase64DecodeParse(const char *str, uint32_t *bytes,
if (pcre_rc >= 3) {
if (pcre2_substring_get_bynumber(match, 2, (PCRE2_UCHAR8 **)&bytes_str, &pcre2_len) == 0) {
- if (StringParseUint32(bytes, 10, 0, bytes_str) <= 0) {
+ if (StringParseUint16(bytes, 10, 0, bytes_str) <= 0) {
SCLogError("Bad value for bytes: \"%s\"", bytes_str);
goto error;
}
@@ -186,7 +186,7 @@ error:
static int DetectBase64DecodeSetup(DetectEngineCtx *de_ctx, Signature *s,
const char *str)
{
- uint32_t bytes = 0;
+ uint16_t bytes = 0;
uint32_t offset = 0;
uint8_t relative = 0;
DetectBase64Decode *data = NULL;
@@ -238,9 +238,6 @@ static int DetectBase64DecodeSetup(DetectEngineCtx *de_ctx, Signature *s,
data->bytes = BASE64_DECODE_MAX;
}
if (data->bytes > de_ctx->base64_decode_max_len) {
-#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
- data->bytes = BASE64_DECODE_MAX;
-#endif
de_ctx->base64_decode_max_len = data->bytes;
}
@@ -272,7 +269,7 @@ static int g_http_header_buffer_id = 0;
static int DetectBase64TestDecodeParse(void)
{
int retval = 0;
- uint32_t bytes = 0;
+ uint16_t bytes = 0;
uint32_t offset = 0;
uint8_t relative = 0;
diff --git a/src/detect.h b/src/detect.h
index 2760dda..fd938a1 100644
--- a/src/detect.h
+++ b/src/detect.h
@@ -910,7 +910,7 @@ typedef struct DetectEngineCtx_ {
struct SigGroupHead_ *decoder_event_sgh;
/* Maximum size of the buffer for decoded base64 data. */
- uint32_t base64_decode_max_len;
+ uint16_t base64_decode_max_len;
/** Store rule file and line so that parsers can use them in errors. */
int rule_line;
--
2.49.0

View File

@@ -1,49 +0,0 @@
From f6c9490e1f7b0b375c286d5313ebf3bc81a95eb6 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Tue, 28 Jan 2025 15:02:45 +0100
Subject: [PATCH] detect/pcre: avoid infinite loop after negated pcre
Ticket: 7526
The usage of negated pcre, followed by other relative payload
content keywords could lead to an infinite loop.
This is because regular (not negated) pcre can test multiple
occurences, but negated pcre should be tried only once.
(cherry picked from commit b14c67cbdf25fa6c7ffe0d04ddf3ebe67b12b50b)
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/f6c9490e1f7b0b375c286d5313ebf3bc81a95eb6]
CVE: CVE-2025-29918
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
src/detect-engine-content-inspection.c | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/detect-engine-content-inspection.c b/src/detect-engine-content-inspection.c
index 77ebb3f..2a789c9 100644
--- a/src/detect-engine-content-inspection.c
+++ b/src/detect-engine-content-inspection.c
@@ -450,7 +450,6 @@ uint8_t DetectEngineContentInspection(DetectEngineCtx *de_ctx, DetectEngineThrea
if (r == 0) {
goto no_match;
}
-
if (!(pe->flags & DETECT_PCRE_RELATIVE_NEXT)) {
SCLogDebug("no relative match coming up, so this is a match");
goto match;
@@ -473,6 +472,11 @@ uint8_t DetectEngineContentInspection(DetectEngineCtx *de_ctx, DetectEngineThrea
if (det_ctx->discontinue_matching)
goto no_match;
+ if (prev_offset == 0) {
+ // This happens for negated PCRE
+ // We do not search for another occurrence of this pcre
+ SCReturnInt(0);
+ }
det_ctx->buffer_offset = prev_buffer_offset;
det_ctx->pcre_match_start_offset = prev_offset;
} while (1);
--
2.49.0

View File

@@ -1,79 +0,0 @@
From 226580d502ae98c148aaecc4846f78694b5e253c Mon Sep 17 00:00:00 2001
From: Philippe Antoine <contact@catenacyber.fr>
Date: Tue, 11 Mar 2025 16:45:35 +0100
Subject: [PATCH] decompressors: do not take data after end
CVE: CVE-2025-53537
Upstream-Status: Backport [https://github.com/OISF/libhtp/commit/226580d502ae98c148aaecc4846f78694b5e253c]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
htp/htp_core.h | 5 ++++-
htp/htp_decompressors.c | 21 ++++++++++++---------
2 files changed, 16 insertions(+), 10 deletions(-)
diff --git a/htp/htp_core.h b/htp/htp_core.h
index 7c23212..fb142c9 100644
--- a/htp/htp_core.h
+++ b/htp/htp_core.h
@@ -161,7 +161,10 @@ enum htp_content_encoding_t {
HTP_COMPRESSION_DEFLATE = 3,
/** LZMA compression. */
- HTP_COMPRESSION_LZMA = 4
+ HTP_COMPRESSION_LZMA = 4,
+
+ /** No more data. */
+ HTP_COMPRESSION_OVER = 5
};
/**
diff --git a/htp/htp_decompressors.c b/htp/htp_decompressors.c
index 19950df..0d94c30 100644
--- a/htp/htp_decompressors.c
+++ b/htp/htp_decompressors.c
@@ -203,6 +203,8 @@ htp_status_t htp_gzip_decompressor_decompress(htp_decompressor_t *drec1, htp_tx_
}
return HTP_OK;
+ } else if (drec->zlib_initialized == HTP_COMPRESSION_OVER) {
+ return HTP_ERROR;
}
if (d->data == NULL) {
@@ -316,15 +318,9 @@ restart:
// no initialization means previous error on stream
return HTP_ERROR;
}
- if (GZIP_BUF_SIZE > drec->stream.avail_out) {
- if (rc == Z_DATA_ERROR) {
- // There is data even if there is an error
- // So use this data and log a warning
- htp_log(d->tx->connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "GZip decompressor: inflate failed with %d", rc);
- rc = Z_STREAM_END;
- }
- }
- if (rc == Z_STREAM_END) {
+
+ int error_after_data = (rc == Z_DATA_ERROR && drec->restart == 0 && GZIP_BUF_SIZE > drec->stream.avail_out);
+ if (rc == Z_STREAM_END || error_after_data) {
// How many bytes do we have?
size_t len = GZIP_BUF_SIZE - drec->stream.avail_out;
@@ -351,6 +347,13 @@ restart:
drec->stream.next_out = drec->buffer;
// TODO Handle trailer.
+ if (error_after_data) {
+ // There is data even if there is an error
+ // So use this data and log a warning
+ htp_log(d->tx->connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "GZip decompressor: inflate failed with %d", rc);
+ drec->zlib_initialized = HTP_COMPRESSION_OVER;
+ return HTP_ERROR;
+ }
return HTP_OK;
}
else if (rc != Z_OK) {
--
2.50.1

View File

@@ -1,31 +0,0 @@
From 9037ea35110a0d97be5cedf8d31fb4cd9a38c7a7 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <contact@catenacyber.fr>
Date: Tue, 17 Jun 2025 10:12:47 +0200
Subject: [PATCH] decompressors: fix leak in lzma error case
Ticket: 7766
CVE: CVE-2025-53537
Upstream-Status: Backport [https://github.com/OISF/libhtp/commit/9037ea35110a0d97be5cedf8d31fb4cd9a38c7a7]
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
htp/htp_decompressors.c | 3 +++
1 file changed, 3 insertions(+)
diff --git a/htp/htp_decompressors.c b/htp/htp_decompressors.c
index 0d94c30..ce6cfe1 100644
--- a/htp/htp_decompressors.c
+++ b/htp/htp_decompressors.c
@@ -351,6 +351,9 @@ restart:
// There is data even if there is an error
// So use this data and log a warning
htp_log(d->tx->connp, HTP_LOG_MARK, HTP_LOG_WARNING, 0, "GZip decompressor: inflate failed with %d", rc);
+ if (drec->zlib_initialized == HTP_COMPRESSION_LZMA) {
+ LzmaDec_Free(&drec->state, &lzma_Alloc);
+ }
drec->zlib_initialized = HTP_COMPRESSION_OVER;
return HTP_ERROR;
}
--
2.50.1

View File

@@ -1,32 +0,0 @@
Skip pkg Makefile from using its own rust steps
Upstream-Status: Inappropriate [OE Specific]
Signed-off-by: Armin Kuster <akuster808@gmail.com>
Index: suricata-7.0.0/Makefile.in
===================================================================
--- suricata-7.0.0.orig/Makefile.in
+++ suricata-7.0.0/Makefile.in
@@ -424,7 +424,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE s
acsite.m4 \
scripts/generate-images.sh
-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
$(SURICATA_UPDATE_DIR)
CLEANFILES = stamp-h[0-9]*
Index: suricata-7.0.0/Makefile.am
===================================================================
--- suricata-7.0.0.orig/Makefile.am
+++ suricata-7.0.0/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE s
lua \
acsite.m4 \
scripts/generate-images.sh
-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
$(SURICATA_UPDATE_DIR)
CLEANFILES = stamp-h[0-9]*

View File

@@ -4,12 +4,8 @@ require suricata.inc
LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=596ab7963a1a0e5198e5a1c4aa621843" LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=596ab7963a1a0e5198e5a1c4aa621843"
SRC_URI = "git://github.com/OISF/libhtp.git;protocol=https;branch=0.5.x \ SRC_URI = "git://github.com/OISF/libhtp.git;protocol=https;branch=0.5.x"
file://CVE-2024-45797.patch \ SRCREV = "314ca7360e141a1e40be58707b3abeefe32258c9"
file://CVE-2025-53537-001.patch \
file://CVE-2025-53537-002.patch \
"
SRCREV = "8bdfe7b9d04e5e948c8fbaa7472e14d884cc00af"
DEPENDS = "zlib" DEPENDS = "zlib"

File diff suppressed because it is too large Load Diff

View File

@@ -5,7 +5,7 @@ require suricata.inc
LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=c70d8d3310941dcdfcd1e02800a1f548" LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=c70d8d3310941dcdfcd1e02800a1f548"
SRC_URI = "http://www.openinfosecfoundation.org/download/suricata-${PV}.tar.gz" SRC_URI = "http://www.openinfosecfoundation.org/download/suricata-${PV}.tar.gz"
SRC_URI[sha256sum] = "7bcd1313118366451465dc3f8385a3f6aadd084ffe44dd257dda8105863bb769" SRC_URI[sha256sum] = "da5a591c749fed2bd986fc3b3cac25d9cfd3b453f57becf14610746999d3c5dd"
DEPENDS = "lz4 libhtp" DEPENDS = "lz4 libhtp"
@@ -15,29 +15,7 @@ SRC_URI += " \
file://suricata.yaml \ file://suricata.yaml \
file://suricata.service \ file://suricata.service \
file://run-ptest \ file://run-ptest \
file://fixup.patch \ file://0001-Skip-pkg-Makefile-from-using-its-own-rust-steps.patch \
file://CVE-2024-45795.patch \
file://CVE-2024-45796.patch \
file://CVE-2024-55605.patch \
file://CVE-2025-29916-01.patch \
file://CVE-2025-29916-02.patch \
file://CVE-2025-29916-03.patch \
file://CVE-2025-29917.patch \
file://CVE-2025-29918.patch \
file://CVE-2024-32663-001.patch \
file://CVE-2024-32663-002.patch \
file://CVE-2024-32664.patch \
file://CVE-2024-32867-001.patch \
file://CVE-2024-32867-002.patch \
file://CVE-2024-32867-003.patch \
file://CVE-2024-32867-004.patch \
file://CVE-2024-55627-001.patch \
file://CVE-2024-55627-002.patch \
file://CVE-2024-55627-003.patch \
file://CVE-2024-55628-001.patch \
file://CVE-2024-55628-002.patch \
file://CVE-2024-55628-003.patch \
file://CVE-2024-55628-004.patch \
" "
inherit autotools pkgconfig python3native systemd ptest cargo cargo-update-recipe-crates inherit autotools pkgconfig python3native systemd ptest cargo cargo-update-recipe-crates