mirror of https://git.yoctoproject.org/meta-security
suricata: update to 7.0.12
Also update libhtp to required version 0.5.52. See the suricata release
notes for more details about changes and CVEs fixed:

https://suricata.io/2024/02/08/suricata-7-0-3-and-6-0-16-released/
https://suricata.io/2024/03/19/suricata-7-0-4-and-6-0-17-released/
https://suricata.io/2024/04/23/suricata-7-0-5-and-6-0-19-released/
https://suricata.io/2024/06/27/suricata-7-0-6-and-6-0-20-released/
https://suricata.io/2024/10/01/suricata-7-0-7-released/
https://suricata.io/2024/12/12/suricata-7-0-8-released/
https://suricata.io/2025/03/18/suricata-7-0-9-released/
https://suricata.io/2025/07/08/suricata-7-0-11-released/
https://suricata.io/2025/09/16/suricata-8-0-1-and-7-0-12-released/

Signed-off-by: Clayton Casciato <majortomtosourcecontrol@gmail.com>
committed by Scott Murray
parent dec36ead2e
commit fbb8343cf8
@@ -0,0 +1,40 @@
From a59708a9300df8116867ac77f7829f7fd647325e Mon Sep 17 00:00:00 2001
From: Clayton Casciato <ccasciato@21sw.us>
Date: Mon, 3 Nov 2025 10:30:26 -0700
Subject: [PATCH] Skip pkg Makefile from using its own rust steps

Upstream-Status: Inappropriate [OE Specific]

Signed-off-by: Armin Kuster <akuster808@gmail.com>
Signed-off-by: Clayton Casciato <majortomtosourcecontrol@gmail.com>
---
 Makefile.am | 2 +-
 Makefile.in | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/Makefile.am b/Makefile.am
index d0d3d09..a572912 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -10,7 +10,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE suricata.yaml.in \
    scripts/generate-images.sh \
    scripts/docs-almalinux9-minimal-build.sh \
    scripts/docs-ubuntu-debian-minimal-build.sh
-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
    $(SURICATA_UPDATE_DIR)

CLEANFILES = stamp-h[0-9]*
diff --git a/Makefile.in b/Makefile.in
index 7a89353..3864613 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -428,7 +428,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE suricata.yaml.in \
    scripts/docs-almalinux9-minimal-build.sh \
    scripts/docs-ubuntu-debian-minimal-build.sh

-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
    $(SURICATA_UPDATE_DIR)

CLEANFILES = stamp-h[0-9]*
@@ -1,53 +0,0 @@
From a6052dca1e27f3c8f96ec7be0fe7514c56a0d56f Mon Sep 17 00:00:00 2001
From: Victor Julien <vjulien@oisf.net>
Date: Tue, 4 Jun 2024 14:43:22 +0200
Subject: [PATCH 1/4] defrag: don't use completed tracker

When a Tracker is set up for an IPID, frags have come in for it, and it
has been reassembled and is complete, the `DefragTracker::remove` flag
is set. This is meant to tell the hash cleanup code to recycle the
tracker and to let the lookup code skip the tracker during lookup.

A logic error led to the following scenario:

1. there are sufficient frag trackers to make sure the hash table is
   filled with trackers
2. frags for a Packet with IPID X are processed correctly (X1)
3. frags for a new Packet that also has IPID X come in quickly after the
   first (X2)
4. during the lookup, the frag for X2 hashes to a hash row that holds
   more than one tracker
5. as the trackers in the hash row are evaluated, the lookup finds the
   tracker for X1, but since the `remove` bit is not checked, it is
   returned as the tracker for X2
6. reassembly fails, as the tracker is already complete

The logic error is that the `remove` bit was checked only for the first
tracker in a row, leading to reuse of a closed tracker if there were
more trackers in the hash row.

Ticket: #7042.

Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/aab7f35c76721df19403a7c0c0025feae12f3b6b]
CVE: CVE-2024-37151
Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
---
 src/defrag-hash.c | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/src/defrag-hash.c b/src/defrag-hash.c
index 2f19ce2..87d40f9 100644
--- a/src/defrag-hash.c
+++ b/src/defrag-hash.c
@@ -591,7 +591,7 @@ DefragTracker *DefragGetTrackerFromHash (Packet *p)
            return dt;
        }

-        if (DefragTrackerCompare(dt, p) != 0) {
+        if (!dt->remove && DefragTrackerCompare(dt, p) != 0) {
            /* we found our tracker, lets put it on top of the
             * hash list -- this rewards active trackers */
            if (dt->hnext) {
--
2.44.0
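For context, a minimal Rust sketch of the invariant the fix restores: every tracker in a hash row must be tested against its `remove` flag, not just the first one. The types and names below are hypothetical stand-ins, not Suricata's actual C structures:

```rust
// Hypothetical, simplified model of a defrag hash-row lookup.
struct Tracker {
    ipid: u16,
    remove: bool, // set once the tracker is complete and awaiting recycling
}

// Return the first live (not-yet-recycled) tracker matching `ipid`.
// The CVE-2024-37151 bug class: testing `remove` only on the first entry
// of the row let a completed tracker be returned for a new packet.
fn lookup(row: &[Tracker], ipid: u16) -> Option<&Tracker> {
    row.iter().find(|t| !t.remove && t.ipid == ipid)
}

fn main() {
    let row = [
        Tracker { ipid: 7, remove: true },  // completed tracker for X1
        Tracker { ipid: 7, remove: false }, // fresh tracker for X2
    ];
    // With the `!t.remove` guard, X2 resolves to the live tracker.
    assert_eq!(lookup(&row, 7).map(|t| t.remove), Some(false));
}
```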
@@ -1,44 +0,0 @@
From f1645ea911d4e90b1be8ee5863e8e1a665079cce Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Thu, 25 Apr 2024 21:24:33 +0200
Subject: [PATCH 2/4] modbus: abort flow parsing on flood

Ticket: 6987

Let's not spend more resources on a flow that is trying to
make us do just that...

(cherry picked from commit 37509e8e0ed097f8e0174df754835ac60584fc72)

Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/a753cdbe84caee3b66d0bf49b2712d29a50d67ae]
CVE: CVE-2024-38534
Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
---
 rust/src/modbus/modbus.rs | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/rust/src/modbus/modbus.rs b/rust/src/modbus/modbus.rs
index 246e9ca..d2f7c6b 100644
--- a/rust/src/modbus/modbus.rs
+++ b/rust/src/modbus/modbus.rs
@@ -189,7 +189,7 @@ impl ModbusState {
            None => {
                let mut tx = match self.new_tx() {
                    Some(tx) => tx,
-                    None => return AppLayerResult::ok(),
+                    None => return AppLayerResult::err(),
                };
                tx.set_events_from_flags(&msg.error_flags);
                tx.request = Some(msg);
@@ -215,7 +215,7 @@ impl ModbusState {
            None => {
                let mut tx = match self.new_tx() {
                    Some(tx) => tx,
-                    None => return AppLayerResult::ok(),
+                    None => return AppLayerResult::err(),
                };
                if msg
                    .access_type
--
2.44.0
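The shape of that fix, as a self-contained Rust sketch (the types here are stand-ins, not Suricata's app-layer API): once the per-flow transaction cap is reached, returning an error makes the engine stop parsing the flow, where returning ok would keep spending cycles on a flooding peer:

```rust
enum AppLayerResult { Ok, Err }

struct State {
    txs: Vec<u64>,
    tx_cap: usize, // per-flow cap on live transactions (value illustrative)
}

impl State {
    fn new_tx(&mut self) -> Option<u64> {
        if self.txs.len() >= self.tx_cap {
            return None; // flooded: refuse to allocate another transaction
        }
        self.txs.push(self.txs.len() as u64);
        self.txs.last().copied()
    }

    fn parse(&mut self) -> AppLayerResult {
        match self.new_tx() {
            Some(_tx) => AppLayerResult::Ok,
            // Before the fix this arm returned ok, so parsing continued;
            // err aborts further parsing of the abusive flow.
            None => AppLayerResult::Err,
        }
    }
}

fn main() {
    let mut st = State { txs: Vec::new(), tx_cap: 2 };
    st.parse();
    st.parse();
    assert!(matches!(st.parse(), AppLayerResult::Err));
}
```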
@@ -1,57 +0,0 @@
From 6b00dc36d7527f051c2346f03d20f8d9e5a60138 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Mon, 17 Jun 2024 16:30:49 +0200
Subject: [PATCH 3/4] http2: do not expand duplicate headers

Ticket: 7104

As this can cause a big memory allocation due to the quadratic
nature of the HPACK compression.

(cherry picked from commit 5bd17934df321b88f502d48afdd6cc8bad4787a7)

Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/c82fa5ca0d1ce0bd8f936e0b860707a6571373b2]
CVE: CVE-2024-38535
Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
---
 rust/src/http2/detect.rs | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/rust/src/http2/detect.rs b/rust/src/http2/detect.rs
index 99261ad..9c2f8ab 100644
--- a/rust/src/http2/detect.rs
+++ b/rust/src/http2/detect.rs
@@ -432,11 +432,11 @@ pub fn http2_frames_get_header_value_vec(
                    if found == 0 {
                        vec.extend_from_slice(&block.value);
                        found = 1;
-                    } else if found == 1 {
+                    } else if found == 1 && Rc::strong_count(&block.name) <= 2 {
                        vec.extend_from_slice(&[b',', b' ']);
                        vec.extend_from_slice(&block.value);
                        found = 2;
-                    } else {
+                    } else if Rc::strong_count(&block.name) <= 2 {
                        vec.extend_from_slice(&[b',', b' ']);
                        vec.extend_from_slice(&block.value);
                    }
@@ -469,14 +469,14 @@ fn http2_frames_get_header_value<'a>(
                    if found == 0 {
                        single = Ok(&block.value);
                        found = 1;
-                    } else if found == 1 {
+                    } else if found == 1 && Rc::strong_count(&block.name) <= 2 {
                        if let Ok(s) = single {
                            vec.extend_from_slice(s);
                        }
                        vec.extend_from_slice(&[b',', b' ']);
                        vec.extend_from_slice(&block.value);
                        found = 2;
-                    } else {
+                    } else if Rc::strong_count(&block.name) <= 2 {
                        vec.extend_from_slice(&[b',', b' ']);
                        vec.extend_from_slice(&block.value);
                    }
--
2.44.0
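A rough, standalone illustration of the heuristic (simplified types; Suricata's exact reference-count accounting differs): HPACK-decoded names are Rc-shared, so a name that a peer keeps re-indexing has a high `Rc::strong_count`, and concatenation of its values can be skipped to bound the output:

```rust
use std::rc::Rc;

struct Block {
    name: Rc<Vec<u8>>,
    value: Vec<u8>,
}

// Join the values of `name`, but stop expanding once the name is shared
// by many blocks -- the signature of a repeated one-byte HPACK index.
fn join_values(blocks: &[Block], name: &[u8]) -> Vec<u8> {
    let mut out = Vec::new();
    let mut found = false;
    for b in blocks.iter().filter(|b| b.name.as_ref().as_slice() == name) {
        if !found {
            out.extend_from_slice(&b.value);
            found = true;
        } else if Rc::strong_count(&b.name) <= 2 {
            out.extend_from_slice(b", ");
            out.extend_from_slice(&b.value);
        } // else: heavily shared (duplicated) header, don't expand further
    }
    out
}

fn main() {
    let name = Rc::new(b"x-dup".to_vec());
    // Many blocks sharing one Rc'd name mimic repeated indexed headers.
    let blocks: Vec<Block> = (0..1000)
        .map(|i| Block { name: Rc::clone(&name), value: format!("v{i}").into_bytes() })
        .collect();
    // The joined output stays bounded instead of growing per duplicate.
    assert_eq!(join_values(&blocks, b"x-dup"), b"v0".to_vec());
}
```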
@@ -1,292 +0,0 @@
From 390f09692eb99809c679d3f350c7cc185d163e1a Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Wed, 27 Mar 2024 14:33:54 +0100
Subject: [PATCH] http2: use a reference counter for headers

Ticket: 6892

As HTTP HPACK header compression allows a single byte to
express a previously seen arbitrary-size header block (name+value),
we should avoid copying the vectors' data and just point
to the same data, while remaining memory safe, even in the case
of later header eviction from the dynamic table.

The Rust std solution is Rc, and the use of clone, so long as the
data is accessed by only one thread.

Note: This patch is needed to patch CVE-2024-38535 as it defines Rc.
Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/390f09692eb99809c679d3f350c7cc185d163e1a]
Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
---
 rust/src/http2/detect.rs | 19 +++++++------
 rust/src/http2/http2.rs | 2 +-
 rust/src/http2/parser.rs | 61 +++++++++++++++++++++-------------------
 3 files changed, 43 insertions(+), 39 deletions(-)

diff --git a/rust/src/http2/detect.rs b/rust/src/http2/detect.rs
index 9c2f8ab..e068a17 100644
--- a/rust/src/http2/detect.rs
+++ b/rust/src/http2/detect.rs
@@ -23,6 +23,7 @@ use crate::core::Direction;
use crate::detect::uint::{detect_match_uint, DetectUintData};
use std::ffi::CStr;
use std::str::FromStr;
+use std::rc::Rc;

fn http2_tx_has_frametype(
    tx: &mut HTTP2Transaction, direction: Direction, value: u8,
@@ -404,7 +405,7 @@ fn http2_frames_get_header_firstvalue<'a>(
    for frame in frames {
        if let Some(blocks) = http2_header_blocks(frame) {
            for block in blocks.iter() {
-                if block.name == name.as_bytes() {
+                if block.name.as_ref() == name.as_bytes() {
                    return Ok(&block.value);
                }
            }
@@ -428,7 +429,7 @@ pub fn http2_frames_get_header_value_vec(
    for frame in frames {
        if let Some(blocks) = http2_header_blocks(frame) {
            for block in blocks.iter() {
-                if block.name == name.as_bytes() {
+                if block.name.as_ref() == name.as_bytes() {
                    if found == 0 {
                        vec.extend_from_slice(&block.value);
                        found = 1;
@@ -465,7 +466,7 @@ fn http2_frames_get_header_value<'a>(
    for frame in frames {
        if let Some(blocks) = http2_header_blocks(frame) {
            for block in blocks.iter() {
-                if block.name == name.as_bytes() {
+                if block.name.as_ref() == name.as_bytes() {
                    if found == 0 {
                        single = Ok(&block.value);
                        found = 1;
@@ -905,8 +906,8 @@ fn http2_tx_set_header(state: &mut HTTP2State, name: &[u8], input: &[u8]) {
    };
    let mut blocks = Vec::new();
    let b = parser::HTTP2FrameHeaderBlock {
-        name: name.to_vec(),
-        value: input.to_vec(),
+        name: Rc::new(name.to_vec()),
+        value: Rc::new(input.to_vec()),
        error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
        sizeupdate: 0,
    };
@@ -1061,15 +1062,15 @@ mod tests {
        };
        let mut blocks = Vec::new();
        let b = parser::HTTP2FrameHeaderBlock {
-            name: "Host".as_bytes().to_vec(),
-            value: "abc.com".as_bytes().to_vec(),
+            name: "Host".as_bytes().to_vec().into(),
+            value: "abc.com".as_bytes().to_vec().into(),
            error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
            sizeupdate: 0,
        };
        blocks.push(b);
        let b2 = parser::HTTP2FrameHeaderBlock {
-            name: "Host".as_bytes().to_vec(),
-            value: "efg.net".as_bytes().to_vec(),
+            name: "Host".as_bytes().to_vec().into(),
+            value: "efg.net".as_bytes().to_vec().into(),
            error: parser::HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
            sizeupdate: 0,
        };
diff --git a/rust/src/http2/http2.rs b/rust/src/http2/http2.rs
index 326030f..d14ca06 100644
--- a/rust/src/http2/http2.rs
+++ b/rust/src/http2/http2.rs
@@ -204,7 +204,7 @@ impl HTTP2Transaction {

    fn handle_headers(&mut self, blocks: &[parser::HTTP2FrameHeaderBlock], dir: Direction) {
        for block in blocks {
-            if block.name == b"content-encoding" {
+            if block.name.as_ref() == b"content-encoding" {
                self.decoder.http2_encoding_fromvec(&block.value, dir);
            }
        }
diff --git a/rust/src/http2/parser.rs b/rust/src/http2/parser.rs
index adabeb2..1a46437 100644
--- a/rust/src/http2/parser.rs
+++ b/rust/src/http2/parser.rs
@@ -30,6 +30,7 @@ use nom7::sequence::tuple;
use nom7::{Err, IResult};
use std::fmt;
use std::str::FromStr;
+use std::rc::Rc;

#[repr(u8)]
#[derive(Clone, Copy, PartialEq, Eq, FromPrimitive, Debug)]
@@ -295,8 +296,8 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option<HTTP
    };
    if !name.is_empty() {
        return Some(HTTP2FrameHeaderBlock {
-            name: name.as_bytes().to_vec(),
-            value: value.as_bytes().to_vec(),
+            name: Rc::new(name.as_bytes().to_vec()),
+            value: Rc::new(value.as_bytes().to_vec()),
            error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
            sizeupdate: 0,
        });
@@ -304,23 +305,23 @@ fn http2_frame_header_static(n: u64, dyn_headers: &HTTP2DynTable) -> Option<HTTP
    //use dynamic table
    if n == 0 {
        return Some(HTTP2FrameHeaderBlock {
-            name: Vec::new(),
-            value: Vec::new(),
+            name: Rc::new(Vec::new()),
+            value: Rc::new(Vec::new()),
            error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIndex0,
            sizeupdate: 0,
        });
    } else if dyn_headers.table.len() + HTTP2_STATIC_HEADERS_NUMBER < n as usize {
        return Some(HTTP2FrameHeaderBlock {
-            name: Vec::new(),
-            value: Vec::new(),
+            name: Rc::new(Vec::new()),
+            value: Rc::new(Vec::new()),
            error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed,
            sizeupdate: 0,
        });
    } else {
        let indyn = dyn_headers.table.len() - (n as usize - HTTP2_STATIC_HEADERS_NUMBER);
        let headcopy = HTTP2FrameHeaderBlock {
-            name: dyn_headers.table[indyn].name.to_vec(),
-            value: dyn_headers.table[indyn].value.to_vec(),
+            name: dyn_headers.table[indyn].name.clone(),
+            value: dyn_headers.table[indyn].value.clone(),
            error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess,
            sizeupdate: 0,
        };
@@ -348,8 +349,10 @@ impl fmt::Display for HTTP2HeaderDecodeStatus {

#[derive(Clone, Debug)]
pub struct HTTP2FrameHeaderBlock {
-    pub name: Vec<u8>,
-    pub value: Vec<u8>,
+    // Use Rc reference counted so that indexed headers do not get copied.
+    // Otherwise, this leads to quadratic complexity in memory occupation.
+    pub name: Rc<Vec<u8>>,
+    pub value: Rc<Vec<u8>>,
    pub error: HTTP2HeaderDecodeStatus,
    pub sizeupdate: u64,
}
@@ -391,7 +394,7 @@ fn http2_parse_headers_block_literal_common<'a>(
) -> IResult<&'a [u8], HTTP2FrameHeaderBlock> {
    let (i3, name, error) = if index == 0 {
        match http2_parse_headers_block_string(input) {
-            Ok((r, n)) => Ok((r, n, HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)),
+            Ok((r, n)) => Ok((r, Rc::new(n), HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSuccess)),
            Err(e) => Err(e),
        }
    } else {
@@ -403,7 +406,7 @@ fn http2_parse_headers_block_literal_common<'a>(
            )),
            None => Ok((
                input,
-                Vec::new(),
+                Rc::new(Vec::new()),
                HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeNotIndexed,
            )),
        }
@@ -413,7 +416,7 @@ fn http2_parse_headers_block_literal_common<'a>(
        i4,
        HTTP2FrameHeaderBlock {
            name,
-            value,
+            value: Rc::new(value),
            error,
            sizeupdate: 0,
        },
@@ -435,8 +438,8 @@ fn http2_parse_headers_block_literal_incindex<'a>(
    match r {
        Ok((r, head)) => {
            let headcopy = HTTP2FrameHeaderBlock {
-                name: head.name.to_vec(),
-                value: head.value.to_vec(),
+                name: head.name.clone(),
+                value: head.value.clone(),
                error: head.error,
                sizeupdate: 0,
            };
@@ -556,8 +559,8 @@ fn http2_parse_headers_block_dynamic_size<'a>(
        return Ok((
            i3,
            HTTP2FrameHeaderBlock {
-                name: Vec::new(),
-                value: Vec::new(),
+                name: Rc::new(Vec::new()),
+                value: Rc::new(Vec::new()),
                error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeSizeUpdate,
                sizeupdate: maxsize2,
            },
@@ -614,8 +617,8 @@ fn http2_parse_headers_blocks<'a>(
            // if we error from http2_parse_var_uint, we keep the first parsed headers
            if err.code == ErrorKind::LengthValue {
                blocks.push(HTTP2FrameHeaderBlock {
-                    name: Vec::new(),
-                    value: Vec::new(),
+                    name: Rc::new(Vec::new()),
+                    value: Rc::new(Vec::new()),
                    error: HTTP2HeaderDecodeStatus::HTTP2HeaderDecodeIntegerOverflow,
                    sizeupdate: 0,
                });
@@ -765,8 +768,8 @@ mod tests {
        match r0 {
            Ok((remainder, hd)) => {
                // Check the first message.
-                assert_eq!(hd.name, ":method".as_bytes().to_vec());
-                assert_eq!(hd.value, "GET".as_bytes().to_vec());
+                assert_eq!(hd.name, ":method".as_bytes().to_vec().into());
+                assert_eq!(hd.value, "GET".as_bytes().to_vec().into());
                // And we should have no bytes left.
                assert_eq!(remainder.len(), 0);
            }
@@ -782,8 +785,8 @@ mod tests {
        match r1 {
            Ok((remainder, hd)) => {
                // Check the first message.
-                assert_eq!(hd.name, "accept".as_bytes().to_vec());
-                assert_eq!(hd.value, "*/*".as_bytes().to_vec());
+                assert_eq!(hd.name, "accept".as_bytes().to_vec().into());
+                assert_eq!(hd.value, "*/*".as_bytes().to_vec().into());
                // And we should have no bytes left.
                assert_eq!(remainder.len(), 0);
                assert_eq!(dynh.table.len(), 1);
@@ -802,8 +805,8 @@ mod tests {
        match result {
            Ok((remainder, hd)) => {
                // Check the first message.
-                assert_eq!(hd.name, ":authority".as_bytes().to_vec());
-                assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec());
+                assert_eq!(hd.name, ":authority".as_bytes().to_vec().into());
+                assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into());
                // And we should have no bytes left.
                assert_eq!(remainder.len(), 0);
                assert_eq!(dynh.table.len(), 2);
@@ -820,8 +823,8 @@ mod tests {
        match r3 {
            Ok((remainder, hd)) => {
                // same as before
-                assert_eq!(hd.name, ":authority".as_bytes().to_vec());
-                assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec());
+                assert_eq!(hd.name, ":authority".as_bytes().to_vec().into());
+                assert_eq!(hd.value, "localhost:3000".as_bytes().to_vec().into());
                // And we should have no bytes left.
                assert_eq!(remainder.len(), 0);
                assert_eq!(dynh.table.len(), 2);
@@ -856,8 +859,8 @@ mod tests {
        match r2 {
            Ok((remainder, hd)) => {
                // Check the first message.
-                assert_eq!(hd.name, ":path".as_bytes().to_vec());
-                assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec());
+                assert_eq!(hd.name, ":path".as_bytes().to_vec().into());
+                assert_eq!(hd.value, "/doc/manual/html/index.html".as_bytes().to_vec().into());
                // And we should have no bytes left.
                assert_eq!(remainder.len(), 0);
                assert_eq!(dynh.table.len(), 2);
--
2.44.0
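A toy demonstration, in plain std Rust with no Suricata types, of why this refactor defeats the quadratic blowup: cloning an `Rc` copies a pointer and bumps a counter, it never duplicates the underlying bytes:

```rust
use std::rc::Rc;

fn main() {
    // One 1 MiB buffer standing in for a large decoded header name.
    let name: Rc<Vec<u8>> = Rc::new(vec![b'x'; 1 << 20]);

    // A dynamic table full of "copies" costs 1000 pointers, not 1000 MiB.
    let table: Vec<Rc<Vec<u8>>> = (0..1000).map(|_| Rc::clone(&name)).collect();

    assert_eq!(Rc::strong_count(&name), 1001); // 1000 clones + the original
    assert!(Rc::ptr_eq(&name, &table[999]));   // all refer to the same bytes
    println!("shared {} bytes across {} refs", name.len(), Rc::strong_count(&name));
}
```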
@@ -1,40 +0,0 @@
From 4026bca7f04c419dd3f3ba17a1af17bbcbcf18bc Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Fri, 17 May 2024 09:39:52 +0200
Subject: [PATCH 4/4] http: fix nul deref on memcap reached

HttpRangeOpenFileAux may return NULL in different cases, including
when memcap is reached.
But its only caller did not check it before calling HttpRangeAppendData,
which would dereference the NULL value.

Ticket: 7029
(cherry picked from commit fd262df457f67f2174752dd6505ba2ed5911fd96)

Upstream-Status: Backport from [https://github.com/OISF/suricata/commit/2bd3bd0e318f19008e9fe068ab17277c530ffb92]
CVE: CVE-2024-38536
Signed-off-by: Siddharth Doshi <sdoshi@mvista.com>
---
 src/app-layer-htp-range.c | 6 ++++--
 1 file changed, 4 insertions(+), 2 deletions(-)

diff --git a/src/app-layer-htp-range.c b/src/app-layer-htp-range.c
index 3cdde35..f0d75a9 100644
--- a/src/app-layer-htp-range.c
+++ b/src/app-layer-htp-range.c
@@ -351,8 +351,10 @@ static HttpRangeContainerBlock *HttpRangeOpenFile(HttpRangeContainerFile *c, uin
{
    HttpRangeContainerBlock *r =
            HttpRangeOpenFileAux(c, start, end, total, sbcfg, name, name_len, flags);
-    if (HttpRangeAppendData(sbcfg, r, data, len) < 0) {
-        SCLogDebug("Failed to append data while opening");
+    if (r) {
+        if (HttpRangeAppendData(sbcfg, r, data, len) < 0) {
+            SCLogDebug("Failed to append data while opening");
+        }
    }
    return r;
}
--
2.44.0
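The same guard in miniature, sketched in Rust with hypothetical names: an allocator that can fail under a memory cap returns `Option`, and the caller appends only after checking it, the analogue of the `if (r)` check added in the C above:

```rust
struct Block {
    data: Vec<u8>,
}

// Stand-in for HttpRangeOpenFileAux: returns None when the memcap is hit.
fn open_file_aux(memcap_hit: bool) -> Option<Block> {
    if memcap_hit {
        return None; // no block allocated, the caller must cope
    }
    Some(Block { data: Vec::new() })
}

fn open_and_append(memcap_hit: bool, bytes: &[u8]) -> Option<Block> {
    let mut r = open_file_aux(memcap_hit);
    // The unpatched C appended unconditionally, the moral equivalent of
    // calling r.unwrap() here; guarding the append avoids the null deref.
    if let Some(block) = r.as_mut() {
        block.data.extend_from_slice(bytes);
    }
    r
}

fn main() {
    assert!(open_and_append(true, b"payload").is_none()); // memcap: no crash
    assert_eq!(open_and_append(false, b"payload").unwrap().data, b"payload".to_vec());
}
```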
@@ -1,32 +0,0 @@
Skip pkg Makefile from using its own rust steps

Upstream-Status: Inappropriate [OE Specific]

Signed-off-by: Armin Kuster <akuster808@gmail.com>

Index: suricata-7.0.0/Makefile.in
===================================================================
--- suricata-7.0.0.orig/Makefile.in
+++ suricata-7.0.0/Makefile.in
@@ -424,7 +424,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE s
    acsite.m4 \
    scripts/generate-images.sh

-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
    $(SURICATA_UPDATE_DIR)

CLEANFILES = stamp-h[0-9]*
Index: suricata-7.0.0/Makefile.am
===================================================================
--- suricata-7.0.0.orig/Makefile.am
+++ suricata-7.0.0/Makefile.am
@@ -8,7 +8,7 @@ EXTRA_DIST = ChangeLog COPYING LICENSE s
    lua \
    acsite.m4 \
    scripts/generate-images.sh
-SUBDIRS = $(HTP_DIR) rust src qa rules doc contrib etc python ebpf \
+SUBDIRS = $(HTP_DIR) src qa rules doc contrib etc python ebpf \
    $(SURICATA_UPDATE_DIR)

CLEANFILES = stamp-h[0-9]*
@@ -5,7 +5,7 @@ require suricata.inc
LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=596ab7963a1a0e5198e5a1c4aa621843"

SRC_URI = "git://github.com/OISF/libhtp.git;protocol=https;branch=0.5.x"
-SRCREV = "ebe480be4a5f3bb1d44be6a9a6c2755bd8ef7e71"
+SRCREV = "314ca7360e141a1e40be58707b3abeefe32258c9"

DEPENDS = "zlib"

File diff suppressed because it is too large
@@ -5,7 +5,7 @@ require suricata.inc
LIC_FILES_CHKSUM = "file://LICENSE;beginline=1;endline=2;md5=c70d8d3310941dcdfcd1e02800a1f548"

SRC_URI = "http://www.openinfosecfoundation.org/download/suricata-${PV}.tar.gz"
-SRC_URI[sha256sum] = "7bcd1313118366451465dc3f8385a3f6aadd084ffe44dd257dda8105863bb769"
+SRC_URI[sha256sum] = "da5a591c749fed2bd986fc3b3cac25d9cfd3b453f57becf14610746999d3c5dd"

DEPENDS = "lz4 libhtp"

@@ -15,12 +15,7 @@ SRC_URI += " \
    file://suricata.yaml \
    file://suricata.service \
    file://run-ptest \
-    file://fixup.patch \
-    file://CVE-2024-37151.patch \
-    file://CVE-2024-38534.patch \
-    file://CVE-2024-38535_pre.patch \
-    file://CVE-2024-38535.patch \
-    file://CVE-2024-38536.patch \
+    file://0001-Skip-pkg-Makefile-from-using-its-own-rust-steps.patch \
"

inherit autotools pkgconfig python3native systemd ptest cargo cargo-update-recipe-crates