meta-security/recipes-ids/suricata/files/CVE-2025-29916-02.patch
Commit df1781ceb6 by Hitendra Prajapati: suricata: fix multiple CVEs
Backport fixes for:

* CVE-2025-29916 - Upstream-Status: Backport from 2f432c99a9 && e28c8c655a && d86c5f9f0c
* CVE-2025-29917 - Upstream-Status: Backport from bab716776b
* CVE-2025-29918 - Upstream-Status: Backport from f6c9490e1f

Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
Signed-off-by: Scott Murray <scott.murray@konsulko.com>
2025-11-22 22:56:53 +02:00


From e28c8c655a324a18932655a2c2b8f0d5aa1c55d7 Mon Sep 17 00:00:00 2001
From: Philippe Antoine <pantoine@oisf.net>
Date: Tue, 18 Mar 2025 10:55:39 +0100
Subject: [PATCH] detect: add configurable limits for datasets
Ticket: 7615
Avoids signatures setting extreme hash sizes, which would lead to very
high memory use.
Default to allowing:
- 65536 per dataset
- 16777216 total
To override these built-in defaults:
```yaml
datasets:
  # Limits for per rule dataset instances to avoid rules using too many
  # resources.
  limits:
    # Max value for per dataset `hashsize` setting
    #single-hashsize: 65536
    # Max combined hashsize values for all datasets.
    #total-hashsizes: 16777216
```
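Both limits are read with ParseSizeStringU32 (see the DatasetsInit hunk below), so suffixed values should be accepted alongside plain integers; with the usual 1 kb = 1024 convention, the built-in defaults correspond to 64kb and 16mb. A hypothetical override doubling both defaults could therefore look like this (illustrative values, not a recommendation):

```yaml
datasets:
  limits:
    # 2x the built-in default of 65536 buckets per dataset
    single-hashsize: 128kb
    # 2x the built-in default of 16777216 buckets across all datasets
    total-hashsizes: 32mb
```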
(cherry picked from commit a7713db709b8a0be5fc5e5809ab58e9b14a16e85)
Upstream-Status: Backport [https://github.com/OISF/suricata/commit/e28c8c655a324a18932655a2c2b8f0d5aa1c55d7]
CVE: CVE-2025-29916
Signed-off-by: Hitendra Prajapati <hprajapati@mvista.com>
---
 src/datasets.c   | 65 ++++++++++++++++++++++++++++++++++++++++++++++++
 src/util-thash.c |  5 ----
 suricata.yaml.in |  8 ++++++
 3 files changed, 73 insertions(+), 5 deletions(-)
diff --git a/src/datasets.c b/src/datasets.c
index 89e7899..0729894 100644
--- a/src/datasets.c
+++ b/src/datasets.c
@@ -39,11 +39,16 @@
#include "util-misc.h"
#include "util-path.h"
#include "util-debug.h"
+#include "util-validate.h"
SCMutex sets_lock = SCMUTEX_INITIALIZER;
static Dataset *sets = NULL;
static uint32_t set_ids = 0;
+uint32_t dataset_max_one_hashsize = 65536;
+uint32_t dataset_max_total_hashsize = 16777216;
+uint32_t dataset_used_hashsize = 0;
+
static int DatasetAddwRep(Dataset *set, const uint8_t *data, const uint32_t data_len,
DataRepType *rep);
@@ -629,6 +634,34 @@ Dataset *DatasetFind(const char *name, enum DatasetTypes type)
    return set;
}

+static bool DatasetCheckHashsize(const char *name, uint32_t hash_size)
+{
+    if (dataset_max_one_hashsize > 0 && hash_size > dataset_max_one_hashsize) {
+        SCLogError("hashsize %u in dataset '%s' exceeds configured 'single-hashsize' limit (%u)",
+                hash_size, name, dataset_max_one_hashsize);
+        return false;
+    }
+    // we cannot underflow as we know from conf loading that
+    // dataset_max_total_hashsize >= dataset_max_one_hashsize if dataset_max_total_hashsize > 0
+    if (dataset_max_total_hashsize > 0 &&
+            dataset_max_total_hashsize - hash_size < dataset_used_hashsize) {
+        SCLogError("hashsize %u in dataset '%s' exceeds configured 'total-hashsizes' limit (%u, in "
+                   "use %u)",
+                hash_size, name, dataset_max_total_hashsize, dataset_used_hashsize);
+        return false;
+    }
+
+    return true;
+}
+
+static void DatasetUpdateHashsize(const char *name, uint32_t hash_size)
+{
+    if (dataset_max_total_hashsize > 0) {
+        dataset_used_hashsize += hash_size;
+        SCLogDebug("set %s adding with hash_size %u", name, hash_size);
+    }
+}
+
Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save, const char *load,
        uint64_t memcap, uint32_t hashsize)
{
@@ -682,6 +715,10 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
        hashsize = default_hashsize;
    }

+    if (!DatasetCheckHashsize(name, hashsize)) {
+        goto out_err;
+    }
+
    set = DatasetAlloc(name);
    if (set == NULL) {
        goto out_err;
@@ -755,6 +792,10 @@ Dataset *DatasetGet(const char *name, enum DatasetTypes type, const char *save,
    set->next = sets;
    sets = set;

+    /* hash size accounting */
+    DEBUG_VALIDATE_BUG_ON(set->hash->config.hash_size != hashsize);
+    DatasetUpdateHashsize(set->name, set->hash->config.hash_size);
+
    SCMutexUnlock(&sets_lock);
    return set;
out_err:
@@ -796,6 +837,9 @@ void DatasetReload(void)
            continue;
        }
        set->hidden = true;
+        if (dataset_max_total_hashsize > 0) {
+            dataset_used_hashsize -= set->hash->config.hash_size;
+        }
        SCLogDebug("Set %s at %p hidden successfully", set->name, set);
        set = set->next;
    }
@@ -863,6 +907,27 @@ int DatasetsInit(void)
    uint32_t default_hashsize = 0;
    GetDefaultMemcap(&default_memcap, &default_hashsize);
    if (datasets != NULL) {
+        const char *str = NULL;
+        if (ConfGet("datasets.limits.total-hashsizes", &str) == 1) {
+            if (ParseSizeStringU32(str, &dataset_max_total_hashsize) < 0) {
+                FatalError("failed to parse datasets.limits.total-hashsizes value: %s", str);
+            }
+        }
+        if (ConfGet("datasets.limits.single-hashsize", &str) == 1) {
+            if (ParseSizeStringU32(str, &dataset_max_one_hashsize) < 0) {
+                FatalError("failed to parse datasets.limits.single-hashsize value: %s", str);
+            }
+        }
+        if (dataset_max_total_hashsize > 0 &&
+                dataset_max_total_hashsize < dataset_max_one_hashsize) {
+            FatalError("total-hashsizes (%u) cannot be smaller than single-hashsize (%u)",
+                    dataset_max_total_hashsize, dataset_max_one_hashsize);
+        }
+        if (dataset_max_total_hashsize > 0 && dataset_max_one_hashsize == 0) {
+            // the total limit also applies for single limit
+            dataset_max_one_hashsize = dataset_max_total_hashsize;
+        }
+
        int list_pos = 0;
        ConfNode *iter = NULL;
        TAILQ_FOREACH(iter, &datasets->head, next) {
diff --git a/src/util-thash.c b/src/util-thash.c
index 6443990..3fba3ef 100644
--- a/src/util-thash.c
+++ b/src/util-thash.c
@@ -310,16 +310,11 @@ THashTableContext *THashInit(const char *cnf_prefix, size_t data_size,
    ctx->config.hash_size = hashsize > 0 ? hashsize : THASH_DEFAULT_HASHSIZE;
    /* Reset memcap in case of loading from file to the highest possible value
       unless defined by the rule keyword */
-#ifdef FUZZING_BUILD_MODE_UNSAFE_FOR_PRODUCTION
-    // limit memcap size to default when fuzzing
-    ctx->config.memcap = THASH_DEFAULT_MEMCAP;
-#else
    if (memcap > 0) {
        ctx->config.memcap = memcap;
    } else {
        ctx->config.memcap = reset_memcap ? UINT64_MAX : THASH_DEFAULT_MEMCAP;
    }
-#endif
    ctx->config.prealloc = THASH_DEFAULT_PREALLOC;

    SC_ATOMIC_INIT(ctx->counter);
diff --git a/suricata.yaml.in b/suricata.yaml.in
index 6303991..b218515 100644
--- a/suricata.yaml.in
+++ b/suricata.yaml.in
@@ -1167,6 +1167,14 @@ datasets:
    #memcap: 100mb
    #hashsize: 2048

+  # Limits for per rule dataset instances to avoid rules using too many
+  # resources.
+  limits:
+    # Max value for per dataset `hashsize` setting
+    #single-hashsize: 65536
+    # Max combined hashsize values for all datasets.
+    #total-hashsizes: 16777216
+
rules:
  # Set to true to allow absolute filenames and filenames that use
  # ".." components to reference parent directories in rules that specify
--
2.49.0
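
A note for reviewers: the subtraction-based comparison in DatasetCheckHashsize is what keeps the total-limit check safe on 32-bit counters. The following standalone sketch (not part of the patch; fits() and the sample values are illustrative) contrasts it with a naive addition, which can wrap around and accept an oversized request:

```c
#include <stdint.h>
#include <stdio.h>

/* Illustrative only: mirrors the shape of DatasetCheckHashsize.
 * "used + requested > max" can wrap on uint32_t; the patch instead
 * tests "max - requested < used", which cannot underflow because
 * requested has already passed the single-hashsize gate and conf
 * loading enforces single-hashsize <= total-hashsizes. */
static int fits(uint32_t max_total, uint32_t used, uint32_t requested)
{
    if (requested > max_total) /* stand-in for the single-hashsize check */
        return 0;
    return !(max_total - requested < used);
}

int main(void)
{
    const uint32_t max_total = 16777216; /* default total-hashsizes */
    const uint32_t used = 16000000;      /* already accounted for */
    const uint32_t requested = UINT32_MAX; /* hostile hashsize from a rule */

    /* naive: 16000000 + 4294967295 wraps to 15999999, below the limit */
    printf("naive check:   %s\n",
            (uint32_t)(used + requested) > max_total ? "rejected"
                                                     : "accepted (wrapped!)");
    printf("patched check: %s\n",
            fits(max_total, used, requested) ? "accepted" : "rejected");
    return 0;
}
```

Compiled with any C99 compiler, this prints "accepted (wrapped!)" for the naive form and "rejected" for the patched form, which is the failure mode the CVE-2025-29916 fix guards against.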