From 2b595a9c15cc44439005a80ed8403ea7b72e7753 Mon Sep 17 00:00:00 2001
From: GrieferAtWork
Date: Fri, 24 Nov 2023 16:30:40 +0100
Subject: [PATCH] Fix missing overflow checks when allocating large tuples

---
 include/deemon/tuple.h     | 15 +++++++++++++++
 src/deemon/objects/tuple.c | 27 ++++++++-------------------
 util/test-errors.dee       |  4 ++--
 3 files changed, 25 insertions(+), 21 deletions(-)

diff --git a/include/deemon/tuple.h b/include/deemon/tuple.h
index 57deae759..736838980 100644
--- a/include/deemon/tuple.h
+++ b/include/deemon/tuple.h
@@ -22,6 +22,8 @@
 
 #include "api.h"
 
+#include <hybrid/overflow.h>
+
 #include 
 #include 
 
@@ -52,6 +54,19 @@ struct Dee_tuple_object {
 #define DeeTuple_GET(ob, i)    ((DeeTupleObject *)Dee_REQUIRES_OBJECT(ob))->t_elem[i]
 #define DeeTuple_SET(ob, i, v) (void)(((DeeTupleObject *)Dee_REQUIRES_OBJECT(ob))->t_elem[i] = (DeeObject *)Dee_REQUIRES_OBJECT(v))
 
+/* Same as `DeeTuple_SIZEOF()', but makes sure that no overflow takes place. */
+#define DeeTuple_SIZEOF_SAFE(n_items) DeeTuple_SIZEOF_SAFE(n_items)
+LOCAL ATTR_CONST WUNUSED size_t
+(DCALL DeeTuple_SIZEOF_SAFE)(size_t n_items) {
+	size_t result;
+	if unlikely(__hybrid_overflow_umul(n_items, sizeof(DREF DeeObject *), &result))
+		result = (size_t)-1;
+	if unlikely(__hybrid_overflow_uadd(result, COMPILER_OFFSETOF(DeeTupleObject, t_elem), &result))
+		result = (size_t)-1;
+	return result;
+}
+
+
 /* Define a statically allocated tuple:
  * >> PRIVATE WUNUSED DREF DeeObject *DCALL get_my_tuple(void) {
  * >>     PRIVATE DEFINE_TUPLE(my_tuple, 2, { Dee_EmptyString, Dee_EmptyString });
diff --git a/src/deemon/objects/tuple.c b/src/deemon/objects/tuple.c
index 6a1d19086..7a643cfb0 100644
--- a/src/deemon/objects/tuple.c
+++ b/src/deemon/objects/tuple.c
@@ -171,8 +171,7 @@ DeeTuple_NewUninitialized(size_t n) {
 		}
 	}
 #endif /* CONFIG_TUPLE_CACHE_MAXCOUNT */
-	result = (DREF Tuple *)DeeObject_Malloc(offsetof(Tuple, t_elem) +
-	                                        (n * sizeof(DeeObject *)));
+	result = (DREF Tuple *)DeeObject_Malloc(DeeTuple_SIZEOF_SAFE(n));
 	if unlikely(!result)
 		goto done;
 	result->t_size = n;
@@ -215,8 +214,7 @@ DeeTuple_TryNewUninitialized(size_t n) {
 		}
 	}
 #endif /* CONFIG_TUPLE_CACHE_MAXCOUNT != 0 */
-	result = (DREF Tuple *)DeeObject_TryMalloc(offsetof(Tuple, t_elem) +
-	                                           (n * sizeof(DeeObject *)));
+	result = (DREF Tuple *)DeeObject_TryMalloc(DeeTuple_SIZEOF_SAFE(n));
 	if unlikely(!result)
 		goto done;
 	result->t_size = n;
@@ -337,9 +335,7 @@ DeeTuple_ResizeUninitialized(/*inherit(on_success)*/ DREF Tuple *__restrict self
 #endif /* CONFIG_TUPLE_CACHE_MAXCOUNT */
 
 	/* Resize the old tuple. */
-	new_tuple = (DREF Tuple *)DeeObject_Realloc(self,
-	                                            offsetof(Tuple, t_elem) +
-	                                            new_size * sizeof(DREF DeeObject *));
+	new_tuple = (DREF Tuple *)DeeObject_Realloc(self, DeeTuple_SIZEOF_SAFE(new_size));
 	if unlikely(!new_tuple)
 		goto err;
 #ifndef NDEBUG
@@ -417,9 +413,7 @@ DeeTuple_TryResizeUninitialized(/*inherit(on_success)*/ DREF Tuple *__restrict s
 #endif /* CONFIG_TUPLE_CACHE_MAXCOUNT */
 
 	/* Try to resize the old tuple. */
-	new_tuple = (DREF Tuple *)DeeObject_TryRealloc(self,
-	                                               offsetof(Tuple, t_elem) +
-	                                               new_size * sizeof(DREF DeeObject *));
+	new_tuple = (DREF Tuple *)DeeObject_TryRealloc(self, DeeTuple_SIZEOF_SAFE(new_size));
 	if unlikely(!new_tuple)
 		goto err;
 #ifndef NDEBUG
@@ -489,9 +483,7 @@ DeeTuple_TruncateUninitialized(/*inherit(always)*/ DREF Tuple *__restrict self,
 	}
 #endif /* CONFIG_TUPLE_CACHE_MAXCOUNT */
 	/* Try to resize the old tuple. */
-	new_tuple = (DREF Tuple *)DeeObject_TryRealloc(self,
-	                                               offsetof(Tuple, t_elem) +
-	                                               new_size * sizeof(DREF DeeObject *));
+	new_tuple = (DREF Tuple *)DeeObject_TryRealloc(self, DeeTuple_SIZEOF(new_size));
 	if unlikely(!new_tuple)
 		new_tuple = (DREF Tuple *)self;
 	new_tuple->t_size = new_size;
@@ -697,9 +689,7 @@ DeeTuple_FromIterator(DeeObject *__restrict self) {
 	}
 #endif /* CONFIG_TUPLE_CACHE_MAXCOUNT */
 	ASSERT(result->ob_refcnt == 1);
-	next = (DREF DeeObject *)DeeObject_TryRealloc(result,
-	                                              offsetof(Tuple, t_elem) +
-	                                              used_size * sizeof(DREF DeeObject *));
+	next = (DREF DeeObject *)DeeObject_TryRealloc(result, DeeTuple_SIZEOF(used_size));
 	if likely(next)
 		result = (DREF Tuple *)next;
 	result->t_size = used_size;
@@ -1496,7 +1486,7 @@ tuple_getrange_in(Tuple *__restrict self,
 #ifdef __OPTIMIZE_SIZE__
 	return tuple_getrange_i(self, begin, SSIZE_MAX);
 #else /* __OPTIMIZE_SIZE__ */
-	size_t start, range_size;
+	size_t start;
 	start = DeeSeqRange_Clamp_n(begin, self->t_size);
 	if unlikely(start == 0)
 		return_reference((DeeObject *)self);
@@ -1810,8 +1800,7 @@ tuple_hash(Tuple *__restrict self) {
 
 PRIVATE WUNUSED NONNULL((1)) DREF DeeObject *DCALL
 tuple_sizeof(Tuple *self) {
-	return DeeInt_NewSize(offsetof(Tuple, t_elem) +
-	                      (self->t_size * sizeof(DeeObject *)));
+	return DeeInt_NewSize(DeeTuple_SIZEOF(self->t_size));
 }
 
 PRIVATE WUNUSED NONNULL((1)) DREF DeeObject *DCALL
diff --git a/util/test-errors.dee b/util/test-errors.dee
index d97eeee65..1e5d301ce 100644
--- a/util/test-errors.dee
+++ b/util/test-errors.dee
@@ -298,8 +298,8 @@ function main() {
 	//invokeTypeMembers(deemon.string);
 	//invokeTypeMembers(deemon.Bytes);
 	//invokeTypeMembers(deemon.int);
-	invokeTypeMembers(deemon.List);
-	//invokeTypeMembers(deemon.Tuple);
+	//invokeTypeMembers(deemon.List);
+	invokeTypeMembers(deemon.Tuple);
 	//invokeTypeMembers(deemon.Sequence);
 	//invokeTypeMembers(deemon.Object);
 	//invokeTypeMembers(deemon.bool);
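Note on the pattern: DeeTuple_SIZEOF_SAFE() clamps the computed allocation size to SIZE_MAX whenever the element-count multiplication or the header addition wraps, so DeeObject_Malloc() / DeeObject_(Try)Realloc() fail cleanly instead of returning a buffer that is smaller than the requested element count needs. The standalone sketch below shows the same idea outside of deemon; it is illustrative only and substitutes the GCC/Clang __builtin_mul_overflow / __builtin_add_overflow intrinsics and plain malloc() for the __hybrid_overflow_*() macros and DeeObject_Malloc() used above, with a made-up toy_tuple struct standing in for DeeTupleObject.

	#include <stddef.h>
	#include <stdlib.h>

	/* Illustrative stand-in for DeeTupleObject: a fixed header
	 * followed by a flexible array of element pointers. */
	struct toy_tuple {
		size_t t_size;
		void  *t_elem[]; /* t_size elements follow the header */
	};

	/* Same idea as DeeTuple_SIZEOF_SAFE(): compute
	 *   offsetof(struct toy_tuple, t_elem) + n_items * sizeof(void *)
	 * but clamp to SIZE_MAX if either step overflows, so that the
	 * subsequent allocation is guaranteed to fail. */
	static size_t toy_tuple_sizeof_safe(size_t n_items) {
		size_t result;
		if (__builtin_mul_overflow(n_items, sizeof(void *), &result))
			result = (size_t)-1;
		if (__builtin_add_overflow(result, offsetof(struct toy_tuple, t_elem), &result))
			result = (size_t)-1;
		return result;
	}

	/* Allocate an uninitialized toy tuple; returns NULL for absurdly
	 * large n_items instead of under-allocating after a silent wrap. */
	static struct toy_tuple *toy_tuple_alloc(size_t n_items) {
		struct toy_tuple *t = malloc(toy_tuple_sizeof_safe(n_items));
		if (t)
			t->t_size = n_items;
		return t;
	}

	int main(void) {
		/* A sane request succeeds... */
		struct toy_tuple *ok = toy_tuple_alloc(4);
		/* ...while an overflowing one fails instead of under-allocating. */
		struct toy_tuple *bad = toy_tuple_alloc((size_t)-1 / sizeof(void *) + 1);
		free(ok);
		return bad == NULL ? 0 : 1;
	}

Without the clamp, a request such as n_items = SIZE_MAX / sizeof(void *) + 1 wraps the multiplication to a tiny value: the allocation "succeeds", and every later store into t_elem[i] writes past the real end of the buffer. With the clamp, the same request simply fails.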