[SCM] Samba Shared Repository - branch master updated
Stefan Metzmacher
metze at samba.org
Fri Apr 8 02:16:02 MDT 2011
The branch, master has been updated
via eb0e276 talloc/testsuite: avoid memory leak reported by valgrind
via f9fdef8 talloc/testsuite: test more talloc_pool related things
via 2146ffd talloc: include valgrind headers if available
via 73cc85a talloc: add TC_INVALIDATE_POOL macro
via 9c9f208 talloc: add TC_UNDEFINE_GROW_CHUNK() macro
via 6126c35 talloc: add TC_INVALIDATE_SHRINK_CHUNK() macro
via 4895f55 talloc: add TC_INVALIDATE_FULL_CHUNK() macro
via efbb358 talloc: use VALGRIND_MAKE_MEM_UNDEFINED() before memmove()
via 1e70439 talloc: optimize talloc_free() and talloc_realloc() for talloc pools
via 73330f8 talloc: add TC_POOL_FIRST_CHUNK() macro
via 13fab67 talloc: add TC_POOL_SPACE_LEFT() macro
via 4084cb7 talloc: add TC_ALIGN16() macro
via d399401 talloc: use TC_HDR_SIZE instead of sizeof(struct talloc_chunk)
from ab0a881 ldb:ldb/common/ldb_modules.c - change the request counter type to be "unsigned"
http://gitweb.samba.org/?p=samba.git;a=shortlog;h=master
- Log -----------------------------------------------------------------
commit eb0e276f3a3b57405cd8cd36c74021350aba9a98
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 15:53:46 2011 +0200
talloc/testsuite: avoid memory leak reported by valgrind
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
Autobuild-User: Stefan Metzmacher <metze at samba.org>
Autobuild-Date: Fri Apr 8 10:15:41 CEST 2011 on sn-devel-104
commit f9fdef870e4c49d9e6c23ba085ba6dbd34ec5469
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu Mar 31 19:50:47 2011 +0200
talloc/testsuite: test more talloc_pool related things
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 2146ffd764499d67e3f0576a2e78a1575cd52d9c
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 16:28:31 2011 +0200
talloc: include valgrind headers if available
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 73cc85ac903387f2c7f8ef2d948b40b57887cf17
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 15:08:20 2011 +0200
talloc: add TC_INVALIDATE_POOL macro
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 9c9f208598d34d700bfc2ed7302b206e863a4c9b
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 13:29:47 2011 +0200
talloc: add TC_UNDEFINE_GROW_CHUNK() macro
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 6126c3506d493cd4836a7539586b74faea8ca0b3
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 13:28:11 2011 +0200
talloc: add TC_INVALIDATE_SHRINK_CHUNK() macro
This invalidates the unused bytes if we shrink memory.
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 4895f55f0604a1851d45c24a8a584a10170d5917
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 11:47:57 2011 +0200
talloc: add TC_INVALIDATE_FULL_CHUNK() macro
This makes it easier to mark a talloc pointer as
invalid.
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit efbb35824e3845c3cdefea328dd1bf67d0f9087d
Author: Stefan Metzmacher <metze at samba.org>
Date: Mon Apr 4 16:46:21 2011 +0200
talloc: use VALGRIND_MAKE_MEM_UNDEFINED() before memmove()
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 1e70439f770181ca16a0749c2164d0237d3bfd0a
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu Mar 31 16:58:46 2011 +0200
talloc: optimize talloc_free() and talloc_realloc() for talloc pools
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 73330f8436707b5ab46c0720ea735908948f5d27
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu Mar 31 16:56:18 2011 +0200
talloc: add TC_POOL_FIRST_CHUNK() macro
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 13fab67995a0b836b92847536768703dac391b57
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu Mar 31 16:51:40 2011 +0200
talloc: add TC_POOL_SPACE_LEFT() macro
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit 4084cb723c4e14898a411fbd1cf8fa878ff290ff
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu Mar 31 16:55:00 2011 +0200
talloc: add TC_ALIGN16() macro
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
commit d39940113a24078bd563b0cfce4225648c632544
Author: Stefan Metzmacher <metze at samba.org>
Date: Thu Mar 31 15:18:55 2011 +0200
talloc: use TC_HDR_SIZE instead of sizeof(struct talloc_chunk)
As this includes the padding to 16 bytes.
metze
Signed-off-By: Andrew Tridgell <tridge at samba.org>
-----------------------------------------------------------------------
Summary of changes:
lib/talloc/talloc.c | 338 ++++++++++++++++++++++++++++++++++++++++++------
lib/talloc/testsuite.c | 57 ++++++++-
2 files changed, 351 insertions(+), 44 deletions(-)
Changeset truncated at 500 lines:
diff --git a/lib/talloc/talloc.c b/lib/talloc/talloc.c
index c616f34..6f952dc 100644
--- a/lib/talloc/talloc.c
+++ b/lib/talloc/talloc.c
@@ -45,6 +45,15 @@
#endif
#endif
+/* Special macros that are no-ops except when run under Valgrind on
+ * x86. They've moved a little bit from valgrind 1.0.4 to 1.9.4 */
+#ifdef HAVE_VALGRIND_MEMCHECK_H
+ /* memcheck.h includes valgrind.h */
+#include <valgrind/memcheck.h>
+#elif defined(HAVE_VALGRIND_H)
+#include <valgrind.h>
+#endif
+
/* use this to force every realloc to change the pointer, to stress test
code that might not cope */
#define ALWAYS_REALLOC 0
@@ -115,6 +124,77 @@ static struct {
#define TALLOC_FILL_ENV "TALLOC_FREE_FILL"
+/*
+ * do not wipe the header, to allow the
+ * double-free logic to still work
+ */
+#define TC_INVALIDATE_FULL_FILL_CHUNK(_tc) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = (_tc)->size; \
+ char *_fptr = TC_PTR_FROM_CHUNK(_tc); \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
+/* Mark the whole chunk as not accessable */
+#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { \
+ size_t _flen = TC_HDR_SIZE + (_tc)->size; \
+ char *_fptr = (char *)(_tc); \
+ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
+} while(0)
+#else
+#define TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc) do { } while (0)
+#endif
+
+#define TC_INVALIDATE_FULL_CHUNK(_tc) do { \
+ TC_INVALIDATE_FULL_FILL_CHUNK(_tc); \
+ TC_INVALIDATE_FULL_VALGRIND_CHUNK(_tc); \
+} while (0)
+
+#define TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = (_tc)->size - (_new_size); \
+ char *_fptr = TC_PTR_FROM_CHUNK(_tc); \
+ _fptr += (_new_size); \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
+/* Mark the unused bytes not accessable */
+#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { \
+ size_t _flen = (_tc)->size - (_new_size); \
+ char *_fptr = TC_PTR_FROM_CHUNK(_tc); \
+ _fptr += (_new_size); \
+ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
+} while (0)
+#else
+#define TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
+#endif
+
+#define TC_INVALIDATE_SHRINK_CHUNK(_tc, _new_size) do { \
+ TC_INVALIDATE_SHRINK_FILL_CHUNK(_tc, _new_size); \
+ TC_INVALIDATE_SHRINK_VALGRIND_CHUNK(_tc, _new_size); \
+} while (0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
+/* Mark the new bytes as undefined */
+#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { \
+ size_t _old_used = TC_HDR_SIZE + (_tc)->size; \
+ size_t _new_used = TC_HDR_SIZE + (_new_size); \
+ size_t _flen = _new_used - _old_used; \
+ char *_fptr = _old_used + (char *)(_tc); \
+ VALGRIND_MAKE_MEM_UNDEFINED(_fptr, _flen); \
+} while (0)
+#else
+#define TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size) do { } while (0)
+#endif
+
+#define TC_UNDEFINE_GROW_CHUNK(_tc, _new_size) do { \
+ TC_UNDEFINE_GROW_VALGRIND_CHUNK(_tc, _new_size); \
+} while (0)
+
struct talloc_reference_handle {
struct talloc_reference_handle *next, *prev;
void *ptr;
@@ -147,7 +227,8 @@ struct talloc_chunk {
};
/* 16 byte alignment seems to keep everyone happy */
-#define TC_HDR_SIZE ((sizeof(struct talloc_chunk)+15)&~15)
+#define TC_ALIGN16(s) (((s)+15)&~15)
+#define TC_HDR_SIZE TC_ALIGN16(sizeof(struct talloc_chunk))
#define TC_PTR_FROM_CHUNK(tc) ((void *)(TC_HDR_SIZE + (char*)tc))
_PUBLIC_ int talloc_version_major(void)
@@ -332,9 +413,47 @@ _PUBLIC_ const char *talloc_parent_name(const void *ptr)
#define TALLOC_POOL_HDR_SIZE 16
+#define TC_POOL_SPACE_LEFT(_pool_tc) \
+ PTR_DIFF(TC_HDR_SIZE + (_pool_tc)->size + (char *)(_pool_tc), \
+ (_pool_tc)->pool)
+
+#define TC_POOL_FIRST_CHUNK(_pool_tc) \
+ ((void *)(TC_HDR_SIZE + TALLOC_POOL_HDR_SIZE + (char *)(_pool_tc)))
+
+#define TC_POOLMEM_CHUNK_SIZE(_tc) \
+ TC_ALIGN16(TC_HDR_SIZE + (_tc)->size)
+
+#define TC_POOLMEM_NEXT_CHUNK(_tc) \
+ ((void *)(TC_POOLMEM_CHUNK_SIZE(tc) + (char*)(_tc)))
+
+/* Mark the whole remaining pool as not accessable */
+#define TC_INVALIDATE_FILL_POOL(_pool_tc) do { \
+ if (unlikely(talloc_fill.enabled)) { \
+ size_t _flen = TC_POOL_SPACE_LEFT(_pool_tc); \
+ char *_fptr = (_pool_tc)->pool; \
+ memset(_fptr, talloc_fill.fill_value, _flen); \
+ } \
+} while(0)
+
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
+/* Mark the whole remaining pool as not accessable */
+#define TC_INVALIDATE_VALGRIND_POOL(_pool_tc) do { \
+ size_t _flen = TC_POOL_SPACE_LEFT(_pool_tc); \
+ char *_fptr = (_pool_tc)->pool; \
+ VALGRIND_MAKE_MEM_NOACCESS(_fptr, _flen); \
+} while(0)
+#else
+#define TC_INVALIDATE_VALGRIND_POOL(_pool_tc) do { } while (0)
+#endif
+
+#define TC_INVALIDATE_POOL(_pool_tc) do { \
+ TC_INVALIDATE_FILL_POOL(_pool_tc); \
+ TC_INVALIDATE_VALGRIND_POOL(_pool_tc); \
+} while (0)
+
static unsigned int *talloc_pool_objectcount(struct talloc_chunk *tc)
{
- return (unsigned int *)((char *)tc + sizeof(struct talloc_chunk));
+ return (unsigned int *)((char *)tc + TC_HDR_SIZE);
}
/*
@@ -364,13 +483,12 @@ static struct talloc_chunk *talloc_alloc_pool(struct talloc_chunk *parent,
return NULL;
}
- space_left = ((char *)pool_ctx + TC_HDR_SIZE + pool_ctx->size)
- - ((char *)pool_ctx->pool);
+ space_left = TC_POOL_SPACE_LEFT(pool_ctx);
/*
* Align size to 16 bytes
*/
- chunk_size = ((size + 15) & ~15);
+ chunk_size = TC_ALIGN16(size);
if (space_left < chunk_size) {
return NULL;
@@ -461,13 +579,11 @@ _PUBLIC_ void *talloc_pool(const void *context, size_t size)
tc = talloc_chunk_from_ptr(result);
tc->flags |= TALLOC_FLAG_POOL;
- tc->pool = (char *)result + TALLOC_POOL_HDR_SIZE;
+ tc->pool = TC_POOL_FIRST_CHUNK(tc);
*talloc_pool_objectcount(tc) = 1;
-#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
- VALGRIND_MAKE_MEM_NOACCESS(tc->pool, size);
-#endif
+ TC_INVALIDATE_POOL(tc);
return result;
}
@@ -579,7 +695,7 @@ static inline int _talloc_free_internal(void *ptr, const char *location)
}
/* possibly initialised the talloc fill value */
- if (!talloc_fill.initialised) {
+ if (unlikely(!talloc_fill.initialised)) {
const char *fill = getenv(TALLOC_FILL_ENV);
if (fill != NULL) {
talloc_fill.enabled = true;
@@ -683,34 +799,49 @@ static inline int _talloc_free_internal(void *ptr, const char *location)
if (tc->flags & (TALLOC_FLAG_POOL|TALLOC_FLAG_POOLMEM)) {
struct talloc_chunk *pool;
+ void *next_tc = NULL;
unsigned int *pool_object_count;
- pool = (tc->flags & TALLOC_FLAG_POOL)
- ? tc : (struct talloc_chunk *)tc->pool;
+ if (unlikely(tc->flags & TALLOC_FLAG_POOL)) {
+ pool = tc;
+ } else {
+ pool = (struct talloc_chunk *)tc->pool;
+ next_tc = TC_POOLMEM_NEXT_CHUNK(tc);
+
+ TC_INVALIDATE_FULL_CHUNK(tc);
+ }
pool_object_count = talloc_pool_objectcount(pool);
- if (*pool_object_count == 0) {
+ if (unlikely(*pool_object_count == 0)) {
talloc_abort("Pool object count zero!");
return 0;
}
*pool_object_count -= 1;
- if (*pool_object_count == 0) {
- if (talloc_fill.enabled) {
- memset(TC_PTR_FROM_CHUNK(pool), talloc_fill.fill_value, pool->size);
- }
+ if (unlikely(*pool_object_count == 1)) {
+ /*
+ * if there is just object left in the pool
+ * it means this is the pool itself and
+ * the rest is available for new objects
+ * again.
+ */
+ pool->pool = TC_POOL_FIRST_CHUNK(pool);
+ TC_INVALIDATE_POOL(pool);
+ } else if (unlikely(*pool_object_count == 0)) {
+ TC_INVALIDATE_FULL_CHUNK(pool);
free(pool);
+ } else if (pool->pool == next_tc) {
+ /*
+ * if pool->pool still points to end of
+ * 'tc' (which is stored in the 'next_tc' variable),
+ * we can reclaim the memory of 'tc'.
+ */
+ pool->pool = tc;
}
- }
- else {
- if (talloc_fill.enabled) {
- /* don't wipe the header, to allow the
- double-free logic to still work
- */
- memset(TC_PTR_FROM_CHUNK(tc), talloc_fill.fill_value, tc->size);
- }
+ } else {
+ TC_INVALIDATE_FULL_CHUNK(tc);
free(tc);
}
return 0;
@@ -1108,15 +1239,6 @@ _PUBLIC_ void talloc_free_children(void *ptr)
_talloc_steal_internal(new_parent, child);
}
}
-
- if ((tc->flags & TALLOC_FLAG_POOL)
- && (*talloc_pool_objectcount(tc) == 1)) {
- tc->pool = ((char *)tc + TC_HDR_SIZE + TALLOC_POOL_HDR_SIZE);
-#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_NOACCESS)
- VALGRIND_MAKE_MEM_NOACCESS(
- tc->pool, tc->size - TALLOC_POOL_HDR_SIZE);
-#endif
- }
}
/*
@@ -1197,6 +1319,7 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
struct talloc_chunk *tc;
void *new_ptr;
bool malloced = false;
+ struct talloc_chunk *pool_tc = NULL;
/* size zero is equivalent to free() */
if (unlikely(size == 0)) {
@@ -1225,27 +1348,140 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
return NULL;
}
+ /* don't let anybody try to realloc a talloc_pool */
+ if (unlikely(tc->flags & TALLOC_FLAG_POOLMEM)) {
+ pool_tc = (struct talloc_chunk *)tc->pool;
+ }
+
+#if (ALWAYS_REALLOC == 0)
/* don't shrink if we have less than 1k to gain */
- if ((size < tc->size) && ((tc->size - size) < 1024)) {
- tc->size = size;
+ if (size < tc->size) {
+ if (pool_tc) {
+ void *next_tc = TC_POOLMEM_NEXT_CHUNK(tc);
+ TC_INVALIDATE_SHRINK_CHUNK(tc, size);
+ tc->size = size;
+ if (next_tc == pool_tc->pool) {
+ pool_tc->pool = TC_POOLMEM_NEXT_CHUNK(tc);
+ }
+ return ptr;
+ } else if ((tc->size - size) < 1024) {
+ TC_INVALIDATE_SHRINK_CHUNK(tc, size);
+ /* do not shrink if we have less than 1k to gain */
+ tc->size = size;
+ return ptr;
+ }
+ } else if (tc->size == size) {
+ /*
+ * do not change the pointer if it is exactly
+ * the same size.
+ */
return ptr;
}
+#endif
/* by resetting magic we catch users of the old memory */
tc->flags |= TALLOC_FLAG_FREE;
#if ALWAYS_REALLOC
- new_ptr = malloc(size + TC_HDR_SIZE);
- if (new_ptr) {
- memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
- free(tc);
+ if (pool_tc) {
+ new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE);
+ *talloc_pool_objectcount(pool_tc) -= 1;
+
+ if (new_ptr == NULL) {
+ new_ptr = malloc(TC_HDR_SIZE+size);
+ malloced = true;
+ }
+
+ if (new_ptr) {
+ memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
+ TC_INVALIDATE_FULL_CHUNK(tc);
+ }
+ } else {
+ new_ptr = malloc(size + TC_HDR_SIZE);
+ if (new_ptr) {
+ memcpy(new_ptr, tc, MIN(tc->size, size) + TC_HDR_SIZE);
+ free(tc);
+ }
}
#else
- if (tc->flags & TALLOC_FLAG_POOLMEM) {
+ if (pool_tc) {
+ void *next_tc = TC_POOLMEM_NEXT_CHUNK(tc);
+ size_t old_chunk_size = TC_POOLMEM_CHUNK_SIZE(tc);
+ size_t new_chunk_size = TC_ALIGN16(TC_HDR_SIZE + size);
+ size_t space_needed;
+ size_t space_left;
+
+ if (*talloc_pool_objectcount(pool_tc) == 2) {
+ /*
+ * optimize for the case where 'tc' is the only
+ * chunk in the pool.
+ */
+ space_needed = new_chunk_size;
+ space_left = pool_tc->size - TALLOC_POOL_HDR_SIZE;
+
+ if (space_left >= space_needed) {
+ size_t old_used = TC_HDR_SIZE + tc->size;
+ size_t new_used = TC_HDR_SIZE + size;
+ pool_tc->pool = TC_POOL_FIRST_CHUNK(pool_tc);
+#if defined(DEVELOPER) && defined(VALGRIND_MAKE_MEM_UNDEFINED)
+ /*
+ * we need to prepare the memmove into
+ * the unaccessable area.
+ */
+ {
+ size_t diff = PTR_DIFF(tc, pool_tc->pool);
+ size_t flen = MIN(diff, old_used);
+ char *fptr = (char *)pool_tc->pool;
+ VALGRIND_MAKE_MEM_UNDEFINED(fptr, flen);
+ }
+#endif
+ memmove(pool_tc->pool, tc, old_used);
+ new_ptr = pool_tc->pool;
+
+ TC_UNDEFINE_GROW_CHUNK(tc, size);
+
+ /*
+ * first we do not align the pool pointer
+ * because we want to invalidate the padding
+ * too.
+ */
+ pool_tc->pool = new_used + (char *)new_ptr;
+ TC_INVALIDATE_POOL(pool_tc);
+
+ /* now the aligned pointer */
+ pool_tc->pool = new_chunk_size + (char *)new_ptr;
+ goto got_new_ptr;
+ }
+
+ next_tc = NULL;
+ }
+
+ if (new_chunk_size == old_chunk_size) {
+ TC_UNDEFINE_GROW_CHUNK(tc, size);
+ tc->flags &= ~TALLOC_FLAG_FREE;
+ tc->size = size;
+ return ptr;
+ }
+
+ if (next_tc == pool_tc->pool) {
+ /*
+ * optimize for the case where 'tc' is the last
+ * chunk in the pool.
+ */
+ space_needed = new_chunk_size - old_chunk_size;
+ space_left = TC_POOL_SPACE_LEFT(pool_tc);
+
+ if (space_left >= space_needed) {
+ TC_UNDEFINE_GROW_CHUNK(tc, size);
+ tc->flags &= ~TALLOC_FLAG_FREE;
+ tc->size = size;
+ pool_tc->pool = TC_POOLMEM_NEXT_CHUNK(tc);
+ return ptr;
+ }
+ }
new_ptr = talloc_alloc_pool(tc, size + TC_HDR_SIZE);
- *talloc_pool_objectcount((struct talloc_chunk *)
- (tc->pool)) -= 1;
+ *talloc_pool_objectcount(pool_tc) -= 1;
if (new_ptr == NULL) {
new_ptr = malloc(TC_HDR_SIZE+size);
@@ -1254,11 +1490,27 @@ _PUBLIC_ void *_talloc_realloc(const void *context, void *ptr, size_t size, cons
if (new_ptr) {
memcpy(new_ptr, tc, MIN(tc->size,size) + TC_HDR_SIZE);
+ TC_INVALIDATE_FULL_CHUNK(tc);
+
+ if (*talloc_pool_objectcount(pool_tc) == 1) {
+ /*
+ * If the pool is empty now reclaim everything.
+ */
+ pool_tc->pool = TC_POOL_FIRST_CHUNK(pool_tc);
+ TC_INVALIDATE_POOL(pool_tc);
+ } else if (next_tc == pool_tc->pool) {
+ /*
+ * If it was reallocated and tc was the last
+ * chunk, we can reclaim the memory of tc.
+ */
+ pool_tc->pool = tc;
+ }
}
}
else {
new_ptr = realloc(tc, size + TC_HDR_SIZE);
}
+got_new_ptr:
#endif
if (unlikely(!new_ptr)) {
tc->flags &= ~TALLOC_FLAG_FREE;
diff --git a/lib/talloc/testsuite.c b/lib/talloc/testsuite.c
index ee6256b..ba583ab 100644
--- a/lib/talloc/testsuite.c
+++ b/lib/talloc/testsuite.c
@@ -1123,6 +1123,7 @@ static bool test_pool(void)
{
void *pool;
void *p1, *p2, *p3, *p4;
+ void *p2_2;
pool = talloc_pool(NULL, 1024);
@@ -1131,6 +1132,60 @@ static bool test_pool(void)
p3 = talloc_size(p1, 50);
p4 = talloc_size(p3, 1000);
+#if 1 /* this relies on ALWAYS_REALLOC == 0 in talloc.c */
+ p2_2 = talloc_realloc_size(pool, p2, 20+1);
+ torture_assert("pool realloc 20+1", p2_2 == p2, "failed: pointer changed");
+ p2_2 = talloc_realloc_size(pool, p2, 20-1);
+ torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed");
+ p2_2 = talloc_realloc_size(pool, p2, 20-1);
+ torture_assert("pool realloc 20-1", p2_2 == p2, "failed: pointer changed");
+
+ talloc_free(p3);
+
+ /* this should reclaim the memory of p4 and p3 */
+ p2_2 = talloc_realloc_size(pool, p2, 400);
+ torture_assert("pool realloc 400", p2_2 == p2, "failed: pointer changed");
+
+ talloc_free(p1);
+
+ /* this should reclaim the memory of p1 */
+ p2_2 = talloc_realloc_size(pool, p2, 800);
+ torture_assert("pool realloc 800", p2_2 == p1, "failed: pointer not changed");
--
Samba Shared Repository
More information about the samba-cvs
mailing list