Mirror of https://github.com/jerryscript-project/jerryscript.git
Removing pool bitmaps; optimizing search for free pool chunks. loop_arithmetics_1kk.js benchmark: 4.134s -> 3.479s.
commit 0194e63331
parent b6d9ed42ec
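
The change replaces the per-pool allocation bitmap with a free list threaded through the free chunks themselves: each free chunk stores the offset of the next free chunk, so allocating and freeing a chunk become constant-time list operations instead of a scan over bitmap blocks. A minimal, self-contained sketch of the idea (simplified names and fixed sizes for illustration, not the project's exact code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

#define CHUNK_SIZE    16u
#define CHUNKS_NUMBER 64u

typedef struct
{
    uint8_t space[CHUNK_SIZE * CHUNKS_NUMBER]; /* chunk storage */
    uint32_t first_free_chunk;                 /* index of the first free chunk, CHUNKS_NUMBER if none */
    uint32_t free_chunks_number;
} pool_t;

static void
pool_init (pool_t *pool_p)
{
    /* Thread the free list through the chunks themselves:
     * free chunk i stores the index of the next free chunk (i + 1). */
    for (uint32_t i = 0; i < CHUNKS_NUMBER; i++)
    {
        *(uint32_t *) (pool_p->space + i * CHUNK_SIZE) = i + 1;
    }
    pool_p->first_free_chunk = 0;
    pool_p->free_chunks_number = CHUNKS_NUMBER;
}

static uint8_t *
pool_alloc_chunk (pool_t *pool_p)
{
    if (pool_p->free_chunks_number == 0)
    {
        return NULL; /* pool exhausted */
    }

    /* Pop the head of the free list: O(1), no bitmap scan. */
    uint8_t *chunk_p = pool_p->space + pool_p->first_free_chunk * CHUNK_SIZE;
    pool_p->first_free_chunk = *(uint32_t *) chunk_p;
    pool_p->free_chunks_number--;
    return chunk_p;
}

static void
pool_free_chunk (pool_t *pool_p, uint8_t *chunk_p)
{
    uint32_t chunk_index = (uint32_t) ((size_t) (chunk_p - pool_p->space) / CHUNK_SIZE);

    /* Push the chunk back onto the free list. */
    *(uint32_t *) chunk_p = pool_p->first_free_chunk;
    pool_p->first_free_chunk = chunk_index;
    pool_p->free_chunks_number++;
}

int
main (void)
{
    static pool_t pool;
    pool_init (&pool);

    uint8_t *a = pool_alloc_chunk (&pool);
    uint8_t *b = pool_alloc_chunk (&pool);
    assert (a != NULL && b != NULL && a != b);

    pool_free_chunk (&pool, a);
    pool_free_chunk (&pool, b);
    assert (pool.free_chunks_number == CHUNKS_NUMBER);
    return 0;
}

With this layout the only per-pool metadata is first_free_chunk and free_chunks_number; a free chunk pays for the list with the first few bytes of its own storage, which is why the commit asserts chunk_size >= sizeof(mem_pool_chunk_offset_t).
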
@@ -36,11 +36,6 @@
  */
 static const uint8_t mem_pool_free_chunk_magic_num = 0x71;
 
-/**
- * Number of bits in a single bitmap's bits' block.
- */
-static const mword_t mem_bitmap_bits_in_block = sizeof (mword_t) * JERRY_BITSINBYTE;
-
 static void mem_check_pool( mem_pool_state_t *pool_p);
 
 /**
@@ -48,74 +43,70 @@ static void mem_check_pool( mem_pool_state_t *pool_p);
  *
  * Pool will be located in the segment [pool_start; pool_start + pool_size).
  * Part of pool space will be used for bitmap and the rest will store chunks.
- *
- * Warning:
- *         it is incorrect to suppose, that chunk number = pool_size / chunk_size.
  */
 void
 mem_pool_init(mem_pool_state_t *pool_p, /**< pool */
-              size_t chunk_size, /**< size of one chunk */
+              size_t chunk_size, /**< size of pool's chunk */
               uint8_t *pool_start, /**< start of pool space */
               size_t pool_size) /**< pool space size */
 {
     JERRY_ASSERT( pool_p != NULL );
     JERRY_ASSERT( (uintptr_t) pool_start % MEM_ALIGNMENT == 0);
-    JERRY_ASSERT( chunk_size % MEM_ALIGNMENT == 0 );
 
     pool_p->pool_start_p = pool_start;
     pool_p->pool_size = pool_size;
-    pool_p->chunk_size = chunk_size;
 
-    const size_t bits_in_byte = JERRY_BITSINBYTE;
-    const size_t bitmap_area_size_alignment = JERRY_MAX( sizeof (mword_t), MEM_ALIGNMENT);
-
-    /*
-     * Calculation chunks number
-     */
-    size_t bitmap_area_size = 0;
-    size_t chunks_area_size = JERRY_ALIGNDOWN( pool_size - bitmap_area_size, chunk_size);
-    size_t chunks_number = chunks_area_size / chunk_size;
-
-    /* while there is not enough area to hold state of all chunks*/
-    while ( bitmap_area_size * bits_in_byte < chunks_number )
-    {
-        JERRY_ASSERT( bitmap_area_size + chunks_area_size <= pool_size );
-
-        /* correct bitmap area's size and, accordingly, chunks' area's size*/
-        size_t new_bitmap_area_size = bitmap_area_size + bitmap_area_size_alignment;
-        size_t new_chunks_area_size = JERRY_ALIGNDOWN( pool_size - new_bitmap_area_size, chunk_size);
-        size_t new_chunks_number = new_chunks_area_size / chunk_size;
-
-        bitmap_area_size = new_bitmap_area_size;
-        chunks_area_size = new_chunks_area_size;
-        chunks_number = new_chunks_number;
-    }
-
-    /*
-     * Final calculation checks
-     */
-    JERRY_ASSERT( bitmap_area_size * bits_in_byte >= chunks_number );
-    JERRY_ASSERT( chunks_area_size >= chunks_number * chunk_size );
-    JERRY_ASSERT( bitmap_area_size + chunks_area_size <= pool_size );
-
-    pool_p->bitmap_p = (mword_t*) pool_start;
-    pool_p->chunks_p = pool_start + bitmap_area_size;
-
-    JERRY_ASSERT( (uintptr_t) pool_p->chunks_p % MEM_ALIGNMENT == 0 );
-
-    pool_p->chunks_number = chunks_number;
+    switch ( chunk_size )
+    {
+        case 4:
+            pool_p->chunk_size_log = 2;
+            break;
+
+        case 8:
+            pool_p->chunk_size_log = 3;
+            break;
+
+        case 16:
+            pool_p->chunk_size_log = 4;
+            break;
+
+        case 32:
+            pool_p->chunk_size_log = 5;
+            break;
+
+        case 64:
+            pool_p->chunk_size_log = 6;
+            break;
+
+        default:
+            JERRY_UNREACHABLE();
+    }
+
+    JERRY_ASSERT( chunk_size % MEM_ALIGNMENT == 0 );
+    JERRY_ASSERT( chunk_size >= sizeof(mem_pool_chunk_offset_t) );
+
+    size_t chunks_area_size = JERRY_ALIGNDOWN( pool_size, chunk_size);
+    size_t chunks_number = chunks_area_size / chunk_size;
+
+    JERRY_ASSERT( ( (mem_pool_chunk_offset_t) chunks_number ) == chunks_number );
+    pool_p->chunks_number = (mem_pool_chunk_offset_t) chunks_number;
+
+    pool_p->first_free_chunk = 0;
 
     /*
      * All chunks are free right after initialization
      */
-    pool_p->free_chunks_number = chunks_number;
-    __memset( pool_p->bitmap_p, 0, bitmap_area_size);
-
-#ifndef JERRY_NDEBUG
-    __memset( pool_p->chunks_p, mem_pool_free_chunk_magic_num, chunks_area_size);
-#endif /* JERRY_NDEBUG */
+    pool_p->free_chunks_number = pool_p->chunks_number;
+
+    for ( uint32_t chunk_index = 0;
+          chunk_index < chunks_number;
+          chunk_index++ )
+    {
+        mem_pool_chunk_offset_t *next_free_chunk_offset_p =
+            (mem_pool_chunk_offset_t*) ( pool_p->pool_start_p + chunk_size * chunk_index );
+
+        *next_free_chunk_offset_p = chunk_index + 1;
+    }
 
     mem_check_pool( pool_p);
 } /* mem_pool_init */
@@ -126,57 +117,27 @@ mem_pool_init(mem_pool_state_t *pool_p, /**< pool */
 uint8_t*
 mem_pool_alloc_chunk(mem_pool_state_t *pool_p) /**< pool */
 {
     mem_check_pool( pool_p);
 
-    if ( pool_p->free_chunks_number == 0 )
+    if ( unlikely( pool_p->free_chunks_number == 0 ) )
     {
-        return NULL;
+        JERRY_ASSERT( pool_p->first_free_chunk == pool_p->chunks_number );
+
+        return NULL;
     }
 
-    size_t chunk_index = 0;
-    size_t bitmap_block_index = 0;
-
-    while ( chunk_index < pool_p->chunks_number )
-    {
-        if ( ~pool_p->bitmap_p[ bitmap_block_index ] != 0 )
-        {
-            break;
-        } else
-        {
-            bitmap_block_index++;
-            chunk_index += mem_bitmap_bits_in_block;
-        }
-    }
-
-    if ( chunk_index >= pool_p->chunks_number )
-    {
-        /* no free chunks */
-        return NULL;
-    }
-
-    /* found bitmap block with a zero bit */
-    mword_t bit = 1;
-    for ( size_t bit_index = 0;
-          bit_index < mem_bitmap_bits_in_block && chunk_index < pool_p->chunks_number;
-          bit_index++, chunk_index++, bit <<= 1 )
-    {
-        if ( ~pool_p->bitmap_p[ bitmap_block_index ] & bit )
-        {
-            /* found free chunk */
-            pool_p->bitmap_p[ bitmap_block_index ] |= bit;
-
-            uint8_t *chunk_p = &pool_p->chunks_p[ chunk_index * pool_p->chunk_size ];
-            pool_p->free_chunks_number--;
-
-            mem_check_pool( pool_p);
-
-            return chunk_p;
-        }
-    }
-
-    /* that zero bit is at the end of the bitmap and doesn't correspond to any chunk */
-    return NULL;
+    JERRY_ASSERT( pool_p->first_free_chunk < pool_p->chunks_number );
+
+    mem_pool_chunk_offset_t chunk_index = pool_p->first_free_chunk;
+    uint8_t *chunk_p = pool_p->pool_start_p + ( chunk_index << pool_p->chunk_size_log );
+
+    mem_pool_chunk_offset_t *next_free_chunk_offset_p = (mem_pool_chunk_offset_t*) chunk_p;
+    pool_p->first_free_chunk = *next_free_chunk_offset_p;
+    pool_p->free_chunks_number--;
+
+    mem_check_pool( pool_p);
+
+    return chunk_p;
 } /* mem_pool_alloc_chunk */
 
 /**
@@ -184,25 +145,21 @@ mem_pool_alloc_chunk(mem_pool_state_t *pool_p) /**< pool */
  */
 void
 mem_pool_free_chunk(mem_pool_state_t *pool_p, /**< pool */
                     uint8_t *chunk_p) /**< chunk pointer */
 {
     JERRY_ASSERT( pool_p->free_chunks_number < pool_p->chunks_number );
-    JERRY_ASSERT( chunk_p >= pool_p->chunks_p && chunk_p <= pool_p->chunks_p + pool_p->chunks_number * pool_p->chunk_size );
-    JERRY_ASSERT( ( (uintptr_t) chunk_p - (uintptr_t) pool_p->chunks_p ) % pool_p->chunk_size == 0 );
+    JERRY_ASSERT( chunk_p >= pool_p->pool_start_p && chunk_p <= pool_p->pool_start_p + pool_p->chunks_number * ( 1u << pool_p->chunk_size_log ) );
+    JERRY_ASSERT( ( (uintptr_t) chunk_p - (uintptr_t) pool_p->pool_start_p ) % ( 1u << pool_p->chunk_size_log ) == 0 );
 
     mem_check_pool( pool_p);
 
-    size_t chunk_index = (size_t) (chunk_p - pool_p->chunks_p) / pool_p->chunk_size;
-    size_t bitmap_block_index = chunk_index / mem_bitmap_bits_in_block;
-    size_t bitmap_bit_in_block = chunk_index % mem_bitmap_bits_in_block;
-    mword_t bit_mask = ( 1lu << bitmap_bit_in_block );
+    const size_t chunk_byte_offset = (size_t) (chunk_p - pool_p->pool_start_p);
+    const mem_pool_chunk_offset_t chunk_index = (mem_pool_chunk_offset_t) (chunk_byte_offset >> pool_p->chunk_size_log);
+    mem_pool_chunk_offset_t *next_free_chunk_offset_p = (mem_pool_chunk_offset_t*) chunk_p;
 
-#ifndef JERRY_NDEBUG
-    __memset( (uint8_t*) chunk_p, mem_pool_free_chunk_magic_num, pool_p->chunk_size);
-#endif /* JERRY_NDEBUG */
-    JERRY_ASSERT( pool_p->bitmap_p[ bitmap_block_index ] & bit_mask );
+    *next_free_chunk_offset_p = pool_p->first_free_chunk;
 
-    pool_p->bitmap_p[ bitmap_block_index ] &= ~bit_mask;
+    pool_p->first_free_chunk = chunk_index;
     pool_p->free_chunks_number++;
 
     mem_check_pool( pool_p);
@@ -216,35 +173,20 @@ mem_check_pool( mem_pool_state_t __unused *pool_p) /**< pool (unused #ifdef JERR
 {
 #ifndef JERRY_NDEBUG
     JERRY_ASSERT( pool_p->chunks_number != 0 );
+    JERRY_ASSERT( pool_p->chunks_number * ( 1u << pool_p->chunk_size_log ) <= pool_p->pool_size );
     JERRY_ASSERT( pool_p->free_chunks_number <= pool_p->chunks_number );
-    JERRY_ASSERT( (uint8_t*) pool_p->bitmap_p == pool_p->pool_start_p );
-    JERRY_ASSERT( (uint8_t*) pool_p->chunks_p > pool_p->pool_start_p );
-
-    uint8_t free_chunk_template[ pool_p->chunk_size ];
-    __memset( &free_chunk_template, mem_pool_free_chunk_magic_num, sizeof (free_chunk_template));
 
     size_t met_free_chunks_number = 0;
+    mem_pool_chunk_offset_t chunk_index = pool_p->first_free_chunk;
 
-    for ( size_t chunk_index = 0, bitmap_block_index = 0;
-          chunk_index < pool_p->chunks_number;
-          bitmap_block_index++ )
+    while ( chunk_index != pool_p->chunks_number )
     {
-        JERRY_ASSERT( (uint8_t*) & pool_p->bitmap_p[ bitmap_block_index ] < pool_p->chunks_p );
-
-        mword_t bitmap_block = pool_p->bitmap_p[ bitmap_block_index ];
-
-        mword_t bit_mask = 1;
-        for ( size_t bitmap_bit_in_block = 0;
-              chunk_index < pool_p->chunks_number && bitmap_bit_in_block < mem_bitmap_bits_in_block;
-              bitmap_bit_in_block++, bit_mask <<= 1, chunk_index++ )
-        {
-            if ( ~bitmap_block & bit_mask )
-            {
-                met_free_chunks_number++;
-
-                JERRY_ASSERT( __memcmp( &pool_p->chunks_p[ chunk_index * pool_p->chunk_size ], free_chunk_template, pool_p->chunk_size) == 0 );
-            }
-        }
+        uint8_t *chunk_p = pool_p->pool_start_p + ( 1u << pool_p->chunk_size_log ) * chunk_index;
+        mem_pool_chunk_offset_t *next_free_chunk_offset_p = (mem_pool_chunk_offset_t*) chunk_p;
+
+        met_free_chunks_number++;
+
+        chunk_index = *next_free_chunk_offset_p;
     }
 
     JERRY_ASSERT( met_free_chunks_number == pool_p->free_chunks_number );
@@ -254,4 +196,4 @@ mem_check_pool( mem_pool_state_t __unused *pool_p) /**< pool (unused #ifdef JERR
 /**
  * @}
  * @}
  */

@@ -24,6 +24,8 @@
 #ifndef JERRY_MEM_POOL_H
 #define JERRY_MEM_POOL_H
 
+typedef uint32_t mem_pool_chunk_offset_t;
+
 /**
  * State of a memory pool
  *
@@ -34,16 +36,16 @@ typedef struct mem_pool_state_t {
     uint8_t *pool_start_p; /**< first address of pool space */
     size_t pool_size; /**< pool space size */
 
-    size_t chunk_size; /**< size of one chunk */
+    size_t chunk_size_log; /**< log of size of one chunk */
 
-    mword_t *bitmap_p; /**< bitmap - pool chunks' state */
-    uint8_t *chunks_p; /**< chunks with data */
+    mem_pool_chunk_offset_t chunks_number; /**< number of chunks */
+    mem_pool_chunk_offset_t free_chunks_number; /**< number of free chunks */
 
-    size_t chunks_number; /**< number of chunks */
-    size_t free_chunks_number; /**< number of free chunks */
+    mem_pool_chunk_offset_t first_free_chunk; /**< offset of first free chunk
+                                                    from the beginning of the pool */
 
     struct mem_pool_state_t *next_pool_p; /**< pointer to the next pool with same chunk size */
-} mem_pool_state_t;
+} __attribute__((aligned(64))) mem_pool_state_t;
 
 extern void mem_pool_init(mem_pool_state_t *pool_p, size_t chunk_size, uint8_t *pool_start, size_t pool_size);
 extern uint8_t* mem_pool_alloc_chunk(mem_pool_state_t *pool_p);
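
The header now stores the base-2 logarithm of the chunk size instead of the size itself, so index/address conversions in the pool can use shifts rather than multiplication and division. A small self-contained illustration of the arithmetic (hypothetical values, not project code):

#include <assert.h>
#include <stddef.h>
#include <stdint.h>

int
main (void)
{
    uint8_t pool_space[256];
    const size_t chunk_size_log = 4; /* chunk size 16 == 1 << 4 */

    /* index -> address: start + (index << log), as in mem_pool_alloc_chunk */
    uint8_t *chunk_p = pool_space + (3u << chunk_size_log);

    /* address -> index: offset >> log, as in mem_pool_free_chunk */
    size_t chunk_index = (size_t) (chunk_p - pool_space) >> chunk_size_log;

    assert (chunk_index == 3);
    return 0;
}
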
@@ -93,31 +93,30 @@ mem_get_chunk_size( mem_pool_chunk_type_t chunk_type) /**< chunk type */
 void
 mem_pools_init(void)
 {
     for ( uint32_t i = 0; i < MEM_POOL_CHUNK_TYPE__COUNT; i++ )
     {
         mem_pools[ i ] = NULL;
         mem_free_chunks_number[ i ] = 0;
     }
 
     /**
      * Space, at least for four pool headers and a bitmap entry.
      *
      * TODO: Research.
      */
-    size_t pool_space_size = mem_heap_recommend_allocation_size( 4 * sizeof (mem_pool_state_t) + sizeof (mword_t) );
+    size_t pool_space_size = mem_heap_recommend_allocation_size( 4 * sizeof (mem_pool_state_t) );
 
-    mem_space_for_pool_for_pool_headers = mem_heap_alloc_block(pool_space_size,
-                                                               MEM_HEAP_ALLOC_LONG_TERM);
+    mem_space_for_pool_for_pool_headers = mem_heap_alloc_block( pool_space_size, MEM_HEAP_ALLOC_LONG_TERM);
 
     /*
      * Get chunk type, checking that there is a type corresponding to specified size.
      */
     const mem_pool_chunk_type_t chunk_type = mem_size_to_pool_chunk_type( sizeof(mem_pool_state_t));
 
     mem_pool_init(&mem_pool_for_pool_headers,
                   mem_get_chunk_size( chunk_type),
                   mem_space_for_pool_for_pool_headers,
                   pool_space_size);
 
     mem_pools_stat_init();
 } /* mem_pools_init */
@@ -238,7 +237,7 @@ mem_pools_free(mem_pool_chunk_type_t chunk_type, /**< the chunk type */
     /**
      * Search for the pool containing specified chunk.
      */
-    while ( !( chunk_p >= pool_state->chunks_p
+    while ( !( chunk_p >= pool_state->pool_start_p
                && chunk_p <= pool_state->pool_start_p + pool_state->pool_size ) )
     {
         prev_pool_state = pool_state;

@@ -18,6 +18,7 @@
 #include "globals.h"
 #include "mem-allocator.h"
 #include "mem-pool.h"
+#include "mem-poolman.h"
 
 extern void srand (unsigned int __seed);
 extern int rand (void);
@@ -51,7 +52,7 @@ main( int __unused argc,
     mem_pool_state_t pool;
     uint8_t test_pool[test_pool_area_size] __attribute__((aligned(MEM_ALIGNMENT)));
 
-    const size_t chunk_size = MEM_ALIGNMENT * ( ( (size_t) rand() % test_max_chunk_size_divided_by_alignment ) + 1 );
+    const size_t chunk_size = mem_get_chunk_size( rand() % MEM_POOL_CHUNK_TYPE__COUNT );
 
     mem_pool_init( &pool, chunk_size, test_pool, sizeof (test_pool));
 
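
For reference, a sketch of how the updated pool API fits together, mirroring the unit test above; the include set, the 32-byte chunk size, and the presence of mem_pool_free_chunk in mem-pool.h are assumptions for illustration rather than project-verified code:

#include "globals.h"
#include "mem-allocator.h"
#include "mem-pool.h"

/* Pool space must satisfy the alignment checked by mem_pool_init. */
static uint8_t pool_space[1024] __attribute__((aligned(MEM_ALIGNMENT)));

static void
pool_round_trip (void)
{
    mem_pool_state_t pool;

    /* The chunk size must be one of the values handled by the switch in mem_pool_init. */
    mem_pool_init( &pool, 32, pool_space, sizeof (pool_space));

    uint8_t *chunk_p = mem_pool_alloc_chunk( &pool); /* NULL once the pool is exhausted */
    if ( chunk_p != NULL )
    {
        mem_pool_free_chunk( &pool, chunk_p);
    }
}
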