Compacted pool header to 8 bytes. Replaced typed pool chunks with fixed-size untyped ones. loop_arithmetics_1kk.js benchmark: 2.98517 -> 2.9443.

Ruben Ayrapetyan 2014-08-08 23:11:02 +04:00
parent 49a809d56f
commit 7b04e9eaeb
14 changed files with 322 additions and 397 deletions

View File

@ -18,6 +18,7 @@
*/
#include "globals.h"
#include "jerry-libc.h"
#include "mem-allocator.h"
#include "mem-heap.h"
#include "mem-poolman.h"
@ -54,6 +55,25 @@ mem_finalize( bool is_show_mem_stats) /**< show heap memory stats
if (is_show_mem_stats)
{
mem_heap_print( false, false, true);
#ifdef MEM_STATS
mem_pools_stats_t stats;
mem_pools_get_stats( &stats);
__printf("Pools stats:\n");
__printf(" Chunk size: %u\n"
" Pools: %lu\n"
" Allocated chunks: %lu\n"
" Free chunks: %lu\n"
" Peak pools: %lu\n"
" Peak allocated chunks: %lu\n\n",
MEM_POOL_CHUNK_SIZE,
stats.pools_count,
stats.allocated_chunks,
stats.free_chunks,
stats.peak_pools_count,
stats.peak_allocated_chunks);
#endif /* MEM_STATS */
}
mem_heap_finalize();
@ -62,8 +82,46 @@ mem_finalize( bool is_show_mem_stats) /**< show heap memory stats
/**
* Get base pointer for allocation area.
*/
uintptr_t
static uintptr_t
mem_get_base_pointer( void)
{
return (uintptr_t) mem_heap_area;
} /* mem_get_base_pointer */
/**
* Compress pointer.
*/
uintptr_t
mem_compress_pointer(void *pointer) /**< pointer to compress */
{
JERRY_ASSERT( pointer != NULL );
uintptr_t int_ptr = (uintptr_t) pointer;
JERRY_ASSERT(int_ptr % MEM_ALIGNMENT == 0);
int_ptr -= mem_get_base_pointer();
int_ptr >>= MEM_ALIGNMENT_LOG;
JERRY_ASSERT((int_ptr & ~((1u << MEM_HEAP_OFFSET_LOG) - 1)) == 0);
JERRY_ASSERT( int_ptr != MEM_COMPRESSED_POINTER_NULL );
return int_ptr;
} /* mem_compress_pointer */
/**
* Decompress pointer.
*/
void*
mem_decompress_pointer(uintptr_t compressed_pointer) /**< pointer to decompress */
{
JERRY_ASSERT( compressed_pointer != MEM_COMPRESSED_POINTER_NULL );
uintptr_t int_ptr = compressed_pointer;
int_ptr <<= MEM_ALIGNMENT_LOG;
int_ptr += mem_get_base_pointer();
return (void*) int_ptr;
} /* mem_decompress_pointer */
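For illustration, here is a minimal standalone sketch (not part of the commit) of the arithmetic the new mem_compress_pointer / mem_decompress_pointer pair performs; the base address and offset are made-up values, and the two constants mirror mem-config.h:

/* Sketch: compressed-pointer round trip with illustrative values. */
#include <assert.h>
#include <stdint.h>

#define MEM_ALIGNMENT_LOG 2     /* from mem-config.h */
#define MEM_HEAP_OFFSET_LOG 16  /* from mem-config.h */

int main (void)
{
  uintptr_t base = 0x20000000;    /* stand-in for mem_get_base_pointer() */
  uintptr_t ptr = base + 0x1a38;  /* some MEM_ALIGNMENT-aligned heap address */

  /* compress: subtract the heap base, then drop the alignment bits */
  uintptr_t cp = (ptr - base) >> MEM_ALIGNMENT_LOG; /* 0x68e */
  assert (cp != 0 && cp < (1u << (MEM_HEAP_OFFSET_LOG - MEM_ALIGNMENT_LOG)));

  /* decompress: restore the alignment bits, then add the heap base back */
  uintptr_t back = (cp << MEM_ALIGNMENT_LOG) + base;
  assert (back == ptr);

  return 0;
}

Since the heap spans at most 1 << MEM_HEAP_OFFSET_LOG bytes and pointers are MEM_ALIGNMENT-aligned, the shifted offset always fits in MEM_COMPRESSED_POINTER_WIDTH = 14 bits.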

View File

@ -24,24 +24,32 @@
#define JERRY_MEM_ALLOCATOR_H
#include "globals.h"
#include "mem-config.h"
#include "mem-heap.h"
/**
* Logarithm of required alignment for allocated units/blocks
* Representation of NULL value for compressed pointers
*/
#define MEM_ALIGNMENT_LOG 2
#define MEM_COMPRESSED_POINTER_NULL 0
/**
* Required alignment for allocated units/blocks
*/
#define MEM_ALIGNMENT (1 << MEM_ALIGNMENT_LOG)
/**
* Width of compressed memory pointer
*/
#define MEM_COMPRESSED_POINTER_WIDTH ( MEM_HEAP_OFFSET_LOG - MEM_ALIGNMENT_LOG )
extern void mem_init(void);
extern void mem_finalize(bool is_show_mem_stats);
uintptr_t mem_get_base_pointer(void);
extern uintptr_t mem_compress_pointer(void *pointer);
extern void* mem_decompress_pointer(uintptr_t compressed_pointer);
#endif /* !JERRY_MEM_ALLOCATOR_H */
/**
* @}
*/
*/

View File

@ -0,0 +1,39 @@
/* Copyright 2014 Samsung Electronics Co., Ltd.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef MEM_CONFIG_H
#define MEM_CONFIG_H
/**
* Log2 of maximum possible offset in the heap
*/
#define MEM_HEAP_OFFSET_LOG 16
/**
* Size of one pool chunk
*/
#define MEM_POOL_CHUNK_SIZE 16
/**
* Log2 of maximum number of chunks in a pool
*/
#define MEM_POOL_MAX_CHUNKS_NUMBER_LOG 16
/**
* Logarithm of required alignment for allocated units/blocks
*/
#define MEM_ALIGNMENT_LOG 2
#endif /* MEM_CONFIG_H */
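For reference, a small standalone sketch (my arithmetic, not declarations from the commit) spelling out what these constants imply; the 14-bit width is also where ECMA_POINTER_FIELD_WIDTH points later in this change:

/* Sketch: values derived from mem-config.h (compile-time only). */
#define MEM_HEAP_OFFSET_LOG 16
#define MEM_POOL_CHUNK_SIZE 16
#define MEM_ALIGNMENT_LOG 2

enum
{
  mem_alignment = 1 << MEM_ALIGNMENT_LOG,                             /* 4-byte alignment */
  mem_compressed_ptr_width = MEM_HEAP_OFFSET_LOG - MEM_ALIGNMENT_LOG, /* 14-bit compressed pointers */
  mem_max_heap_size = 1 << MEM_HEAP_OFFSET_LOG                        /* 64 KB addressable heap */
};

/* a free pool chunk must be able to hold a 16-bit next-chunk index */
typedef char check_chunk_holds_index[(MEM_POOL_CHUNK_SIZE >= 2) ? 1 : -1];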

View File

@ -27,6 +27,7 @@
#include "globals.h"
#include "jerry-libc.h"
#include "mem-allocator.h"
#include "mem-config.h"
#include "mem-heap.h"
/*
@ -201,12 +202,13 @@ mem_get_block_chunks_count_from_data_size( size_t block_allocated_size) /**< siz
*/
void
mem_heap_init(uint8_t *heap_start, /**< first address of heap space */
size_t heap_size) /**< heap space size */
size_t heap_size) /**< heap space size */
{
JERRY_ASSERT( heap_start != NULL );
JERRY_ASSERT( heap_size != 0 );
JERRY_ASSERT( heap_size % MEM_HEAP_CHUNK_SIZE == 0 );
JERRY_ASSERT( (uintptr_t) heap_start % MEM_ALIGNMENT == 0);
JERRY_ASSERT( heap_size <= ( 1u << MEM_HEAP_OFFSET_LOG ) );
mem_heap.heap_start = heap_start;
mem_heap.heap_size = heap_size;

View File

@ -31,13 +31,14 @@
#include "mem-allocator.h"
#include "mem-pool.h"
/**
* Magic number to fill free chunks in debug version
*/
static const uint8_t mem_pool_free_chunk_magic_num = 0x71;
static void mem_check_pool( mem_pool_state_t *pool_p);
/**
* Get address of pool chunk with specified index
*/
#define MEM_POOL_CHUNK_ADDRESS( pool_header_p, chunk_index) ( (uint8_t*) ( MEM_POOL_SPACE_START( pool_header_p) + \
MEM_POOL_CHUNK_SIZE * chunk_index ) )
/**
* Initialization of memory pool.
*
@ -46,66 +47,40 @@ static void mem_check_pool( mem_pool_state_t *pool_p);
*/
void
mem_pool_init(mem_pool_state_t *pool_p, /**< pool */
size_t chunk_size, /**< size of pool's chunk */
uint8_t *pool_start, /**< start of pool space */
size_t pool_size) /**< pool space size */
size_t pool_size) /**< pool size */
{
JERRY_ASSERT( pool_p != NULL );
JERRY_ASSERT( (uintptr_t) pool_start % MEM_ALIGNMENT == 0);
JERRY_ASSERT( (size_t)MEM_POOL_SPACE_START( pool_p) % MEM_ALIGNMENT == 0);
pool_p->pool_start_p = pool_start;
pool_p->pool_size = pool_size;
JERRY_STATIC_ASSERT( MEM_POOL_CHUNK_SIZE % MEM_ALIGNMENT == 0 );
JERRY_STATIC_ASSERT( MEM_POOL_MAX_CHUNKS_NUMBER_LOG <= sizeof(mem_pool_chunk_index_t) * JERRY_BITSINBYTE );
JERRY_ASSERT( sizeof(mem_pool_chunk_index_t) <= MEM_POOL_CHUNK_SIZE );
switch ( chunk_size )
{
case 4:
pool_p->chunk_size_log = 2;
break;
const size_t pool_space_size = pool_size - sizeof(mem_pool_state_t);
const size_t chunks_number = pool_space_size / MEM_POOL_CHUNK_SIZE;
case 8:
pool_p->chunk_size_log = 3;
break;
JERRY_ASSERT( ( (mem_pool_chunk_index_t) chunks_number ) == chunks_number );
case 16:
pool_p->chunk_size_log = 4;
break;
case 32:
pool_p->chunk_size_log = 5;
break;
case 64:
pool_p->chunk_size_log = 6;
break;
default:
JERRY_UNREACHABLE();
}
JERRY_ASSERT( chunk_size % MEM_ALIGNMENT == 0 );
JERRY_ASSERT( chunk_size >= sizeof(mem_pool_chunk_offset_t) );
size_t chunks_area_size = JERRY_ALIGNDOWN( pool_size, chunk_size);
size_t chunks_number = chunks_area_size / chunk_size;
JERRY_ASSERT( ( (mem_pool_chunk_offset_t) chunks_number ) == chunks_number );
pool_p->chunks_number = (mem_pool_chunk_offset_t) chunks_number;
pool_p->first_free_chunk = 0;
pool_p->chunks_number = (mem_pool_chunk_index_t) chunks_number;
/*
* All chunks are free right after initialization
*/
pool_p->free_chunks_number = pool_p->chunks_number;
for ( uint32_t chunk_index = 0;
/*
* Chunk with zero index is first free chunk in the pool now
*/
pool_p->first_free_chunk = 0;
for ( mem_pool_chunk_index_t chunk_index = 0;
chunk_index < chunks_number;
chunk_index++ )
{
mem_pool_chunk_offset_t *next_free_chunk_offset_p =
(mem_pool_chunk_offset_t*) ( pool_p->pool_start_p + chunk_size * chunk_index );
mem_pool_chunk_index_t *next_free_chunk_index_p =
(mem_pool_chunk_index_t*) MEM_POOL_CHUNK_ADDRESS( pool_p, chunk_index);
*next_free_chunk_offset_p = chunk_index + 1;
*next_free_chunk_index_p = (mem_pool_chunk_index_t) (chunk_index + 1u);
}
mem_check_pool( pool_p);
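The initialization loop above threads an intrusive free list through the chunks themselves: the first two bytes of every free chunk hold the index of the next free chunk, and the value chunks_number serves as the end-of-list marker. A simplified standalone sketch of the same idea (helper name and memcpy are mine), assuming the fixed 16-byte chunk size:

/* Sketch: free-list threading equivalent to the loop in mem_pool_init. */
#include <stdint.h>
#include <string.h>

#define CHUNK_SIZE 16 /* MEM_POOL_CHUNK_SIZE */

static void thread_free_list (uint8_t *space_p, uint16_t chunks_number)
{
  for (uint16_t i = 0; i < chunks_number; i++)
  {
    /* chunk i points at chunk i + 1; the last chunk stores chunks_number,
       which the allocator treats as "no next free chunk" */
    uint16_t next_index = (uint16_t) (i + 1u);
    memcpy (space_p + (size_t) i * CHUNK_SIZE, &next_index, sizeof (next_index));
  }
}

Allocation then pops the head chunk (first_free_chunk) and freeing pushes a chunk back onto the head, both in constant time.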
@ -128,11 +103,11 @@ mem_pool_alloc_chunk(mem_pool_state_t *pool_p) /**< pool */
JERRY_ASSERT( pool_p->first_free_chunk < pool_p->chunks_number );
mem_pool_chunk_offset_t chunk_index = pool_p->first_free_chunk;
uint8_t *chunk_p = pool_p->pool_start_p + ( chunk_index << pool_p->chunk_size_log );
mem_pool_chunk_index_t chunk_index = pool_p->first_free_chunk;
uint8_t *chunk_p = MEM_POOL_CHUNK_ADDRESS( pool_p, chunk_index);
mem_pool_chunk_offset_t *next_free_chunk_offset_p = (mem_pool_chunk_offset_t*) chunk_p;
pool_p->first_free_chunk = *next_free_chunk_offset_p;
mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t*) chunk_p;
pool_p->first_free_chunk = *next_free_chunk_index_p;
pool_p->free_chunks_number--;
mem_check_pool( pool_p);
@ -148,16 +123,17 @@ mem_pool_free_chunk(mem_pool_state_t *pool_p, /**< pool */
uint8_t *chunk_p) /**< chunk pointer */
{
JERRY_ASSERT( pool_p->free_chunks_number < pool_p->chunks_number );
JERRY_ASSERT( chunk_p >= pool_p->pool_start_p && chunk_p <= pool_p->pool_start_p + pool_p->chunks_number * ( 1u << pool_p->chunk_size_log ) );
JERRY_ASSERT( ( (uintptr_t) chunk_p - (uintptr_t) pool_p->pool_start_p ) % ( 1u << pool_p->chunk_size_log ) == 0 );
JERRY_ASSERT( chunk_p >= MEM_POOL_SPACE_START( pool_p) && chunk_p <= MEM_POOL_SPACE_START( pool_p) + pool_p->chunks_number * MEM_POOL_CHUNK_SIZE );
JERRY_ASSERT( ( (uintptr_t) chunk_p - (uintptr_t) MEM_POOL_SPACE_START( pool_p) ) % MEM_POOL_CHUNK_SIZE == 0 );
mem_check_pool( pool_p);
const size_t chunk_byte_offset = (size_t) (chunk_p - pool_p->pool_start_p);
const mem_pool_chunk_offset_t chunk_index = (mem_pool_chunk_offset_t) (chunk_byte_offset >> pool_p->chunk_size_log);
mem_pool_chunk_offset_t *next_free_chunk_offset_p = (mem_pool_chunk_offset_t*) chunk_p;
const size_t chunk_byte_offset = (size_t) (chunk_p - MEM_POOL_SPACE_START( pool_p));
const mem_pool_chunk_index_t chunk_index = (mem_pool_chunk_index_t) (chunk_byte_offset / MEM_POOL_CHUNK_SIZE);
*next_free_chunk_offset_p = pool_p->first_free_chunk;
mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t*) chunk_p;
*next_free_chunk_index_p = pool_p->first_free_chunk;
pool_p->first_free_chunk = chunk_index;
pool_p->free_chunks_number++;
@ -173,20 +149,19 @@ mem_check_pool( mem_pool_state_t __unused *pool_p) /**< pool (unused #ifdef JERR
{
#ifndef JERRY_NDEBUG
JERRY_ASSERT( pool_p->chunks_number != 0 );
JERRY_ASSERT( pool_p->chunks_number * ( 1u << pool_p->chunk_size_log ) <= pool_p->pool_size );
JERRY_ASSERT( pool_p->free_chunks_number <= pool_p->chunks_number );
size_t met_free_chunks_number = 0;
mem_pool_chunk_offset_t chunk_index = pool_p->first_free_chunk;
mem_pool_chunk_index_t chunk_index = pool_p->first_free_chunk;
while ( chunk_index != pool_p->chunks_number )
{
uint8_t *chunk_p = pool_p->pool_start_p + ( 1u << pool_p->chunk_size_log ) * chunk_index;
mem_pool_chunk_offset_t *next_free_chunk_offset_p = (mem_pool_chunk_offset_t*) chunk_p;
uint8_t *chunk_p = MEM_POOL_CHUNK_ADDRESS( pool_p, chunk_index);
mem_pool_chunk_index_t *next_free_chunk_index_p = (mem_pool_chunk_index_t*) chunk_p;
met_free_chunks_number++;
chunk_index = *next_free_chunk_offset_p;
chunk_index = *next_free_chunk_index_p;
}
JERRY_ASSERT( met_free_chunks_number == pool_p->free_chunks_number );

View File

@ -13,10 +13,6 @@
* limitations under the License.
*/
/** \addtogroup pool Memory pool
* @{
*/
#ifndef JERRY_MEM_POOL_INTERNAL
#error "Please, use mem_poolman.h instead of mem_pool.h"
#endif
@ -24,35 +20,42 @@
#ifndef JERRY_MEM_POOL_H
#define JERRY_MEM_POOL_H
typedef uint32_t mem_pool_chunk_offset_t;
#include "mem-config.h"
/** \addtogroup pool Memory pool
* @{
*/
/**
* Get start of the pool's chunk space (the area immediately following the pool header)
*/
#define MEM_POOL_SPACE_START( pool_header_p) ( (uint8_t*) ( (mem_pool_state_t*) pool_header_p + 1 ) )
/**
* Index of chunk in a pool
*/
typedef uint16_t mem_pool_chunk_index_t;
/**
* State of a memory pool
*
* TODO:
* Compact the struct
*/
typedef struct mem_pool_state_t {
uint8_t *pool_start_p; /**< first address of pool space */
size_t pool_size; /**< pool space size */
unsigned int chunks_number : MEM_POOL_MAX_CHUNKS_NUMBER_LOG; /**< number of chunks (mem_pool_chunk_index_t) */
unsigned int free_chunks_number : MEM_POOL_MAX_CHUNKS_NUMBER_LOG; /**< number of free chunks (mem_pool_chunk_index_t) */
size_t chunk_size_log; /**< log of size of one chunk */
unsigned int first_free_chunk : MEM_POOL_MAX_CHUNKS_NUMBER_LOG; /**< offset of first free chunk
from the beginning of the pool
(mem_pool_chunk_index_t) */
mem_pool_chunk_offset_t chunks_number; /**< number of chunks */
mem_pool_chunk_offset_t free_chunks_number; /**< number of free chunks */
unsigned int next_pool_cp : MEM_HEAP_OFFSET_LOG; /**< pointer to the next pool with same chunk size */
} mem_pool_state_t;
mem_pool_chunk_offset_t first_free_chunk; /**< offset of first free chunk
from the beginning of the pool */
struct mem_pool_state_t *next_pool_p; /**< pointer to the next pool with same chunk size */
} __attribute__((aligned(64))) mem_pool_state_t;
extern void mem_pool_init(mem_pool_state_t *pool_p, size_t chunk_size, uint8_t *pool_start, size_t pool_size);
extern void mem_pool_init(mem_pool_state_t *pool_p, size_t pool_size);
extern uint8_t* mem_pool_alloc_chunk(mem_pool_state_t *pool_p);
extern void mem_pool_free_chunk(mem_pool_state_t *pool_p, uint8_t *chunk_p);
#endif /* JERRY_MEM_POOL_H */
/**
* @}
*/
#endif /* JERRY_MEM_POOL_H */
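A rough bit budget for the compacted header, as a standalone sketch (the struct and size check below are mine and omit the aligned(64) attribute, which pads the real type's size and alignment beyond the packed fields):

/* Sketch: the four bit-fields of the new pool header pack into 8 bytes. */
#define MEM_POOL_MAX_CHUNKS_NUMBER_LOG 16
#define MEM_HEAP_OFFSET_LOG 16

typedef struct
{
  unsigned int chunks_number : MEM_POOL_MAX_CHUNKS_NUMBER_LOG;      /* 16 bits */
  unsigned int free_chunks_number : MEM_POOL_MAX_CHUNKS_NUMBER_LOG; /* 16 bits */
  unsigned int first_free_chunk : MEM_POOL_MAX_CHUNKS_NUMBER_LOG;   /* 16 bits */
  unsigned int next_pool_cp : MEM_HEAP_OFFSET_LOG;                  /* 16 bits, compressed pointer */
} packed_pool_header_t;

/* 4 x 16 bits = 64 bits, i.e. two unsigned int storage units on common ABIs */
typedef char check_packed_header_size[(sizeof (packed_pool_header_t) == 8) ? 1 : -1];

The previous header carried two raw pointers, two size_t fields and three 32-bit counters (roughly 28 bytes on a 32-bit target), so the bit-field plus compressed-pointer layout is what delivers the 8 bytes named in the commit message.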

View File

@ -34,24 +34,14 @@
#include "mem-poolman.h"
/**
* Lists of pools for possible chunk sizes
* List of pools
*/
mem_pool_state_t *mem_pools[ MEM_POOL_CHUNK_TYPE__COUNT ];
mem_pool_state_t *mem_pools;
/**
* Number of free chunks of possible chunk sizes
* Number of free chunks
*/
size_t mem_free_chunks_number[ MEM_POOL_CHUNK_TYPE__COUNT ];
/**
* Pool, containing pool headers
*/
mem_pool_state_t mem_pool_for_pool_headers;
/**
* Space for pool, containing pool headers
*/
uint8_t *mem_space_for_pool_for_pool_headers;
size_t mem_free_chunks_number;
#ifdef MEM_STATS
/**
@ -60,63 +50,26 @@ uint8_t *mem_space_for_pool_for_pool_headers;
mem_pools_stats_t mem_pools_stats;
static void mem_pools_stat_init( void);
static void mem_pools_stat_alloc_pool( mem_pool_chunk_type_t);
static void mem_pools_stat_free_pool( mem_pool_chunk_type_t);
static void mem_pools_stat_alloc_chunk( mem_pool_chunk_type_t);
static void mem_pools_stat_free_chunk( mem_pool_chunk_type_t);
static void mem_pools_stat_alloc_pool( void);
static void mem_pools_stat_free_pool( void);
static void mem_pools_stat_alloc_chunk(void );
static void mem_pools_stat_free_chunk( void);
#else /* !MEM_STATS */
# define mem_pools_stat_init()
# define mem_pools_stat_alloc_pool(v)
# define mem_pools_stat_free_pool(v)
# define mem_pools_stat_alloc_chunk(v)
# define mem_pools_stat_free_chunk(v)
# define mem_pools_stat_alloc_pool()
# define mem_pools_stat_free_pool()
# define mem_pools_stat_alloc_chunk()
# define mem_pools_stat_free_chunk()
#endif /* !MEM_STATS */
/**
* Get chunk size from chunk type.
*
* @return size (in bytes) of chunk of specified type
*/
size_t
mem_get_chunk_size( mem_pool_chunk_type_t chunk_type) /**< chunk type */
{
uint32_t chunk_type_id = (uint32_t) chunk_type;
JERRY_ASSERT( chunk_type_id < MEM_POOL_CHUNK_TYPE__COUNT );
return ( 1u << ( chunk_type_id + 2 ) );
} /* mem_get_chunk_size */
/**
* Initialize pool manager
*/
void
mem_pools_init(void)
mem_pools_init( void)
{
for ( uint32_t i = 0; i < MEM_POOL_CHUNK_TYPE__COUNT; i++ )
{
mem_pools[ i ] = NULL;
mem_free_chunks_number[ i ] = 0;
}
/**
* Space, at least for four pool headers and a bitmap entry.
*
* TODO: Research.
*/
size_t pool_space_size = mem_heap_recommend_allocation_size( 4 * sizeof (mem_pool_state_t) );
mem_space_for_pool_for_pool_headers = mem_heap_alloc_block( pool_space_size, MEM_HEAP_ALLOC_LONG_TERM);
/*
* Get chunk type, checking that there is a type corresponding to specified size.
*/
const mem_pool_chunk_type_t chunk_type = mem_size_to_pool_chunk_type( sizeof(mem_pool_state_t));
mem_pool_init(&mem_pool_for_pool_headers,
mem_get_chunk_size( chunk_type),
mem_space_for_pool_for_pool_headers,
pool_space_size);
mem_pools = NULL;
mem_free_chunks_number = 0;
mem_pools_stat_init();
} /* mem_pools_init */
@ -125,20 +78,10 @@ mem_pools_init(void)
* Finalize pool manager
*/
void
mem_pools_finalize(void)
mem_pools_finalize( void)
{
for ( uint32_t i = 0; i < MEM_POOL_CHUNK_TYPE__COUNT; i++ )
{
JERRY_ASSERT( mem_pools[ i ] == NULL );
JERRY_ASSERT( mem_free_chunks_number[ i ] == 0 );
}
JERRY_ASSERT( mem_pool_for_pool_headers.chunks_number == mem_pool_for_pool_headers.free_chunks_number );
__memset( &mem_pool_for_pool_headers, 0, sizeof(mem_pool_for_pool_headers));
mem_heap_free_block( mem_space_for_pool_for_pool_headers);
mem_space_for_pool_for_pool_headers = NULL;
JERRY_ASSERT( mem_pools == NULL );
JERRY_ASSERT( mem_free_chunks_number == 0 );
} /* mem_pools_finalize */
/**
@ -148,57 +91,39 @@ mem_pools_finalize(void)
* or NULL - if not enough memory.
*/
uint8_t*
mem_pools_alloc( mem_pool_chunk_type_t chunk_type) /**< chunk type */
mem_pools_alloc( void)
{
size_t chunk_size = mem_get_chunk_size( chunk_type);
/**
* If there are no free chunks, allocate new pool.
*/
if ( mem_free_chunks_number[ chunk_type ] == 0 )
if ( mem_free_chunks_number == 0 )
{
mem_pool_state_t *pool_state = (mem_pool_state_t*) mem_pool_alloc_chunk( &mem_pool_for_pool_headers);
/**
* Space, at least for header and eight chunks.
*
* TODO: Config.
*/
size_t pool_size = mem_heap_recommend_allocation_size( sizeof(mem_pool_state_t) + 8 * MEM_POOL_CHUNK_SIZE );
mem_pool_state_t *pool_state = (mem_pool_state_t*) mem_heap_alloc_block( pool_size, MEM_HEAP_ALLOC_LONG_TERM);
if ( pool_state == NULL )
{
/**
* Not enough space for new pool' header.
* Not enough space for new pool.
*/
return NULL;
}
/**
* Space, at least for eight chunks and a bitmap entry.
*
* TODO: Research.
*/
size_t pool_space_size = mem_heap_recommend_allocation_size( 8 * chunk_size + sizeof (mword_t) );
mem_pool_init( pool_state, pool_size);
uint8_t *pool_space = mem_heap_alloc_block( pool_space_size,
MEM_HEAP_ALLOC_LONG_TERM);
pool_state->next_pool_cp = ( mem_pools == NULL ) ? MEM_COMPRESSED_POINTER_NULL
: (uint16_t) mem_compress_pointer( mem_pools);
mem_pools = pool_state;
if ( pool_space == NULL )
{
/**
* Not enough memory. Freeing pool header that was allocated above.
*/
mem_pool_free_chunk( &mem_pool_for_pool_headers, (uint8_t*) pool_state);
return NULL;
}
mem_pool_init( pool_state,
chunk_size,
pool_space,
pool_space_size);
pool_state->next_pool_p = mem_pools[ chunk_type ];
mem_pools[ chunk_type ] = pool_state;
mem_free_chunks_number[ chunk_type ] += pool_state->free_chunks_number;
mem_free_chunks_number += pool_state->chunks_number;
mem_pools_stat_alloc_pool( chunk_type);
mem_pools_stat_alloc_pool();
}
/**
@ -206,11 +131,11 @@ mem_pools_alloc( mem_pool_chunk_type_t chunk_type) /**< chunk type */
*
* Search for the pool.
*/
mem_pool_state_t *pool_state = mem_pools[ chunk_type ];
mem_pool_state_t *pool_state = mem_pools;
while ( pool_state->free_chunks_number == 0 )
while ( pool_state->first_free_chunk == pool_state->chunks_number )
{
pool_state = pool_state->next_pool_p;
pool_state = mem_decompress_pointer( pool_state->next_pool_cp);
JERRY_ASSERT( pool_state != NULL );
}
@ -218,9 +143,9 @@ mem_pools_alloc( mem_pool_chunk_type_t chunk_type) /**< chunk type */
/**
* And allocate chunk within it.
*/
mem_free_chunks_number[ chunk_type ]--;
mem_free_chunks_number--;
mem_pools_stat_alloc_chunk( chunk_type);
mem_pools_stat_alloc_chunk();
return mem_pool_alloc_chunk( pool_state);
} /* mem_pools_alloc */
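With the separate pool-of-pool-headers removed, a pool's header now sits at the start of the same heap block as its chunks (MEM_POOL_SPACE_START is simply header + 1), so creating or destroying a pool is a single mem_heap_alloc_block / mem_heap_free_block call. A hedged sketch of the resulting capacity arithmetic (block and header sizes below are illustrative; the real block size comes from mem_heap_recommend_allocation_size):

/* Sketch: how many chunks one pool block provides (illustrative numbers only). */
#include <stddef.h>

#define MEM_POOL_CHUNK_SIZE 16

static size_t pool_chunks_count (size_t pool_size, size_t header_size)
{
  /* the header occupies the front of the block; chunks fill the rest */
  return (pool_size - header_size) / MEM_POOL_CHUNK_SIZE;
}

/* e.g. a 136-byte block and the nominal 8-byte packed header give (136 - 8) / 16 = 8 chunks */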
@ -229,19 +154,18 @@ mem_pools_alloc( mem_pool_chunk_type_t chunk_type) /**< chunk type */
* Free the chunk
*/
void
mem_pools_free(mem_pool_chunk_type_t chunk_type, /**< the chunk type */
uint8_t *chunk_p) /**< pointer to the chunk */
mem_pools_free( uint8_t *chunk_p) /**< pointer to the chunk */
{
mem_pool_state_t *pool_state = mem_pools[ chunk_type ], *prev_pool_state = NULL;
mem_pool_state_t *pool_state = mem_pools, *prev_pool_state = NULL;
/**
* Search for the pool containing specified chunk.
*/
while ( !( chunk_p >= pool_state->pool_start_p
&& chunk_p <= pool_state->pool_start_p + pool_state->pool_size ) )
while ( !( chunk_p >= MEM_POOL_SPACE_START( pool_state)
&& chunk_p <= MEM_POOL_SPACE_START( pool_state) + pool_state->chunks_number * MEM_POOL_CHUNK_SIZE ) )
{
prev_pool_state = pool_state;
pool_state = pool_state->next_pool_p;
pool_state = mem_decompress_pointer( pool_state->next_pool_cp);
JERRY_ASSERT( pool_state != NULL );
}
@ -250,9 +174,9 @@ mem_pools_free(mem_pool_chunk_type_t chunk_type, /**< the chunk type */
* Free the chunk
*/
mem_pool_free_chunk( pool_state, chunk_p);
mem_free_chunks_number[ chunk_type ]++;
mem_free_chunks_number++;
mem_pools_stat_free_chunk( chunk_type);
mem_pools_stat_free_chunk();
/**
* If all chunks of the pool are free, free the pool itself.
@ -261,19 +185,24 @@ mem_pools_free(mem_pool_chunk_type_t chunk_type, /**< the chunk type */
{
if ( prev_pool_state != NULL )
{
prev_pool_state->next_pool_p = pool_state->next_pool_p;
prev_pool_state->next_pool_cp = pool_state->next_pool_cp;
} else
{
mem_pools[ chunk_type ] = pool_state->next_pool_p;
if ( pool_state->next_pool_cp == MEM_COMPRESSED_POINTER_NULL )
{
mem_pools = NULL;
}
else
{
mem_pools = mem_decompress_pointer( pool_state->next_pool_cp);
}
}
mem_free_chunks_number[ chunk_type ] -= pool_state->chunks_number;
mem_free_chunks_number -= pool_state->chunks_number;
mem_heap_free_block( pool_state->pool_start_p);
mem_heap_free_block( (uint8_t*)pool_state);
mem_pool_free_chunk( &mem_pool_for_pool_headers, (uint8_t*) pool_state);
mem_pools_stat_free_pool( chunk_type);
mem_pools_stat_free_pool();
}
} /* mem_pools_free */
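From a caller's point of view the change boils down to dropping the chunk-type argument; every user now receives a fixed MEM_POOL_CHUNK_SIZE chunk. A hedged usage sketch against the new prototypes (assumes mem_init() has already set up the heap and pools):

/* Sketch: allocating and releasing one chunk through the untyped pool API. */
#include <stdint.h>
#include <string.h>

extern uint8_t *mem_pools_alloc (void);
extern void mem_pools_free (uint8_t *chunk_p);

static void pool_round_trip (void)
{
  uint8_t *chunk_p = mem_pools_alloc (); /* always a MEM_POOL_CHUNK_SIZE (16-byte) chunk */

  if (chunk_p != NULL)
  {
    memset (chunk_p, 0, 16); /* MEM_POOL_CHUNK_SIZE */
    mem_pools_free (chunk_p); /* no chunk type to pass back anymore */
  }
}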
@ -302,14 +231,14 @@ mem_pools_stat_init( void)
* Account allocation of a pool
*/
static void
mem_pools_stat_alloc_pool( mem_pool_chunk_type_t chunk_type) /**< chunk type */
mem_pools_stat_alloc_pool( void)
{
mem_pools_stats.pools_count[ chunk_type ]++;
mem_pools_stats.free_chunks[ chunk_type ] = mem_free_chunks_number[ chunk_type ];
mem_pools_stats.pools_count++;
mem_pools_stats.free_chunks = mem_free_chunks_number;
if ( mem_pools_stats.pools_count[ chunk_type ] > mem_pools_stats.peak_pools_count[ chunk_type ] )
if ( mem_pools_stats.pools_count > mem_pools_stats.peak_pools_count )
{
mem_pools_stats.peak_pools_count[ chunk_type ] = mem_pools_stats.pools_count[ chunk_type ];
mem_pools_stats.peak_pools_count = mem_pools_stats.pools_count;
}
} /* mem_pools_stat_alloc_pool */
@ -317,28 +246,28 @@ mem_pools_stat_alloc_pool( mem_pool_chunk_type_t chunk_type) /**< chunk type */
* Account freeing of a pool
*/
static void
mem_pools_stat_free_pool( mem_pool_chunk_type_t chunk_type) /**< chunk type */
mem_pools_stat_free_pool( void)
{
JERRY_ASSERT( mem_pools_stats.pools_count[ chunk_type ] > 0 );
JERRY_ASSERT( mem_pools_stats.pools_count > 0 );
mem_pools_stats.pools_count[ chunk_type ]--;
mem_pools_stats.free_chunks[ chunk_type ] = mem_free_chunks_number[ chunk_type ];
mem_pools_stats.pools_count--;
mem_pools_stats.free_chunks = mem_free_chunks_number;
} /* mem_pools_stat_free_pool */
/**
* Account allocation of chunk in a pool
*/
static void
mem_pools_stat_alloc_chunk( mem_pool_chunk_type_t chunk_type) /**< chunk type */
mem_pools_stat_alloc_chunk(void)
{
JERRY_ASSERT( mem_pools_stats.free_chunks[ chunk_type ] > 0 );
JERRY_ASSERT( mem_pools_stats.free_chunks > 0 );
mem_pools_stats.allocated_chunks[ chunk_type ]++;
mem_pools_stats.free_chunks[ chunk_type ]--;
mem_pools_stats.allocated_chunks++;
mem_pools_stats.free_chunks--;
if ( mem_pools_stats.allocated_chunks[ chunk_type ] > mem_pools_stats.peak_allocated_chunks[ chunk_type ] )
if ( mem_pools_stats.allocated_chunks > mem_pools_stats.peak_allocated_chunks )
{
mem_pools_stats.peak_allocated_chunks[ chunk_type ] = mem_pools_stats.allocated_chunks[ chunk_type ];
mem_pools_stats.peak_allocated_chunks = mem_pools_stats.allocated_chunks;
}
} /* mem_pools_stat_alloc_chunk */
@ -346,12 +275,12 @@ mem_pools_stat_alloc_chunk( mem_pool_chunk_type_t chunk_type) /**< chunk type */
* Account freeing of chunk in a pool
*/
static void
mem_pools_stat_free_chunk( mem_pool_chunk_type_t chunk_type) /**< chunk type */
mem_pools_stat_free_chunk(void)
{
JERRY_ASSERT( mem_pools_stats.allocated_chunks[ chunk_type ] > 0 );
JERRY_ASSERT( mem_pools_stats.allocated_chunks > 0 );
mem_pools_stats.allocated_chunks[ chunk_type ]--;
mem_pools_stats.free_chunks[ chunk_type ]++;
mem_pools_stats.allocated_chunks--;
mem_pools_stats.free_chunks++;
} /* mem_pools_stat_free_chunk */
#endif /* MEM_STATS */

View File

@ -29,34 +29,10 @@
#include "globals.h"
/**
* Pool chunks's possible sizes
*/
typedef enum {
MEM_POOL_CHUNK_TYPE_4, /**< 4-byte chunk */
MEM_POOL_CHUNK_TYPE_8, /**< 8-byte chunk */
MEM_POOL_CHUNK_TYPE_16, /**< 16-byte chunk */
MEM_POOL_CHUNK_TYPE_32, /**< 32-byte chunk */
MEM_POOL_CHUNK_TYPE_64, /**< 64-byte chunk */
MEM_POOL_CHUNK_TYPE__COUNT /**< count of possible pool chunks' sizes */
} mem_pool_chunk_type_t;
/**
* Convert size to pool chunk type.
*/
#define mem_size_to_pool_chunk_type( size) ((size) == 4 ? MEM_POOL_CHUNK_TYPE_4 : \
((size) == 8 ? MEM_POOL_CHUNK_TYPE_8 : \
((size) == 16 ? MEM_POOL_CHUNK_TYPE_16 : \
((size) == 32 ? MEM_POOL_CHUNK_TYPE_32 : \
((size) == 64 ? MEM_POOL_CHUNK_TYPE_64 : \
jerry_unreferenced_expression)))))
extern size_t mem_get_chunk_size( mem_pool_chunk_type_t chunk_type);
extern void mem_pools_init(void);
extern void mem_pools_finalize(void);
extern uint8_t* mem_pools_alloc(mem_pool_chunk_type_t chunk_type);
extern void mem_pools_free(mem_pool_chunk_type_t chunk_type, uint8_t *chunk_p);
extern uint8_t* mem_pools_alloc(void);
extern void mem_pools_free(uint8_t *chunk_p);
#ifdef MEM_STATS
/**
@ -64,20 +40,20 @@ extern void mem_pools_free(mem_pool_chunk_type_t chunk_type, uint8_t *chunk_p);
*/
typedef struct
{
/** pools' count, per type */
size_t pools_count[ MEM_POOL_CHUNK_TYPE__COUNT ];
/** pools' count */
size_t pools_count;
/** peak pools' count, per type */
size_t peak_pools_count[ MEM_POOL_CHUNK_TYPE__COUNT ];
/** peak pools' count */
size_t peak_pools_count;
/** allocated chunks count, per type */
size_t allocated_chunks[ MEM_POOL_CHUNK_TYPE__COUNT ];
/** allocated chunks count */
size_t allocated_chunks;
/** peak allocated chunks count, per type */
size_t peak_allocated_chunks[ MEM_POOL_CHUNK_TYPE__COUNT ];
/** peak allocated chunks count */
size_t peak_allocated_chunks;
/** free chunks count, per type */
size_t free_chunks[ MEM_POOL_CHUNK_TYPE__COUNT ];
/** free chunks count */
size_t free_chunks;
} mem_pools_stats_t;
extern void mem_pools_get_stats( mem_pools_stats_t *out_pools_stats_p);

View File

@ -58,7 +58,7 @@ JERRY_STATIC_ASSERT( sizeof (ecma_completion_value_t) == sizeof(uint32_t) );
ecma_alloc_ ## ecma_type (void) \
{ \
ecma_ ## ecma_type ## _t *p ## ecma_type = (ecma_ ## ecma_type ## _t *) \
mem_pools_alloc( mem_size_to_pool_chunk_type( sizeof(ecma_ ## ecma_type ## _t))); \
mem_pools_alloc(); \
\
if ( likely( p ## ecma_type != NULL ) ) \
{ \
@ -72,7 +72,7 @@ ecma_alloc_ ## ecma_type (void) \
ecma_gc_run( gen_id ); \
\
p ## ecma_type = (ecma_ ## ecma_type ## _t *) \
mem_pools_alloc( mem_size_to_pool_chunk_type( sizeof(ecma_ ## ecma_type ## _t))); \
mem_pools_alloc(); \
\
if ( likely( p ## ecma_type != NULL ) ) \
{ \
@ -88,8 +88,7 @@ ecma_alloc_ ## ecma_type (void) \
#define DEALLOC( ecma_type) void \
ecma_dealloc_ ## ecma_type( ecma_ ## ecma_type ## _t *p ## ecma_type) \
{ \
mem_pools_free( mem_size_to_pool_chunk_type( sizeof(ecma_ ## ecma_type ## _t)), \
(uint8_t*) p ## ecma_type); \
mem_pools_free( (uint8_t*) p ## ecma_type); \
}
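Because the chunk-type lookup is gone, the ALLOC / DEALLOC macros implicitly rely on every ecma_*_t they serve fitting into a single MEM_POOL_CHUNK_SIZE chunk. A hypothetical compile-time sanity check (my own macro; the two type names are only examples of types these macros are instantiated for):

/* Sketch: assert that pool-allocated ecma types fit in one 16-byte chunk (not part of the commit). */
#include "ecma-globals.h"
#include "mem-config.h"

#define CHECK_FITS_IN_POOL_CHUNK( ecma_type) \
  typedef char ecma_type ## _fits_in_pool_chunk \
    [(sizeof (ecma_ ## ecma_type ## _t) <= MEM_POOL_CHUNK_SIZE) ? 1 : -1]

CHECK_FITS_IN_POOL_CHUNK( object);
CHECK_FITS_IN_POOL_CHUNK( property);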
/**

View File

@ -37,12 +37,12 @@
* The offset is shifted right by MEM_ALIGNMENT_LOG.
* Least significant MEM_ALIGNMENT_LOG bits of non-shifted offset are zeroes.
*/
#define ECMA_POINTER_FIELD_WIDTH 14
#define ECMA_POINTER_FIELD_WIDTH MEM_COMPRESSED_POINTER_WIDTH
/**
* The null value for compressed pointers
* The NULL value for compressed pointers
*/
#define ECMA_NULL_POINTER 0
#define ECMA_NULL_POINTER MEM_COMPRESSED_POINTER_NULL
/**
* @}

View File

@ -26,42 +26,6 @@
#include "ecma-helpers.h"
#include "jerry-libc.h"
/**
* Compress pointer.
*/
uintptr_t
ecma_compress_pointer(void *pointer) /**< pointer to compress */
{
JERRY_ASSERT( pointer != NULL );
uintptr_t int_ptr = (uintptr_t) pointer;
JERRY_ASSERT(int_ptr % MEM_ALIGNMENT == 0);
int_ptr -= mem_get_base_pointer();
int_ptr >>= MEM_ALIGNMENT_LOG;
JERRY_ASSERT((int_ptr & ~((1u << ECMA_POINTER_FIELD_WIDTH) - 1)) == 0);
return int_ptr;
} /* ecma_compress_pointer */
/**
* Decompress pointer.
*/
void*
ecma_decompress_pointer(uintptr_t compressed_pointer) /**< pointer to decompress */
{
JERRY_ASSERT( compressed_pointer != ECMA_NULL_POINTER );
uintptr_t int_ptr = compressed_pointer;
int_ptr <<= MEM_ALIGNMENT_LOG;
int_ptr += mem_get_base_pointer();
return (void*) int_ptr;
} /* ecma_decompress_pointer */
/**
* Create an object with specified prototype object
* (or NULL prototype if there is not prototype for the object)

View File

@ -24,15 +24,13 @@
#define JERRY_ECMA_HELPERS_H
#include "ecma-globals.h"
extern uintptr_t ecma_compress_pointer(void *pointer);
extern void* ecma_decompress_pointer(uintptr_t compressed_pointer);
#include "mem-allocator.h"
/**
* Get value of pointer from specified compressed pointer field.
*/
#define ecma_get_pointer( field) \
( ( unlikely( field == ECMA_NULL_POINTER ) ) ? NULL : ecma_decompress_pointer( field) )
( ( unlikely( field == ECMA_NULL_POINTER ) ) ? NULL : mem_decompress_pointer( field) )
/**
* Set value of compressed pointer field so that it will correspond
@ -45,7 +43,7 @@ extern void* ecma_decompress_pointer(uintptr_t compressed_pointer);
} \
while(0); \
(field) = ( unlikely ( ( non_compressed_pointer ) == NULL ) ? ECMA_NULL_POINTER \
: ecma_compress_pointer( non_compressed_pointer) \
: mem_compress_pointer( non_compressed_pointer) \
& ( ( 1u << ECMA_POINTER_FIELD_WIDTH ) - 1) )
/**
@ -53,7 +51,7 @@ extern void* ecma_decompress_pointer(uintptr_t compressed_pointer);
* to specified non_compressed_pointer.
*/
#define ecma_set_non_null_pointer( field, non_compressed_pointer) \
(field) = ( ecma_compress_pointer( non_compressed_pointer) & ( ( 1u << ECMA_POINTER_FIELD_WIDTH ) - 1) )
(field) = ( mem_compress_pointer( non_compressed_pointer) & ( ( 1u << ECMA_POINTER_FIELD_WIDTH ) - 1) )
/* ecma-helpers-value.c */
extern bool ecma_is_value_empty( ecma_value_t value);

View File

@ -35,9 +35,6 @@ const uint32_t test_iters = 64;
// Subiterations count
const uint32_t test_max_sub_iters = 1024;
// Maximum size of chunk divided by MEM_ALIGNMENT
const uint32_t test_max_chunk_size_divided_by_alignment = 32;
int
main( int __unused argc,
char __unused **argv)
@ -49,26 +46,24 @@ main( int __unused argc,
for ( uint32_t i = 0; i < test_iters; i++ )
{
mem_pool_state_t pool;
uint8_t test_pool[test_pool_area_size] __attribute__((aligned(MEM_ALIGNMENT)));
mem_pool_state_t* pool_p = (mem_pool_state_t*) test_pool;
const size_t chunk_size = mem_get_chunk_size( rand() % MEM_POOL_CHUNK_TYPE__COUNT );
mem_pool_init( &pool, chunk_size, test_pool, sizeof (test_pool));
mem_pool_init( pool_p, sizeof (test_pool));
const size_t subiters = ( (size_t) rand() % test_max_sub_iters ) + 1;
uint8_t* ptrs[subiters];
for ( size_t j = 0; j < subiters; j++ )
{
ptrs[j] = mem_pool_alloc_chunk( &pool);
ptrs[j] = mem_pool_alloc_chunk( pool_p);
// TODO: Enable check with condition that j <= minimum count of chunks that fit in the pool
// JERRY_ASSERT(ptrs[j] != NULL);
if ( ptrs[j] != NULL )
{
memset(ptrs[j], 0, chunk_size);
memset(ptrs[j], 0, MEM_POOL_CHUNK_SIZE);
}
}
@ -78,12 +73,12 @@ main( int __unused argc,
{
if ( ptrs[j] != NULL )
{
for ( size_t k = 0; k < chunk_size; k++ )
for ( size_t k = 0; k < MEM_POOL_CHUNK_SIZE; k++ )
{
JERRY_ASSERT( ((uint8_t*)ptrs[j])[k] == 0 );
}
mem_pool_free_chunk( &pool, ptrs[j]);
mem_pool_free_chunk( pool_p, ptrs[j]);
}
}
}

View File

@ -29,9 +29,6 @@ extern void srand (unsigned int __seed);
extern int rand (void);
extern long int time (long int *__timer);
// Heap size is 8K
const size_t test_heap_size = 8 * 1024;
// Iterations count
const uint32_t test_iters = 16384;
@ -42,81 +39,63 @@ int
main( int __unused argc,
char __unused **argv)
{
uint8_t heap[test_heap_size] __attribute__((aligned(MEM_ALIGNMENT)));
mem_init();
mem_heap_init( heap, sizeof (heap));
mem_pools_init();
srand((unsigned int) time(NULL));
unsigned int seed = (unsigned int)rand();
__printf("seed=%u\n", seed);
srand(seed);
srand((unsigned int) time(NULL));
unsigned int seed = (unsigned int)rand();
__printf("seed=%u\n", seed);
srand(seed);
for ( uint32_t i = 0; i < test_iters; i++ )
for ( uint32_t i = 0; i < test_iters; i++ )
{
const size_t subiters = ( (size_t) rand() % test_max_sub_iters ) + 1;
const size_t subiters = ( (size_t) rand() % test_max_sub_iters ) + 1;
uint8_t * ptrs[subiters];
mem_pool_chunk_type_t types[subiters];
for ( size_t j = 0; j < subiters; j++ )
uint8_t *ptrs[subiters];
for ( size_t j = 0; j < subiters; j++ )
{
mem_pool_chunk_type_t type = (mem_pool_chunk_type_t) (rand() % MEM_POOL_CHUNK_TYPE__COUNT);
const size_t chunk_size = mem_get_chunk_size( type);
ptrs[j] = mem_pools_alloc();
// JERRY_ASSERT(ptrs[j] != NULL);
types[j] = type;
ptrs[j] = mem_pools_alloc( type);
// JERRY_ASSERT(ptrs[j] != NULL);
if ( ptrs[j] != NULL )
if ( ptrs[j] != NULL )
{
__memset(ptrs[j], 0, chunk_size);
__memset(ptrs[j], 0, MEM_POOL_CHUNK_SIZE);
}
}
// mem_heap_print( false);
for ( size_t j = 0; j < subiters; j++ )
{
if ( ptrs[j] != NULL )
{
mem_pool_chunk_type_t type = types[j];
const size_t chunk_size = mem_get_chunk_size( type);
// mem_heap_print( false);
for ( size_t k = 0; k < chunk_size; k++ )
for ( size_t j = 0; j < subiters; j++ )
{
if ( ptrs[j] != NULL )
{
for ( size_t k = 0; k < MEM_POOL_CHUNK_SIZE; k++ )
{
JERRY_ASSERT( ((uint8_t*) ptrs[j])[k] == 0 );
JERRY_ASSERT( ((uint8_t*) ptrs[j])[k] == 0 );
}
mem_pools_free( type, ptrs[j]);
mem_pools_free( ptrs[j]);
}
}
}
#ifdef MEM_STATS
mem_pools_stats_t stats;
mem_pools_get_stats( &stats);
mem_pools_stats_t stats;
mem_pools_get_stats( &stats);
__printf("Pools stats:\n");
__printf(" Chunk size: %u\n"
" Pools: %lu\n"
" Allocated chunks: %lu\n"
" Free chunks: %lu\n"
" Peak pools: %lu\n"
" Peak allocated chunks: %lu\n\n",
MEM_POOL_CHUNK_SIZE,
stats.pools_count,
stats.allocated_chunks,
stats.free_chunks,
stats.peak_pools_count,
stats.peak_allocated_chunks);
#endif /* MEM_STATS */
__printf("Pools stats:\n");
for(mem_pool_chunk_type_t type = 0;
type < MEM_POOL_CHUNK_TYPE__COUNT;
type++)
{
__printf(" Chunk size: %u\n"
" Pools: %lu\n"
" Allocated chunks: %lu\n"
" Free chunks: %lu\n"
" Peak pools: %lu\n"
" Peak allocated chunks: %lu\n",
mem_get_chunk_size( type),
stats.pools_count[ type ],
stats.allocated_chunks[ type ],
stats.free_chunks[ type ],
stats.peak_pools_count[ type ],
stats.peak_allocated_chunks[ type ]);
}
__printf("\n");
return 0;
return 0;
} /* main */