Change 'mem' namespace to 'jmem'

The 'mem_' prefix is too general, so it might clash with
symbols in other libraries. Renamed the directory, file,
function and type names.
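
For illustration, a typical allocator call site changes as follows (a minimal
before/after sketch; the variable and size names are placeholders, the calls
are ones touched by this patch):

    /* before */
    lit_utf8_byte_t *data_p = (lit_utf8_byte_t *) mem_heap_alloc_block (data_size);
    /* ... use data_p ... */
    mem_heap_free_block (data_p, data_size);

    /* after */
    lit_utf8_byte_t *data_p = (lit_utf8_byte_t *) jmem_heap_alloc_block (data_size);
    /* ... use data_p ... */
    jmem_heap_free_block (data_p, data_size);

The same s/mem/jmem/ scheme applies to the types, macros and build options,
e.g. mem_cpointer_t -> jmem_cpointer_t, MEM_CP_NULL -> JMEM_CP_NULL,
MEM_ALIGNMENT_LOG -> JMEM_ALIGNMENT_LOG, MEM_DEFINE_LOCAL_ARRAY ->
JMEM_DEFINE_LOCAL_ARRAY and MEM_STATS -> JMEM_STATS.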

Related issue: #1052

JerryScript-DCO-1.0-Signed-off-by: László Langó llango.u-szeged@partner.samsung.com
László Langó 2016-05-13 14:09:31 +02:00
parent aaa3d22677
commit 92bb551d45
63 changed files with 1612 additions and 1612 deletions

View File

@ -107,7 +107,7 @@ project (JerryCore C ASM)
${CMAKE_SOURCE_DIR}/jerry-core
${CMAKE_SOURCE_DIR}/jerry-core/lit
${CMAKE_SOURCE_DIR}/jerry-core/rcs
${CMAKE_SOURCE_DIR}/jerry-core/mem
${CMAKE_SOURCE_DIR}/jerry-core/jmem
${CMAKE_SOURCE_DIR}/jerry-core/vm
${CMAKE_SOURCE_DIR}/jerry-core/ecma/builtin-objects
${CMAKE_SOURCE_DIR}/jerry-core/ecma/base
@ -125,7 +125,7 @@ project (JerryCore C ASM)
file(GLOB SOURCE_CORE_API *.c)
file(GLOB SOURCE_CORE_LIT lit/*.c)
file(GLOB SOURCE_CORE_RCS rcs/*.c)
file(GLOB SOURCE_CORE_MEM mem/*.c)
file(GLOB SOURCE_CORE_MEM jmem/*.c)
file(GLOB SOURCE_CORE_VM vm/*.c)
file(GLOB SOURCE_CORE_ECMA_BUILTINS ecma/builtin-objects/*.c)
file(GLOB SOURCE_CORE_ECMA_BASE ecma/base/*.c)

View File

@ -19,7 +19,7 @@
#include "ecma-gc.h"
#include "ecma-lcache.h"
#include "jrt.h"
#include "mem-poolman.h"
#include "jmem-poolman.h"
JERRY_STATIC_ASSERT (sizeof (ecma_property_value_t) == sizeof (ecma_value_t),
size_of_ecma_property_value_t_must_be_equal_to_size_of_ecma_value_t);
@ -65,7 +65,7 @@ JERRY_STATIC_ASSERT (sizeof (ecma_getter_setter_pointers_t) <= sizeof (uint64_t)
#define ALLOC(ecma_type) ecma_ ## ecma_type ## _t * \
ecma_alloc_ ## ecma_type (void) \
{ \
ecma_ ## ecma_type ## _t *p ## ecma_type = (ecma_ ## ecma_type ## _t *) mem_pools_alloc (); \
ecma_ ## ecma_type ## _t *p ## ecma_type = (ecma_ ## ecma_type ## _t *) jmem_pools_alloc (); \
\
JERRY_ASSERT (p ## ecma_type != NULL); \
\
@ -78,7 +78,7 @@ JERRY_STATIC_ASSERT (sizeof (ecma_getter_setter_pointers_t) <= sizeof (uint64_t)
#define DEALLOC(ecma_type) void \
ecma_dealloc_ ## ecma_type (ecma_ ## ecma_type ## _t *p ## ecma_type) \
{ \
mem_pools_free ((uint8_t *) p ## ecma_type); \
jmem_pools_free ((uint8_t *) p ## ecma_type); \
}
/**
@ -104,7 +104,7 @@ DECLARE_ROUTINES_FOR (external_pointer)
ecma_property_pair_t *
ecma_alloc_property_pair (void)
{
return mem_heap_alloc_block (sizeof (ecma_property_pair_t));
return jmem_heap_alloc_block (sizeof (ecma_property_pair_t));
} /* ecma_alloc_property_pair */
/**
@ -113,7 +113,7 @@ ecma_alloc_property_pair (void)
extern void
ecma_dealloc_property_pair (ecma_property_pair_t *property_pair_p) /**< property pair to be freed */
{
mem_heap_free_block (property_pair_p, sizeof (ecma_property_pair_t));
jmem_heap_free_block (property_pair_p, sizeof (ecma_property_pair_t));
} /* ecma_dealloc_property_pair */
/**

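For reference, the ALLOC/DEALLOC macros above generate pool-backed helpers for
each declared ecma type; expanded for the external_pointer case they correspond
roughly to the sketch below (the full ALLOC body is truncated in the hunk, so
the return statement is an assumption):

    ecma_external_pointer_t *
    ecma_alloc_external_pointer (void)
    {
      ecma_external_pointer_t *pexternal_pointer = (ecma_external_pointer_t *) jmem_pools_alloc ();
      JERRY_ASSERT (pexternal_pointer != NULL);
      return pexternal_pointer;
    }

    void
    ecma_dealloc_external_pointer (ecma_external_pointer_t *pexternal_pointer)
    {
      jmem_pools_free ((uint8_t *) pexternal_pointer);
    }
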
View File

@ -564,10 +564,10 @@ ecma_gc_run (void)
* Try to free some memory (depending on severity).
*/
void
ecma_try_to_give_back_some_memory (mem_try_give_memory_back_severity_t severity) /**< severity of
* the request */
ecma_try_to_give_back_some_memory (jmem_try_give_memory_back_severity_t severity) /**< severity of
* the request */
{
if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
if (severity == JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
{
/*
* If there is enough newly allocated objects since last GC, probably it is worthwhile to start GC now.
@ -580,7 +580,7 @@ ecma_try_to_give_back_some_memory (mem_try_give_memory_back_severity_t severity)
}
else
{
JERRY_ASSERT (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
JERRY_ASSERT (severity == JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
/* Freeing as much memory as we currently can */
ecma_lcache_invalidate_all ();

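The severity hook above is connected to the allocator through the renamed
registration API; a minimal lifecycle sketch using only calls that appear in
this patch (when and how the allocator fires the callbacks is simplified):

    jmem_init ();
    jmem_register_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);

    /* Under memory pressure the allocator runs the registered callbacks with
     * JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW for a cheap attempt (e.g. start a GC)
     * or JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH to free as much as possible
     * (e.g. invalidate the LCache). */

    jmem_unregister_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);
    jmem_finalize (false); /* pass true to print statistics in a JMEM_STATS build */
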
View File

@ -17,7 +17,7 @@
#define ECMA_GC_H
#include "ecma-globals.h"
#include "mem-allocator.h"
#include "jmem-allocator.h"
/** \addtogroup ecma ECMA
* @{
@ -31,7 +31,7 @@ extern void ecma_init_gc_info (ecma_object_t *);
extern void ecma_ref_object (ecma_object_t *);
extern void ecma_deref_object (ecma_object_t *);
extern void ecma_gc_run (void);
extern void ecma_try_to_give_back_some_memory (mem_try_give_memory_back_severity_t);
extern void ecma_try_to_give_back_some_memory (jmem_try_give_memory_back_severity_t);
/**
* @}

View File

@ -20,7 +20,7 @@
#include "config.h"
#include "jrt.h"
#include "lit-magic-strings.h"
#include "mem-allocator.h"
#include "jmem-allocator.h"
/** \addtogroup ecma ECMA
* @{
@ -36,15 +36,15 @@
* Ecma-pointer field is used to calculate ecma value's address.
*
* Ecma-pointer contains value's shifted offset from common Ecma-pointers' base.
* The offset is shifted right by MEM_ALIGNMENT_LOG.
* Least significant MEM_ALIGNMENT_LOG bits of non-shifted offset are zeroes.
* The offset is shifted right by JMEM_ALIGNMENT_LOG.
* Least significant JMEM_ALIGNMENT_LOG bits of non-shifted offset are zeroes.
*/
#define ECMA_POINTER_FIELD_WIDTH MEM_CP_WIDTH
#define ECMA_POINTER_FIELD_WIDTH JMEM_CP_WIDTH
/**
* The NULL value for compressed pointers
*/
#define ECMA_NULL_POINTER MEM_CP_NULL
#define ECMA_NULL_POINTER JMEM_CP_NULL
/**
* @}
@ -98,7 +98,7 @@ typedef int32_t ecma_integer_value_t;
#if UINTPTR_MAX <= UINT32_MAX
/**
* MEM_ALIGNMENT_LOG aligned pointers can be stored directly in ecma_value_t
* JMEM_ALIGNMENT_LOG aligned pointers can be stored directly in ecma_value_t
*/
#define ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY
@ -335,7 +335,7 @@ typedef struct
{
ecma_property_t types[ECMA_PROPERTY_PAIR_ITEM_COUNT]; /**< two property type slot. The first represent
* the type of this property (e.g. property pair) */
mem_cpointer_t next_property_cp; /**< next cpointer */
jmem_cpointer_t next_property_cp; /**< next cpointer */
} ecma_property_header_t;
/**
@ -343,8 +343,8 @@ typedef struct
*/
typedef struct
{
mem_cpointer_t getter_p; /**< pointer to getter object */
mem_cpointer_t setter_p; /**< pointer to setter object */
jmem_cpointer_t getter_p; /**< pointer to getter object */
jmem_cpointer_t setter_p; /**< pointer to setter object */
} ecma_getter_setter_pointers_t;
/**
@ -363,7 +363,7 @@ typedef struct
{
ecma_property_header_t header; /**< header of the property */
ecma_property_value_t values[ECMA_PROPERTY_PAIR_ITEM_COUNT]; /**< property value slots */
mem_cpointer_t names_cp[ECMA_PROPERTY_PAIR_ITEM_COUNT]; /**< property name slots */
jmem_cpointer_t names_cp[ECMA_PROPERTY_PAIR_ITEM_COUNT]; /**< property name slots */
} ecma_property_pair_t;
/**
@ -488,13 +488,13 @@ typedef struct ecma_object_t
uint16_t type_flags_refs;
/** next in the object chain maintained by the garbage collector */
mem_cpointer_t gc_next_cp;
jmem_cpointer_t gc_next_cp;
/** compressed pointer to property list or bound object */
mem_cpointer_t property_list_or_bound_object_cp;
jmem_cpointer_t property_list_or_bound_object_cp;
/** object prototype or outer reference */
mem_cpointer_t prototype_or_outer_reference_cp;
jmem_cpointer_t prototype_or_outer_reference_cp;
} ecma_object_t;
/**
@ -730,10 +730,10 @@ typedef struct
ecma_length_t unit_number;
/** Compressed pointer to first chunk with collection's data */
mem_cpointer_t first_chunk_cp;
jmem_cpointer_t first_chunk_cp;
/** Compressed pointer to last chunk with collection's data */
mem_cpointer_t last_chunk_cp;
jmem_cpointer_t last_chunk_cp;
} ecma_collection_header_t;
/**
@ -742,10 +742,10 @@ typedef struct
typedef struct
{
/** Characters */
lit_utf8_byte_t data[ sizeof (uint64_t) - sizeof (mem_cpointer_t) ];
lit_utf8_byte_t data[ sizeof (uint64_t) - sizeof (jmem_cpointer_t) ];
/** Compressed pointer to next chunk */
mem_cpointer_t next_chunk_cp;
jmem_cpointer_t next_chunk_cp;
} ecma_collection_chunk_t;
/**
@ -809,10 +809,10 @@ typedef struct ecma_string_t
union
{
/** Index of string in literal table */
mem_cpointer_t lit_cp;
jmem_cpointer_t lit_cp;
/** Compressed pointer to an ecma_collection_header_t */
mem_cpointer_t utf8_collection_cp;
jmem_cpointer_t utf8_collection_cp;
/**
* Actual data of an ascii string type
@ -820,13 +820,13 @@ typedef struct ecma_string_t
struct
{
/** Compressed pointer to a raw character array */
mem_cpointer_t ascii_collection_cp;
jmem_cpointer_t ascii_collection_cp;
/** Size of ascii string in bytes */
uint16_t size;
} ascii_string;
/** Compressed pointer to an ecma_number_t */
mem_cpointer_t number_cp;
jmem_cpointer_t number_cp;
/** UInt32-represented number placed locally in the descriptor */
uint32_t uint32_number;
@ -852,7 +852,7 @@ typedef uintptr_t ecma_external_pointer_t;
*/
typedef struct
{
uint16_t size; /**< real size >> MEM_ALIGNMENT_LOG */
uint16_t size; /**< real size >> JMEM_ALIGNMENT_LOG */
uint16_t refs; /**< reference counter for the byte code */
uint16_t status_flags; /**< various status flags:
* CBC_CODE_FLAGS_FUNCTION flag tells whether

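The compressed-pointer comments above boil down to the following usage pattern;
a conceptual sketch assembled from the macros and types renamed in this file
(the demo function and its argument are hypothetical):

    #include "ecma-globals.h"
    #include "ecma-helpers.h"
    #include "jrt.h"

    static void
    compressed_pointer_demo (ecma_object_t *obj_p) /**< any JMEM_ALIGNMENT-aligned heap pointer */
    {
      jmem_cpointer_t obj_cp; /* JMEM_CP_WIDTH bits wide */

      /* Stores the object's offset from the heap base, shifted right by JMEM_ALIGNMENT_LOG. */
      ECMA_SET_NON_NULL_POINTER (obj_cp, obj_p);

      /* Shifts the offset back and re-adds the heap base: the round trip is lossless. */
      ecma_object_t *restored_p = ECMA_GET_NON_NULL_POINTER (ecma_object_t, obj_cp);
      JERRY_ASSERT (restored_p == obj_p);
      (void) restored_p; /* silence unused-variable warnings in release builds */
    }
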
View File

@ -185,7 +185,7 @@ ecma_new_ecma_string_from_utf8 (const lit_utf8_byte_t *string_p, /**< utf-8 stri
{
string_desc_p->refs_and_container = ECMA_STRING_CONTAINER_HEAP_ASCII_STRING | ECMA_STRING_REF_ONE;
const size_t data_size = string_size;
lit_utf8_byte_t *data_p = (lit_utf8_byte_t *) mem_heap_alloc_block (data_size);
lit_utf8_byte_t *data_p = (lit_utf8_byte_t *) jmem_heap_alloc_block (data_size);
string_desc_p->u.ascii_string.size = (uint16_t) string_size;
memcpy (data_p, string_p, string_size);
ECMA_SET_NON_NULL_POINTER (string_desc_p->u.ascii_string.ascii_collection_cp, data_p);
@ -194,7 +194,7 @@ ecma_new_ecma_string_from_utf8 (const lit_utf8_byte_t *string_p, /**< utf-8 stri
{
string_desc_p->refs_and_container = ECMA_STRING_CONTAINER_HEAP_UTF8_STRING | ECMA_STRING_REF_ONE;
const size_t data_size = string_size + sizeof (ecma_string_heap_header_t);
ecma_string_heap_header_t *data_p = (ecma_string_heap_header_t *) mem_heap_alloc_block (data_size);
ecma_string_heap_header_t *data_p = (ecma_string_heap_header_t *) jmem_heap_alloc_block (data_size);
JERRY_ASSERT (string_length <= UINT16_MAX);
@ -370,7 +370,7 @@ ecma_concat_ecma_strings (ecma_string_t *string1_p, /**< first ecma-string */
{
string_desc_p->refs_and_container = ECMA_STRING_CONTAINER_HEAP_ASCII_STRING | ECMA_STRING_REF_ONE;
const size_t data_size = new_size;
lit_utf8_byte_t *data_p = (lit_utf8_byte_t *) mem_heap_alloc_block (data_size);
lit_utf8_byte_t *data_p = (lit_utf8_byte_t *) jmem_heap_alloc_block (data_size);
lit_utf8_size_t bytes_copied = ecma_string_to_utf8_string (string1_p, data_p, str1_size);
JERRY_ASSERT (bytes_copied == str1_size);
bytes_copied = ecma_string_to_utf8_string (string2_p, data_p + str1_size, str2_size);
@ -386,7 +386,7 @@ ecma_concat_ecma_strings (ecma_string_t *string1_p, /**< first ecma-string */
{
string_desc_p->refs_and_container = ECMA_STRING_CONTAINER_HEAP_UTF8_STRING | ECMA_STRING_REF_ONE;
const size_t data_size = new_size + sizeof (ecma_string_heap_header_t);
ecma_string_heap_header_t *data_p = (ecma_string_heap_header_t *) mem_heap_alloc_block (data_size);
ecma_string_heap_header_t *data_p = (ecma_string_heap_header_t *) jmem_heap_alloc_block (data_size);
lit_utf8_size_t bytes_copied = ecma_string_to_utf8_string (string1_p,
(lit_utf8_byte_t *) (data_p + 1),
str1_size);
@ -459,7 +459,7 @@ ecma_copy_ecma_string (ecma_string_t *string_desc_p) /**< string descriptor */
string_desc_p->u.utf8_collection_cp);
JERRY_ASSERT (data_p != NULL);
const size_t data_size = data_p->size + sizeof (ecma_string_heap_header_t);
ecma_string_heap_header_t *new_data_p = (ecma_string_heap_header_t *) mem_heap_alloc_block (data_size);
ecma_string_heap_header_t *new_data_p = (ecma_string_heap_header_t *) jmem_heap_alloc_block (data_size);
memcpy (new_data_p, data_p, data_size);
ECMA_SET_NON_NULL_POINTER (new_str_p->u.utf8_collection_cp, new_data_p);
@ -478,7 +478,7 @@ ecma_copy_ecma_string (ecma_string_t *string_desc_p) /**< string descriptor */
JERRY_ASSERT (data_p != NULL);
const size_t data_size = string_desc_p->u.ascii_string.size;
lit_utf8_byte_t *new_data_p = (lit_utf8_byte_t *) mem_heap_alloc_block (data_size);
lit_utf8_byte_t *new_data_p = (lit_utf8_byte_t *) jmem_heap_alloc_block (data_size);
memcpy (new_data_p, data_p, data_size);
ECMA_SET_NON_NULL_POINTER (new_str_p->u.ascii_string.ascii_collection_cp, new_data_p);
@ -552,7 +552,7 @@ ecma_deref_ecma_string (ecma_string_t *string_p) /**< ecma-string */
ecma_string_heap_header_t *const data_p = ECMA_GET_NON_NULL_POINTER (ecma_string_heap_header_t,
string_p->u.utf8_collection_cp);
mem_heap_free_block (data_p, data_p->size + sizeof (ecma_string_heap_header_t));
jmem_heap_free_block (data_p, data_p->size + sizeof (ecma_string_heap_header_t));
break;
}
@ -561,7 +561,7 @@ ecma_deref_ecma_string (ecma_string_t *string_p) /**< ecma-string */
lit_utf8_byte_t *const data_p = ECMA_GET_NON_NULL_POINTER (lit_utf8_byte_t,
string_p->u.ascii_string.ascii_collection_cp);
mem_heap_free_block (data_p, string_p->u.ascii_string.size);
jmem_heap_free_block (data_p, string_p->u.ascii_string.size);
break;
}
@ -802,7 +802,7 @@ ecma_string_get_number_in_desc_size (const uint32_t uint32_number) /**< number i
* @return number of bytes in the buffer
*/
static inline lit_utf8_size_t __attr_always_inline___
ecma_string_get_heap_number_size (mem_cpointer_t number_cp) /**< Compressed pointer to an ecma_number_t */
ecma_string_get_heap_number_size (jmem_cpointer_t number_cp) /**< Compressed pointer to an ecma_number_t */
{
const ecma_number_t *num_p = ECMA_GET_NON_NULL_POINTER (ecma_number_t, number_cp);
lit_utf8_byte_t buffer[ECMA_MAX_CHARS_IN_STRINGIFIED_NUMBER];
@ -1052,7 +1052,7 @@ ecma_compare_ecma_strings_longpath (const ecma_string_t *string1_p, /* ecma-stri
}
else
{
utf8_string1_p = (lit_utf8_byte_t *) mem_heap_alloc_block ((size_t) strings_size);
utf8_string1_p = (lit_utf8_byte_t *) jmem_heap_alloc_block ((size_t) strings_size);
lit_utf8_size_t bytes_copied = ecma_string_to_utf8_string (string1_p, utf8_string1_p, strings_size);
JERRY_ASSERT (bytes_copied == strings_size);
@ -1083,7 +1083,7 @@ ecma_compare_ecma_strings_longpath (const ecma_string_t *string1_p, /* ecma-stri
}
else
{
utf8_string2_p = (lit_utf8_byte_t *) mem_heap_alloc_block ((size_t) strings_size);
utf8_string2_p = (lit_utf8_byte_t *) jmem_heap_alloc_block ((size_t) strings_size);
lit_utf8_size_t bytes_copied = ecma_string_to_utf8_string (string2_p, utf8_string2_p, strings_size);
JERRY_ASSERT (bytes_copied == strings_size);
@ -1094,12 +1094,12 @@ ecma_compare_ecma_strings_longpath (const ecma_string_t *string1_p, /* ecma-stri
if (is_utf8_string1_on_heap)
{
mem_heap_free_block ((void *) utf8_string1_p, (size_t) strings_size);
jmem_heap_free_block ((void *) utf8_string1_p, (size_t) strings_size);
}
if (is_utf8_string2_on_heap)
{
mem_heap_free_block ((void *) utf8_string2_p, (size_t) strings_size);
jmem_heap_free_block ((void *) utf8_string2_p, (size_t) strings_size);
}
return is_equal;
@ -1188,7 +1188,7 @@ ecma_compare_ecma_strings_relational (const ecma_string_t *string1_p, /**< ecma-
if (sizeof (utf8_string1_buffer) < utf8_string1_size)
{
utf8_string1_p = (lit_utf8_byte_t *) mem_heap_alloc_block (utf8_string1_size);
utf8_string1_p = (lit_utf8_byte_t *) jmem_heap_alloc_block (utf8_string1_size);
is_utf8_string1_on_heap = true;
}
else
@ -1230,7 +1230,7 @@ ecma_compare_ecma_strings_relational (const ecma_string_t *string1_p, /**< ecma-
if (sizeof (utf8_string2_buffer) < utf8_string2_size)
{
utf8_string2_p = (lit_utf8_byte_t *) mem_heap_alloc_block (utf8_string2_size);
utf8_string2_p = (lit_utf8_byte_t *) jmem_heap_alloc_block (utf8_string2_size);
is_utf8_string2_on_heap = true;
}
else
@ -1249,12 +1249,12 @@ ecma_compare_ecma_strings_relational (const ecma_string_t *string1_p, /**< ecma-
if (is_utf8_string1_on_heap)
{
mem_heap_free_block ((void *) utf8_string1_p, (size_t) utf8_string1_size);
jmem_heap_free_block ((void *) utf8_string1_p, (size_t) utf8_string1_size);
}
if (is_utf8_string2_on_heap)
{
mem_heap_free_block ((void *) utf8_string2_p, (size_t) utf8_string2_size);
jmem_heap_free_block ((void *) utf8_string2_p, (size_t) utf8_string2_size);
}
return is_first_less_than_second;
@ -1387,7 +1387,7 @@ ecma_string_get_char_at_pos (const ecma_string_t *string_p, /**< ecma-string */
ecma_char_t ch;
MEM_DEFINE_LOCAL_ARRAY (utf8_str_p, buffer_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (utf8_str_p, buffer_size, lit_utf8_byte_t);
lit_utf8_size_t sz = ecma_string_to_utf8_string (string_p, utf8_str_p, buffer_size);
JERRY_ASSERT (sz == buffer_size);
@ -1399,7 +1399,7 @@ ecma_string_get_char_at_pos (const ecma_string_t *string_p, /**< ecma-string */
ch = utf8_str_p[index];
MEM_FINALIZE_LOCAL_ARRAY (utf8_str_p);
JMEM_FINALIZE_LOCAL_ARRAY (utf8_str_p);
return ch;
} /* ecma_string_get_char_at_pos */
@ -1547,7 +1547,7 @@ ecma_string_substr (const ecma_string_t *string_p, /**< pointer to an ecma strin
*/
ecma_string_t *ecma_string_p;
MEM_DEFINE_LOCAL_ARRAY (utf8_str_p, buffer_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (utf8_str_p, buffer_size, lit_utf8_byte_t);
lit_utf8_size_t sz = ecma_string_to_utf8_string (string_p, utf8_str_p, buffer_size);
JERRY_ASSERT (sz == buffer_size);
@ -1578,7 +1578,7 @@ ecma_string_substr (const ecma_string_t *string_p, /**< pointer to an ecma strin
ecma_string_p = ecma_new_ecma_string_from_utf8 (start_p, (lit_utf8_size_t) (end_p - start_p));
}
MEM_FINALIZE_LOCAL_ARRAY (utf8_str_p);
JMEM_FINALIZE_LOCAL_ARRAY (utf8_str_p);
return ecma_string_p;
}

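The MEM_DEFINE_LOCAL_ARRAY / MEM_FINALIZE_LOCAL_ARRAY pairs replaced throughout
this file keep the same bracketing usage under the new prefix; a minimal sketch
of the pattern (string_p, buffer_size and the buffer name are placeholders):

    JMEM_DEFINE_LOCAL_ARRAY (utf8_buf_p, buffer_size, lit_utf8_byte_t);

    lit_utf8_size_t sz = ecma_string_to_utf8_string (string_p, utf8_buf_p, buffer_size);
    JERRY_ASSERT (sz == buffer_size);
    /* ... work on utf8_buf_p ... */

    JMEM_FINALIZE_LOCAL_ARRAY (utf8_buf_p); /* must pair with the matching DEFINE */
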
View File

@ -40,11 +40,11 @@ JERRY_STATIC_ASSERT (ECMA_TYPE___MAX <= ECMA_VALUE_TYPE_MASK,
JERRY_STATIC_ASSERT ((ECMA_VALUE_FULL_MASK + 1) == (1 << ECMA_VALUE_SHIFT),
ecma_value_part_must_start_after_flags);
JERRY_STATIC_ASSERT (ECMA_VALUE_SHIFT <= MEM_ALIGNMENT_LOG,
JERRY_STATIC_ASSERT (ECMA_VALUE_SHIFT <= JMEM_ALIGNMENT_LOG,
ecma_value_shift_must_be_less_than_or_equal_than_mem_alignment_log);
JERRY_STATIC_ASSERT ((sizeof (ecma_value_t) * JERRY_BITSINBYTE)
>= (sizeof (mem_cpointer_t) * JERRY_BITSINBYTE + ECMA_VALUE_SHIFT),
>= (sizeof (jmem_cpointer_t) * JERRY_BITSINBYTE + ECMA_VALUE_SHIFT),
ecma_value_must_be_large_enough_to_store_compressed_pointers);
#ifdef ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY
@ -86,7 +86,7 @@ ecma_pointer_to_ecma_value (const void *ptr) /**< pointer */
#else /* !ECMA_VALUE_CAN_STORE_UINTPTR_VALUE_DIRECTLY */
mem_cpointer_t ptr_cp;
jmem_cpointer_t ptr_cp;
ECMA_SET_NON_NULL_POINTER (ptr_cp, ptr);
return ((ecma_value_t) ptr_cp) << ECMA_VALUE_SHIFT;

View File

@ -45,7 +45,7 @@ ecma_new_values_collection (const ecma_value_t values_buffer[], /**< ecma values
header_p->unit_number = values_number;
mem_cpointer_t *next_chunk_cp_p = &header_p->first_chunk_cp;
jmem_cpointer_t *next_chunk_cp_p = &header_p->first_chunk_cp;
ecma_collection_chunk_t *last_chunk_p = NULL;
ecma_value_t *cur_value_buf_iter_p = NULL;
ecma_value_t *cur_value_buf_end_p = NULL;
@ -303,7 +303,7 @@ ecma_collection_iterator_init (ecma_collection_iterator_t *iterator_p, /**< cont
ecma_collection_header_t *collection_p) /**< header of collection */
{
iterator_p->header_p = collection_p;
iterator_p->next_chunk_cp = (collection_p != NULL ? collection_p->first_chunk_cp : MEM_CP_NULL);
iterator_p->next_chunk_cp = (collection_p != NULL ? collection_p->first_chunk_cp : JMEM_CP_NULL);
iterator_p->current_index = 0;
iterator_p->current_value_p = NULL;
iterator_p->current_chunk_end_p = NULL;

View File

@ -106,7 +106,7 @@ ecma_create_object (ecma_object_t *prototype_object_p, /**< pointer to prototybe
ecma_init_gc_info (new_object_p);
new_object_p->property_list_or_bound_object_cp = MEM_CP_NULL;
new_object_p->property_list_or_bound_object_cp = JMEM_CP_NULL;
ECMA_SET_POINTER (new_object_p->prototype_or_outer_reference_cp,
prototype_object_p);
@ -134,7 +134,7 @@ ecma_create_decl_lex_env (ecma_object_t *outer_lexical_environment_p) /**< outer
ecma_init_gc_info (new_lexical_environment_p);
new_lexical_environment_p->property_list_or_bound_object_cp = MEM_CP_NULL;
new_lexical_environment_p->property_list_or_bound_object_cp = JMEM_CP_NULL;
ECMA_SET_POINTER (new_lexical_environment_p->prototype_or_outer_reference_cp,
outer_lexical_environment_p);
@ -398,7 +398,7 @@ ecma_create_property (ecma_object_t *object_p, /**< the object */
{
JERRY_ASSERT (ECMA_PROPERTY_PAIR_ITEM_COUNT == 2);
mem_cpointer_t *property_list_head_p = &object_p->property_list_or_bound_object_cp;
jmem_cpointer_t *property_list_head_p = &object_p->property_list_or_bound_object_cp;
bool has_hashmap = false;
if (*property_list_head_p != ECMA_NULL_POINTER)
@ -1468,7 +1468,7 @@ ecma_bytecode_deref (ecma_compiled_code_t *bytecode_p) /**< byte code pointer */
for (uint32_t i = const_literal_end; i < literal_end; i++)
{
mem_cpointer_t bytecode_cpointer = literal_start_p[i];
jmem_cpointer_t bytecode_cpointer = literal_start_p[i];
ecma_compiled_code_t *bytecode_literal_p = ECMA_GET_NON_NULL_POINTER (ecma_compiled_code_t,
bytecode_cpointer);
@ -1488,8 +1488,8 @@ ecma_bytecode_deref (ecma_compiled_code_t *bytecode_p) /**< byte code pointer */
#endif /* !CONFIG_ECMA_COMPACT_PROFILE_DISABLE_REGEXP_BUILTIN */
}
mem_heap_free_block (bytecode_p,
((size_t) bytecode_p->size) << MEM_ALIGNMENT_LOG);
jmem_heap_free_block (bytecode_p,
((size_t) bytecode_p->size) << JMEM_ALIGNMENT_LOG);
} /* ecma_bytecode_deref */
/**

View File

@ -18,9 +18,9 @@
#define ECMA_HELPERS_H
#include "ecma-globals.h"
#include "jmem-allocator.h"
#include "lit-cpointer.h"
#include "lit-strings.h"
#include "mem-allocator.h"
/** \addtogroup ecma ECMA
* @{
@ -32,25 +32,25 @@
/**
* Get value of pointer from specified non-null compressed pointer.
*/
#define ECMA_GET_NON_NULL_POINTER(type, field) MEM_CP_GET_NON_NULL_POINTER (type, field)
#define ECMA_GET_NON_NULL_POINTER(type, field) JMEM_CP_GET_NON_NULL_POINTER (type, field)
/**
* Get value of pointer from specified compressed pointer.
*/
#define ECMA_GET_POINTER(type, field) MEM_CP_GET_POINTER (type, field)
#define ECMA_GET_POINTER(type, field) JMEM_CP_GET_POINTER (type, field)
/**
* Set value of non-null compressed pointer so that it will correspond
* to specified non_compressed_pointer.
*/
#define ECMA_SET_NON_NULL_POINTER(field, non_compressed_pointer) MEM_CP_SET_NON_NULL_POINTER (field, \
non_compressed_pointer)
#define ECMA_SET_NON_NULL_POINTER(field, non_compressed_pointer) JMEM_CP_SET_NON_NULL_POINTER (field, \
non_compressed_pointer)
/**
* Set value of compressed pointer so that it will correspond
* to specified non_compressed_pointer.
*/
#define ECMA_SET_POINTER(field, non_compressed_pointer) MEM_CP_SET_POINTER (field, non_compressed_pointer)
#define ECMA_SET_POINTER(field, non_compressed_pointer) JMEM_CP_SET_POINTER (field, non_compressed_pointer)
/**
* Convert ecma-string's contents to a cesu-8 string and put it into a buffer.
@ -65,7 +65,7 @@
\
if (utf8_ptr == NULL) \
{ \
utf8_ptr = (const lit_utf8_byte_t *) (mem_heap_alloc_block (utf8_str_size)); \
utf8_ptr = (const lit_utf8_byte_t *) jmem_heap_alloc_block (utf8_str_size); \
lit_utf8_size_t sz = ecma_string_to_utf8_string (ecma_str_ptr, (lit_utf8_byte_t *) utf8_ptr, utf8_str_size); \
JERRY_ASSERT (sz == utf8_str_size); \
utf8_ptr ## must_be_freed = true; \
@ -110,7 +110,7 @@
if (utf8_ptr ## must_be_freed) \
{ \
JERRY_ASSERT (utf8_ptr != NULL); \
mem_heap_free_block ((void *) utf8_ptr, utf8_str_size); \
jmem_heap_free_block ((void *) utf8_ptr, utf8_str_size); \
}
/* ecma-helpers-value.c */
@ -226,7 +226,7 @@ extern ecma_collection_header_t *ecma_new_strings_collection (ecma_string_t *[],
typedef struct
{
ecma_collection_header_t *header_p; /**< collection header */
mem_cpointer_t next_chunk_cp; /**< compressed pointer to next chunk */
jmem_cpointer_t next_chunk_cp; /**< compressed pointer to next chunk */
ecma_length_t current_index; /**< index of current element */
const ecma_value_t *current_value_p; /**< pointer to current element */
const ecma_value_t *current_chunk_beg_p; /**< pointer to beginning of current chunk's data */

View File

@ -19,7 +19,7 @@
#include "ecma-init-finalize.h"
#include "ecma-lcache.h"
#include "ecma-lex-env.h"
#include "mem-allocator.h"
#include "jmem-allocator.h"
/** \addtogroup ecma ECMA
* @{
@ -39,7 +39,7 @@ ecma_init (void)
ecma_lcache_init ();
ecma_init_environment ();
mem_register_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);
jmem_register_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);
} /* ecma_init */
/**
@ -48,7 +48,7 @@ ecma_init (void)
void
ecma_finalize (void)
{
mem_unregister_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);
jmem_unregister_a_try_give_memory_back_callback (ecma_try_to_give_back_some_memory);
ecma_finalize_environment ();
ecma_lcache_invalidate_all ();

View File

@ -37,10 +37,10 @@ typedef struct
ecma_property_t *prop_p;
/** Compressed pointer to object (ECMA_NULL_POINTER marks record empty) */
mem_cpointer_t object_cp;
jmem_cpointer_t object_cp;
/** Compressed pointer to property's name */
mem_cpointer_t prop_name_cp;
jmem_cpointer_t prop_name_cp;
} ecma_lcache_hash_entry_t;
/**

View File

@ -32,7 +32,7 @@
* Compute the total size of the property hashmap.
*/
#define ECMA_PROPERTY_HASHMAP_GET_TOTAL_SIZE(max_property_count) \
(sizeof (ecma_property_hashmap_t) + (max_property_count * sizeof (mem_cpointer_t)) + (max_property_count >> 3))
(sizeof (ecma_property_hashmap_t) + (max_property_count * sizeof (jmem_cpointer_t)) + (max_property_count >> 3))
/**
* Number of items in the stepping table.
@ -110,7 +110,7 @@ ecma_property_hashmap_create (ecma_object_t *object_p) /**< object */
size_t total_size = ECMA_PROPERTY_HASHMAP_GET_TOTAL_SIZE (max_property_count);
ecma_property_hashmap_t *hashmap_p = (ecma_property_hashmap_t *) mem_heap_alloc_block (total_size);
ecma_property_hashmap_t *hashmap_p = (ecma_property_hashmap_t *) jmem_heap_alloc_block (total_size);
memset (hashmap_p, 0, total_size);
hashmap_p->header.types[0].type_and_flags = ECMA_PROPERTY_TYPE_HASHMAP;
@ -118,7 +118,7 @@ ecma_property_hashmap_create (ecma_object_t *object_p) /**< object */
hashmap_p->max_property_count = max_property_count;
hashmap_p->null_count = max_property_count - named_property_count;
mem_cpointer_t *pair_list_p = (mem_cpointer_t *) (hashmap_p + 1);
jmem_cpointer_t *pair_list_p = (jmem_cpointer_t *) (hashmap_p + 1);
uint8_t *bits_p = (uint8_t *) (pair_list_p + max_property_count);
uint32_t mask = max_property_count - 1;
@ -225,8 +225,8 @@ ecma_property_hashmap_free (ecma_object_t *object_p) /**< object */
object_p->property_list_or_bound_object_cp = property_p->next_property_cp;
mem_heap_free_block (hashmap_p,
ECMA_PROPERTY_HASHMAP_GET_TOTAL_SIZE (hashmap_p->max_property_count));
jmem_heap_free_block (hashmap_p,
ECMA_PROPERTY_HASHMAP_GET_TOTAL_SIZE (hashmap_p->max_property_count));
#else /* CONFIG_ECMA_PROPERTY_HASHMAP_DISABLE */
(void) object_p;
#endif /* !CONFIG_ECMA_PROPERTY_HASHMAP_DISABLE */
@ -275,7 +275,7 @@ ecma_property_hashmap_insert (ecma_object_t *object_p, /**< object */
uint32_t start_entry_index = entry_index;
#endif /* !JERRY_NDEBUG */
mem_cpointer_t *pair_list_p = (mem_cpointer_t *) (hashmap_p + 1);
jmem_cpointer_t *pair_list_p = (jmem_cpointer_t *) (hashmap_p + 1);
while (pair_list_p[entry_index] != ECMA_NULL_POINTER)
{
@ -333,7 +333,7 @@ ecma_property_hashmap_delete (ecma_object_t *object_p, /**< object */
uint32_t entry_index = name_p->hash;
uint32_t step = ecma_property_hashmap_steps[entry_index & (ECMA_PROPERTY_HASHMAP_NUMBER_OF_STEPS - 1)];
uint32_t mask = hashmap_p->max_property_count - 1;
mem_cpointer_t *pair_list_p = (mem_cpointer_t *) (hashmap_p + 1);
jmem_cpointer_t *pair_list_p = (jmem_cpointer_t *) (hashmap_p + 1);
uint8_t *bits_p = (uint8_t *) (pair_list_p + hashmap_p->max_property_count);
if (mask < (1u << LIT_STRING_HASH_BITS))
@ -442,7 +442,7 @@ ecma_property_hashmap_find (ecma_property_hashmap_t *hashmap_p, /**< hashmap */
uint32_t entry_index = name_p->hash;
uint32_t step = ecma_property_hashmap_steps[entry_index & (ECMA_PROPERTY_HASHMAP_NUMBER_OF_STEPS - 1)];
uint32_t mask = hashmap_p->max_property_count - 1;
mem_cpointer_t *pair_list_p = (mem_cpointer_t *) (hashmap_p + 1);
jmem_cpointer_t *pair_list_p = (jmem_cpointer_t *) (hashmap_p + 1);
uint8_t *bits_p = (uint8_t *) (pair_list_p + hashmap_p->max_property_count);
if (mask < (1u << LIT_STRING_HASH_BITS))

View File

@ -1237,7 +1237,7 @@ ecma_builtin_array_prototype_object_sort (ecma_value_t this_arg, /**< this argum
}
}
MEM_DEFINE_LOCAL_ARRAY (values_buffer, defined_prop_count, ecma_value_t);
JMEM_DEFINE_LOCAL_ARRAY (values_buffer, defined_prop_count, ecma_value_t);
ecma_collection_iterator_init (&iter, array_index_props_p);
@ -1296,7 +1296,7 @@ ecma_builtin_array_prototype_object_sort (ecma_value_t this_arg, /**< this argum
ecma_free_value (values_buffer[index]);
}
MEM_FINALIZE_LOCAL_ARRAY (values_buffer);
JMEM_FINALIZE_LOCAL_ARRAY (values_buffer);
/* Undefined properties should be in the back of the array. */

View File

@ -139,7 +139,7 @@ ecma_builtin_error_prototype_object_to_string (ecma_value_t this_arg) /**< this
const lit_utf8_size_t space_size = lit_get_magic_string_size (LIT_MAGIC_STRING_SPACE_CHAR);
const lit_utf8_size_t size = name_size + msg_size + colon_size + space_size;
MEM_DEFINE_LOCAL_ARRAY (ret_str_buffer, size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (ret_str_buffer, size, lit_utf8_byte_t);
lit_utf8_byte_t *ret_str_buffer_p = ret_str_buffer;
lit_utf8_size_t bytes = ecma_string_to_utf8_string (name_string_p, ret_str_buffer_p, name_size);
@ -165,7 +165,7 @@ ecma_builtin_error_prototype_object_to_string (ecma_value_t this_arg) /**< this
ret_str_p = ecma_new_ecma_string_from_utf8 (ret_str_buffer,
size);
MEM_FINALIZE_LOCAL_ARRAY (ret_str_buffer);
JMEM_FINALIZE_LOCAL_ARRAY (ret_str_buffer);
}
ret_value = ecma_make_string_value (ret_str_p);

View File

@ -125,7 +125,7 @@ ecma_builtin_function_prototype_object_apply (ecma_value_t this_arg, /**< this a
const uint32_t length = ecma_number_to_uint32 (length_number);
/* 6. */
MEM_DEFINE_LOCAL_ARRAY (arguments_list_p, length, ecma_value_t);
JMEM_DEFINE_LOCAL_ARRAY (arguments_list_p, length, ecma_value_t);
uint32_t last_index = 0;
/* 7. */
@ -160,7 +160,7 @@ ecma_builtin_function_prototype_object_apply (ecma_value_t this_arg, /**< this a
ecma_free_value (arguments_list_p[index]);
}
MEM_FINALIZE_LOCAL_ARRAY (arguments_list_p);
JMEM_FINALIZE_LOCAL_ARRAY (arguments_list_p);
ECMA_OP_TO_NUMBER_FINALIZE (length_number);
ECMA_FINALIZE (length_value);

View File

@ -81,9 +81,9 @@ ecma_builtin_global_object_print (ecma_value_t this_arg __attr_unused___, /**< t
lit_utf8_size_t utf8_str_size = ecma_string_get_size (str_p);
MEM_DEFINE_LOCAL_ARRAY (utf8_str_p,
utf8_str_size,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (utf8_str_p,
utf8_str_size,
lit_utf8_byte_t);
lit_utf8_size_t actual_sz = ecma_string_to_utf8_string (str_p, utf8_str_p, utf8_str_size);
JERRY_ASSERT (actual_sz == utf8_str_size);
@ -124,7 +124,7 @@ ecma_builtin_global_object_print (ecma_value_t this_arg __attr_unused___, /**< t
printf (" ");
}
MEM_FINALIZE_LOCAL_ARRAY (utf8_str_p);
JMEM_FINALIZE_LOCAL_ARRAY (utf8_str_p);
ECMA_FINALIZE (str_value);
}
@ -735,9 +735,9 @@ ecma_builtin_global_object_decode_uri_helper (ecma_value_t uri __attr_unused___,
ecma_string_t *input_string_p = ecma_get_string_from_value (string);
lit_utf8_size_t input_size = ecma_string_get_size (input_string_p);
MEM_DEFINE_LOCAL_ARRAY (input_start_p,
input_size + 1,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (input_start_p,
input_size + 1,
lit_utf8_byte_t);
lit_utf8_size_t sz = ecma_string_to_utf8_string (input_string_p,
input_start_p,
@ -807,7 +807,7 @@ ecma_builtin_global_object_decode_uri_helper (ecma_value_t uri __attr_unused___,
if (ecma_is_value_empty (ret_value))
{
MEM_DEFINE_LOCAL_ARRAY (output_start_p,
JMEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_size,
lit_utf8_byte_t);
@ -934,10 +934,10 @@ ecma_builtin_global_object_decode_uri_helper (ecma_value_t uri __attr_unused___,
}
}
MEM_FINALIZE_LOCAL_ARRAY (output_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (output_start_p);
}
MEM_FINALIZE_LOCAL_ARRAY (input_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (input_start_p);
ECMA_FINALIZE (string);
return ret_value;
@ -1013,9 +1013,9 @@ ecma_builtin_global_object_encode_uri_helper (ecma_value_t uri, /**< uri argumen
ecma_string_t *input_string_p = ecma_get_string_from_value (string);
lit_utf8_size_t input_size = ecma_string_get_size (input_string_p);
MEM_DEFINE_LOCAL_ARRAY (input_start_p,
input_size,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (input_start_p,
input_size,
lit_utf8_byte_t);
lit_utf8_size_t sz = ecma_string_to_utf8_string (input_string_p,
input_start_p,
@ -1092,9 +1092,9 @@ ecma_builtin_global_object_encode_uri_helper (ecma_value_t uri, /**< uri argumen
if (ecma_is_value_empty (ret_value))
{
MEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_length,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_length,
lit_utf8_byte_t);
lit_utf8_byte_t *output_char_p = output_start_p;
input_char_p = input_start_p;
@ -1147,10 +1147,10 @@ ecma_builtin_global_object_encode_uri_helper (ecma_value_t uri, /**< uri argumen
ret_value = ecma_make_string_value (output_string_p);
MEM_FINALIZE_LOCAL_ARRAY (output_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (output_start_p);
}
MEM_FINALIZE_LOCAL_ARRAY (input_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (input_start_p);
ECMA_FINALIZE (string);
return ret_value;
@ -1234,9 +1234,9 @@ ecma_builtin_global_object_escape (ecma_value_t this_arg __attr_unused___, /**<
ecma_string_t *input_string_p = ecma_get_string_from_value (string);
lit_utf8_size_t input_size = ecma_string_get_size (input_string_p);
MEM_DEFINE_LOCAL_ARRAY (input_start_p,
input_size,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (input_start_p,
input_size,
lit_utf8_byte_t);
lit_utf8_size_t sz = ecma_string_to_utf8_string (input_string_p,
input_start_p,
@ -1276,9 +1276,9 @@ ecma_builtin_global_object_escape (ecma_value_t this_arg __attr_unused___, /**<
}
}
MEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_length,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_length,
lit_utf8_byte_t);
lit_utf8_byte_t *output_char_p = output_start_p;
@ -1327,9 +1327,9 @@ ecma_builtin_global_object_escape (ecma_value_t this_arg __attr_unused___, /**<
ret_value = ecma_make_string_value (output_string_p);
MEM_FINALIZE_LOCAL_ARRAY (output_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (output_start_p);
MEM_FINALIZE_LOCAL_ARRAY (input_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (input_start_p);
ECMA_FINALIZE (string);
return ret_value;
@ -1357,7 +1357,7 @@ ecma_builtin_global_object_unescape (ecma_value_t this_arg __attr_unused___, /**
lit_utf8_size_t input_size = ecma_string_get_size (input_string_p);
/* 3. */
MEM_DEFINE_LOCAL_ARRAY (input_start_p, input_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (input_start_p, input_size, lit_utf8_byte_t);
lit_utf8_size_t sz = ecma_string_to_utf8_string (input_string_p, input_start_p, input_size);
JERRY_ASSERT (sz == input_size);
@ -1426,7 +1426,7 @@ ecma_builtin_global_object_unescape (ecma_value_t this_arg __attr_unused___, /**
ecma_string_t *output_string_p = ecma_new_ecma_string_from_utf8 (input_start_p, output_length);
ret_value = ecma_make_string_value (output_string_p);
MEM_FINALIZE_LOCAL_ARRAY (input_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (input_start_p);
ECMA_FINALIZE (string);
return ret_value;

View File

@ -272,7 +272,7 @@ ecma_builtin_helper_json_create_hex_digit_ecma_string (uint8_t value) /**< value
/* 2.c.iii */
ecma_string_t *hex_str_p = ecma_get_magic_string (LIT_MAGIC_STRING__EMPTY);
MEM_DEFINE_LOCAL_ARRAY (hex_buff, 4, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (hex_buff, 4, lit_utf8_byte_t);
for (uint32_t i = 0; i < 4; i++)
{
@ -297,7 +297,7 @@ ecma_builtin_helper_json_create_hex_digit_ecma_string (uint8_t value) /**< value
ecma_deref_ecma_string (hex_str_p);
hex_str_p = ecma_new_ecma_string_from_utf8 ((lit_utf8_byte_t *) hex_buff, 4);
MEM_FINALIZE_LOCAL_ARRAY (hex_buff);
JMEM_FINALIZE_LOCAL_ARRAY (hex_buff);
JERRY_ASSERT (ecma_string_get_length (hex_str_p));

View File

@ -85,7 +85,7 @@ ecma_builtin_helper_object_to_string (const ecma_value_t this_arg) /**< this arg
'Null' or one of possible object's classes.
The string with null character is maximum 19 characters long. */
const lit_utf8_size_t buffer_size = 19;
MEM_DEFINE_LOCAL_ARRAY (str_buffer, buffer_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (str_buffer, buffer_size, lit_utf8_byte_t);
lit_utf8_byte_t *buffer_ptr = str_buffer;
@ -107,7 +107,7 @@ ecma_builtin_helper_object_to_string (const ecma_value_t this_arg) /**< this arg
ret_string_p = ecma_new_ecma_string_from_utf8 (str_buffer, (lit_utf8_size_t) (buffer_ptr - str_buffer));
MEM_FINALIZE_LOCAL_ARRAY (str_buffer);
JMEM_FINALIZE_LOCAL_ARRAY (str_buffer);
return ecma_make_string_value (ret_string_p);
} /* ecma_builtin_helper_object_to_string */

View File

@ -961,7 +961,7 @@ ecma_builtin_json_stringify (ecma_value_t this_arg __attr_unused___, /**< 'this'
}
else
{
MEM_DEFINE_LOCAL_ARRAY (space_buff, space, char);
JMEM_DEFINE_LOCAL_ARRAY (space_buff, space, char);
for (int32_t i = 0; i < space; i++)
{
@ -970,7 +970,7 @@ ecma_builtin_json_stringify (ecma_value_t this_arg __attr_unused___, /**< 'this'
context.gap_str_p = ecma_new_ecma_string_from_utf8 ((lit_utf8_byte_t *) space_buff, (lit_utf8_size_t) space);
MEM_FINALIZE_LOCAL_ARRAY (space_buff);
JMEM_FINALIZE_LOCAL_ARRAY (space_buff);
}
ECMA_OP_TO_NUMBER_FINALIZE (array_length_num);

View File

@ -196,7 +196,7 @@ ecma_builtin_number_prototype_object_to_string (ecma_value_t this_arg, /**< this
should_round = true;
}
MEM_DEFINE_LOCAL_ARRAY (buff, buff_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (buff, buff_size, lit_utf8_byte_t);
int buff_index = 0;
/* Calculate digits for whole part. */
@ -310,7 +310,7 @@ ecma_builtin_number_prototype_object_to_string (ecma_value_t this_arg, /**< this
JERRY_ASSERT (buff_index <= buff_size);
ecma_string_t *str_p = ecma_new_ecma_string_from_utf8 (buff, (lit_utf8_size_t) buff_index);
ret_value = ecma_make_string_value (str_p);
MEM_FINALIZE_LOCAL_ARRAY (buff);
JMEM_FINALIZE_LOCAL_ARRAY (buff);
}
ECMA_OP_TO_NUMBER_FINALIZE (arg_num);
}
@ -463,7 +463,7 @@ ecma_builtin_number_prototype_object_to_fixed (ecma_value_t this_arg, /**< this
}
JERRY_ASSERT (buffer_size > 0);
MEM_DEFINE_LOCAL_ARRAY (buff, buffer_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (buff, buffer_size, lit_utf8_byte_t);
lit_utf8_byte_t *p = buff;
@ -559,7 +559,7 @@ ecma_builtin_number_prototype_object_to_fixed (ecma_value_t this_arg, /**< this
ecma_string_t *str = ecma_new_ecma_string_from_utf8 (buff, (lit_utf8_size_t) (p - buff));
ret_value = ecma_make_string_value (str);
MEM_FINALIZE_LOCAL_ARRAY (buff);
JMEM_FINALIZE_LOCAL_ARRAY (buff);
}
}
}
@ -666,7 +666,7 @@ ecma_builtin_number_prototype_object_to_exponential (ecma_value_t this_arg, /**<
buffer_size++;
}
MEM_DEFINE_LOCAL_ARRAY (buff, buffer_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (buff, buffer_size, lit_utf8_byte_t);
int digit = 0;
uint64_t scale = 1;
@ -745,7 +745,7 @@ ecma_builtin_number_prototype_object_to_exponential (ecma_value_t this_arg, /**<
*actual_char_p = '\0';
ecma_string_t *str = ecma_new_ecma_string_from_utf8 (buff, (lit_utf8_size_t) (actual_char_p - buff));
ret_value = ecma_make_string_value (str);
MEM_FINALIZE_LOCAL_ARRAY (buff);
JMEM_FINALIZE_LOCAL_ARRAY (buff);
}
}
}
@ -862,7 +862,7 @@ ecma_builtin_number_prototype_object_to_precision (ecma_value_t this_arg, /**< t
buffer_size++;
}
MEM_DEFINE_LOCAL_ARRAY (buff, buffer_size, lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (buff, buffer_size, lit_utf8_byte_t);
lit_utf8_byte_t *actual_char_p = buff;
uint64_t scale = 1;
@ -978,7 +978,7 @@ ecma_builtin_number_prototype_object_to_precision (ecma_value_t this_arg, /**< t
ecma_string_t *str_p = ecma_new_ecma_string_from_utf8 (buff, (lit_utf8_size_t) (actual_char_p - buff));
ret_value = ecma_make_string_value (str_p);
MEM_FINALIZE_LOCAL_ARRAY (buff);
JMEM_FINALIZE_LOCAL_ARRAY (buff);
}
}
ECMA_OP_TO_NUMBER_FINALIZE (arg_num);

View File

@ -705,7 +705,7 @@ ecma_builtin_object_object_define_properties (ecma_value_t this_arg __attr_unuse
ecma_collection_iterator_init (&iter, prop_names_p);
// 4.
MEM_DEFINE_LOCAL_ARRAY (property_descriptors, property_number, ecma_property_descriptor_t);
JMEM_DEFINE_LOCAL_ARRAY (property_descriptors, property_number, ecma_property_descriptor_t);
uint32_t property_descriptor_number = 0;
@ -756,7 +756,7 @@ ecma_builtin_object_object_define_properties (ecma_value_t this_arg __attr_unuse
ecma_free_property_descriptor (&property_descriptors[index]);
}
MEM_FINALIZE_LOCAL_ARRAY (property_descriptors);
JMEM_FINALIZE_LOCAL_ARRAY (property_descriptors);
ecma_free_values_collection (prop_names_p, true);

View File

@ -794,9 +794,9 @@ ecma_builtin_string_prototype_object_replace_get_string (ecma_builtin_replace_se
if (context_p->is_replace_callable)
{
MEM_DEFINE_LOCAL_ARRAY (arguments_list,
match_length + 2,
ecma_value_t);
JMEM_DEFINE_LOCAL_ARRAY (arguments_list,
match_length + 2,
ecma_value_t);
/* An error might occure during the array copy and
* uninitalized elements must not be freed. */
@ -847,7 +847,7 @@ ecma_builtin_string_prototype_object_replace_get_string (ecma_builtin_replace_se
ecma_free_value (arguments_list[i]);
}
MEM_FINALIZE_LOCAL_ARRAY (arguments_list);
JMEM_FINALIZE_LOCAL_ARRAY (arguments_list);
}
else
{
@ -2078,9 +2078,9 @@ ecma_builtin_string_prototype_object_conversion_helper (ecma_value_t this_arg, /
/* Second phase. */
MEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_length,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (output_start_p,
output_length,
lit_utf8_byte_t);
lit_utf8_byte_t *output_char_p = output_start_p;
@ -2120,7 +2120,7 @@ ecma_builtin_string_prototype_object_conversion_helper (ecma_value_t this_arg, /
ret_value = ecma_make_string_value (output_string_p);
MEM_FINALIZE_LOCAL_ARRAY (output_start_p);
JMEM_FINALIZE_LOCAL_ARRAY (output_start_p);
ECMA_FINALIZE_UTF8_STRING (input_start_p, input_start_size);
ECMA_FINALIZE (to_string_val);

View File

@ -69,9 +69,9 @@ ecma_builtin_string_object_from_char_code (ecma_value_t this_arg __attr_unused__
{
lit_utf8_size_t utf8_buf_size = args_number * LIT_CESU8_MAX_BYTES_IN_CODE_UNIT;
MEM_DEFINE_LOCAL_ARRAY (utf8_buf_p,
utf8_buf_size,
lit_utf8_byte_t);
JMEM_DEFINE_LOCAL_ARRAY (utf8_buf_p,
utf8_buf_size,
lit_utf8_byte_t);
lit_utf8_size_t utf8_buf_used = 0;
@ -96,7 +96,7 @@ ecma_builtin_string_object_from_char_code (ecma_value_t this_arg __attr_unused__
ret_string_p = ecma_new_ecma_string_from_utf8 (utf8_buf_p, utf8_buf_used);
}
MEM_FINALIZE_LOCAL_ARRAY (utf8_buf_p);
JMEM_FINALIZE_LOCAL_ARRAY (utf8_buf_p);
}
if (ecma_is_value_empty (ret_value))

View File

@ -276,7 +276,7 @@ ecma_op_array_object_define_own_property (ecma_object_t *obj_p, /**< the array o
ecma_length_t array_index_props_num = array_index_props_p->unit_number;
MEM_DEFINE_LOCAL_ARRAY (array_index_values_p, array_index_props_num, uint32_t);
JMEM_DEFINE_LOCAL_ARRAY (array_index_values_p, array_index_props_num, uint32_t);
ecma_collection_iterator_t iter;
ecma_collection_iterator_init (&iter, array_index_props_p);
@ -339,7 +339,7 @@ ecma_op_array_object_define_own_property (ecma_object_t *obj_p, /**< the array o
}
}
MEM_FINALIZE_LOCAL_ARRAY (array_index_values_p);
JMEM_FINALIZE_LOCAL_ARRAY (array_index_values_p);
ecma_free_values_collection (array_index_props_p, true);

View File

@ -701,7 +701,7 @@ ecma_op_function_call (ecma_object_t *func_obj_p, /**< Function object */
ecma_length_t merged_args_list_len = bound_arg_list_p->unit_number + arguments_list_len;
MEM_DEFINE_LOCAL_ARRAY (merged_args_list_p, merged_args_list_len, ecma_value_t);
JMEM_DEFINE_LOCAL_ARRAY (merged_args_list_p, merged_args_list_len, ecma_value_t);
ecma_function_bind_merge_arg_lists (merged_args_list_p,
bound_arg_list_p,
@ -714,7 +714,7 @@ ecma_op_function_call (ecma_object_t *func_obj_p, /**< Function object */
merged_args_list_p,
merged_args_list_len);
MEM_FINALIZE_LOCAL_ARRAY (merged_args_list_p);
JMEM_FINALIZE_LOCAL_ARRAY (merged_args_list_p);
}
else
{
@ -892,7 +892,7 @@ ecma_op_function_construct (ecma_object_t *func_obj_p, /**< Function object */
ecma_length_t merged_args_list_len = bound_arg_list_p->unit_number + arguments_list_len;
MEM_DEFINE_LOCAL_ARRAY (merged_args_list_p, merged_args_list_len, ecma_value_t);
JMEM_DEFINE_LOCAL_ARRAY (merged_args_list_p, merged_args_list_len, ecma_value_t);
ecma_function_bind_merge_arg_lists (merged_args_list_p,
bound_arg_list_p,
@ -904,7 +904,7 @@ ecma_op_function_construct (ecma_object_t *func_obj_p, /**< Function object */
merged_args_list_p,
merged_args_list_len);
MEM_FINALIZE_LOCAL_ARRAY (merged_args_list_p);
JMEM_FINALIZE_LOCAL_ARRAY (merged_args_list_p);
}
else
{

View File

@ -134,7 +134,7 @@ ecma_op_create_arguments_object (ecma_object_t *func_obj_p, /**< callee function
indx++)
{
// i.
if (literal_p[indx] == MEM_CP_NULL)
if (literal_p[indx] == JMEM_CP_NULL)
{
continue;
}

View File

@ -734,10 +734,10 @@ ecma_op_object_get_property_names (ecma_object_t *obj_p, /**< object */
}
/* Second pass: collecting properties names into arrays */
MEM_DEFINE_LOCAL_ARRAY (names_p,
array_index_named_properties_count + string_named_properties_count,
ecma_string_t *);
MEM_DEFINE_LOCAL_ARRAY (array_index_names_p, array_index_named_properties_count, uint32_t);
JMEM_DEFINE_LOCAL_ARRAY (names_p,
array_index_named_properties_count + string_named_properties_count,
ecma_string_t *);
JMEM_DEFINE_LOCAL_ARRAY (array_index_names_p, array_index_named_properties_count, uint32_t);
uint32_t name_pos = array_index_named_properties_count + string_named_properties_count;
uint32_t array_index_name_pos = 0;
@ -803,7 +803,7 @@ ecma_op_object_get_property_names (ecma_object_t *obj_p, /**< object */
JERRY_ASSERT (name_pos == 0);
MEM_FINALIZE_LOCAL_ARRAY (array_index_names_p);
JMEM_FINALIZE_LOCAL_ARRAY (array_index_names_p);
ecma_free_values_collection (prop_names_p, true);
@ -870,7 +870,7 @@ ecma_op_object_get_property_names (ecma_object_t *obj_p, /**< object */
ecma_deref_ecma_string (name_p);
}
MEM_FINALIZE_LOCAL_ARRAY (names_p);
JMEM_FINALIZE_LOCAL_ARRAY (names_p);
}
ecma_free_values_collection (skipped_non_enumerable_p, true);

View File

@ -35,7 +35,7 @@ typedef struct
ecma_value_t base;
/** referenced name */
__extension__ mem_cpointer_t referenced_name_cp : ECMA_POINTER_FIELD_WIDTH;
__extension__ jmem_cpointer_t referenced_name_cp : ECMA_POINTER_FIELD_WIDTH;
/** strict reference flag */
unsigned int is_strict : 1;

View File

@ -540,7 +540,7 @@ re_match_regexp (re_matcher_ctx_t *re_ctx_p, /**< RegExp matcher context */
const lit_utf8_byte_t *sub_str_p = NULL;
uint32_t array_size = re_ctx_p->num_of_captures + re_ctx_p->num_of_non_captures;
MEM_DEFINE_LOCAL_ARRAY (saved_bck_p, array_size, lit_utf8_byte_t *);
JMEM_DEFINE_LOCAL_ARRAY (saved_bck_p, array_size, lit_utf8_byte_t *);
size_t size = (size_t) (array_size) * sizeof (lit_utf8_byte_t *);
memcpy (saved_bck_p, re_ctx_p->saved_p, size);
@ -592,7 +592,7 @@ re_match_regexp (re_matcher_ctx_t *re_ctx_p, /**< RegExp matcher context */
}
}
MEM_FINALIZE_LOCAL_ARRAY (saved_bck_p);
JMEM_FINALIZE_LOCAL_ARRAY (saved_bck_p);
return match_value;
}
case RE_OP_CHAR_CLASS:
@ -1292,7 +1292,7 @@ ecma_regexp_exec_helper (ecma_value_t regexp_value, /**< RegExp object */
JERRY_ASSERT (re_ctx.num_of_captures % 2 == 0);
re_ctx.num_of_non_captures = bc_p->num_of_non_captures;
MEM_DEFINE_LOCAL_ARRAY (saved_p, re_ctx.num_of_captures + re_ctx.num_of_non_captures, const lit_utf8_byte_t *);
JMEM_DEFINE_LOCAL_ARRAY (saved_p, re_ctx.num_of_captures + re_ctx.num_of_non_captures, const lit_utf8_byte_t *);
for (uint32_t i = 0; i < re_ctx.num_of_captures + re_ctx.num_of_non_captures; i++)
{
@ -1301,7 +1301,7 @@ ecma_regexp_exec_helper (ecma_value_t regexp_value, /**< RegExp object */
re_ctx.saved_p = saved_p;
uint32_t num_of_iter_length = (re_ctx.num_of_captures / 2) + (re_ctx.num_of_non_captures - 1);
MEM_DEFINE_LOCAL_ARRAY (num_of_iter_p, num_of_iter_length, uint32_t);
JMEM_DEFINE_LOCAL_ARRAY (num_of_iter_p, num_of_iter_length, uint32_t);
for (uint32_t i = 0; i < num_of_iter_length; i++)
{
@ -1466,8 +1466,8 @@ ecma_regexp_exec_helper (ecma_value_t regexp_value, /**< RegExp object */
}
}
MEM_FINALIZE_LOCAL_ARRAY (num_of_iter_p);
MEM_FINALIZE_LOCAL_ARRAY (saved_p);
JMEM_FINALIZE_LOCAL_ARRAY (num_of_iter_p);
JMEM_FINALIZE_LOCAL_ARRAY (saved_p);
ECMA_FINALIZE_UTF8_STRING (input_buffer_p, input_buffer_size);
return ret_value;

View File

@ -25,7 +25,7 @@
typedef struct
{
/* The size of this structure is recommended to be divisible by
* MEM_ALIGNMENT. Otherwise some bytes after the header are wasted. */
* JMEM_ALIGNMENT. Otherwise some bytes after the header are wasted. */
uint32_t version; /**< version number */
uint32_t lit_table_offset; /**< offset of the literal table */
uint32_t lit_table_size; /**< size of literal table */

View File

@ -922,7 +922,7 @@ jerry_dispatch_external_function (ecma_object_t *function_object_p, /**< externa
ecma_value_t completion_value;
MEM_DEFINE_LOCAL_ARRAY (api_arg_values, args_count, jerry_api_value_t);
JMEM_DEFINE_LOCAL_ARRAY (api_arg_values, args_count, jerry_api_value_t);
ecma_collection_iterator_t args_iterator;
ecma_collection_iterator_init (&args_iterator, arg_collection_p);
@ -967,7 +967,7 @@ jerry_dispatch_external_function (ecma_object_t *function_object_p, /**< externa
jerry_api_release_value (&api_arg_values[i]);
}
MEM_FINALIZE_LOCAL_ARRAY (api_arg_values);
JMEM_FINALIZE_LOCAL_ARRAY (api_arg_values);
return completion_value;
} /* jerry_dispatch_external_function */
@ -1399,7 +1399,7 @@ jerry_api_invoke_function (bool is_invoke_as_constructor, /**< true - invoke fun
bool is_successful = true;
MEM_DEFINE_LOCAL_ARRAY (arguments_list_p, args_count, ecma_value_t);
JMEM_DEFINE_LOCAL_ARRAY (arguments_list_p, args_count, ecma_value_t);
for (uint32_t i = 0; i < args_count; ++i)
{
@ -1456,7 +1456,7 @@ jerry_api_invoke_function (bool is_invoke_as_constructor, /**< true - invoke fun
ecma_free_value (arguments_list_p[i]);
}
MEM_FINALIZE_LOCAL_ARRAY (arguments_list_p);
JMEM_FINALIZE_LOCAL_ARRAY (arguments_list_p);
return is_successful;
} /* jerry_api_invoke_function */
@ -1633,23 +1633,23 @@ jerry_init (jerry_flag_t flags) /**< combination of Jerry flags */
if (flags & (JERRY_FLAG_MEM_STATS | JERRY_FLAG_MEM_STATS_SEPARATE))
{
#ifndef MEM_STATS
#ifndef JMEM_STATS
flags &= (jerry_flag_t) ~(JERRY_FLAG_MEM_STATS | JERRY_FLAG_MEM_STATS_SEPARATE);
JERRY_WARNING_MSG ("Ignoring memory statistics option because of '!MEM_STATS' build configuration.\n");
#else /* MEM_STATS */
JERRY_WARNING_MSG ("Ignoring memory statistics option because of '!JMEM_STATS' build configuration.\n");
#else /* JMEM_STATS */
if (flags & JERRY_FLAG_MEM_STATS_SEPARATE)
{
flags |= JERRY_FLAG_MEM_STATS;
}
#endif /* !MEM_STATS */
#endif /* !JMEM_STATS */
}
jerry_flags = flags;
jerry_make_api_available ();
mem_init ();
jmem_init ();
lit_init ();
ecma_init ();
} /* jerry_init */
@ -1667,7 +1667,7 @@ jerry_cleanup (void)
vm_finalize ();
ecma_finalize ();
lit_finalize ();
mem_finalize (is_show_mem_stats);
jmem_finalize (is_show_mem_stats);
} /* jerry_cleanup */
/**
@ -1719,13 +1719,13 @@ jerry_parse (const jerry_api_char_t *source_p, /**< script source */
return false;
}
#ifdef MEM_STATS
#ifdef JMEM_STATS
if (jerry_flags & JERRY_FLAG_MEM_STATS_SEPARATE)
{
mem_stats_print ();
mem_stats_reset_peak ();
jmem_stats_print ();
jmem_stats_reset_peak ();
}
#endif /* MEM_STATS */
#endif /* JMEM_STATS */
vm_init (bytecode_data_p);
@ -1824,15 +1824,15 @@ snapshot_add_compiled_code (ecma_compiled_code_t *compiled_code_p) /**< compiled
return 0;
}
JERRY_ASSERT ((snapshot_buffer_write_offset & (MEM_ALIGNMENT - 1)) == 0);
JERRY_ASSERT ((snapshot_buffer_write_offset & (JMEM_ALIGNMENT - 1)) == 0);
if ((snapshot_buffer_write_offset >> MEM_ALIGNMENT_LOG) > 0xffffu)
if ((snapshot_buffer_write_offset >> JMEM_ALIGNMENT_LOG) > 0xffffu)
{
snapshot_error_occured = true;
return 0;
}
uint16_t start_offset = (uint16_t) (snapshot_buffer_write_offset >> MEM_ALIGNMENT_LOG);
uint16_t start_offset = (uint16_t) (snapshot_buffer_write_offset >> JMEM_ALIGNMENT_LOG);
ecma_compiled_code_t *copied_compiled_code_p;
copied_compiled_code_p = (ecma_compiled_code_t *) (snapshot_buffer_p + snapshot_buffer_write_offset);
@ -1849,7 +1849,7 @@ snapshot_add_compiled_code (ecma_compiled_code_t *compiled_code_p) /**< compiled
snapshot_buffer_write_offset += sizeof (ecma_compiled_code_t);
mem_cpointer_t pattern_cp = ((re_compiled_code_t *) compiled_code_p)->pattern_cp;
jmem_cpointer_t pattern_cp = ((re_compiled_code_t *) compiled_code_p)->pattern_cp;
ecma_string_t *pattern_string_p = ECMA_GET_NON_NULL_POINTER (ecma_string_t,
pattern_cp);
@ -1870,13 +1870,13 @@ snapshot_add_compiled_code (ecma_compiled_code_t *compiled_code_p) /**< compiled
ECMA_FINALIZE_UTF8_STRING (buffer_p, buffer_size);
snapshot_buffer_write_offset = JERRY_ALIGNUP (snapshot_buffer_write_offset, MEM_ALIGNMENT);
snapshot_buffer_write_offset = JERRY_ALIGNUP (snapshot_buffer_write_offset, JMEM_ALIGNMENT);
/* Regexp character size is stored in refs. */
copied_compiled_code_p->refs = (uint16_t) pattern_size;
pattern_size += (ecma_length_t) sizeof (ecma_compiled_code_t);
copied_compiled_code_p->size = (uint16_t) ((pattern_size + MEM_ALIGNMENT - 1) >> MEM_ALIGNMENT_LOG);
copied_compiled_code_p->size = (uint16_t) ((pattern_size + JMEM_ALIGNMENT - 1) >> JMEM_ALIGNMENT_LOG);
copied_compiled_code_p->status_flags = compiled_code_p->status_flags;
@ -1890,7 +1890,7 @@ snapshot_add_compiled_code (ecma_compiled_code_t *compiled_code_p) /**< compiled
snapshot_buffer_size,
&snapshot_buffer_write_offset,
compiled_code_p,
((size_t) compiled_code_p->size) << MEM_ALIGNMENT_LOG))
((size_t) compiled_code_p->size) << JMEM_ALIGNMENT_LOG))
{
snapshot_error_occured = true;
return 0;
@ -1954,7 +1954,7 @@ jerry_snapshot_set_offsets (uint8_t *buffer_p, /**< buffer */
do
{
ecma_compiled_code_t *bytecode_p = (ecma_compiled_code_t *) buffer_p;
uint32_t code_size = ((uint32_t) bytecode_p->size) << MEM_ALIGNMENT_LOG;
uint32_t code_size = ((uint32_t) bytecode_p->size) << JMEM_ALIGNMENT_LOG;
if (bytecode_p->status_flags & CBC_CODE_FLAGS_FUNCTION)
{
@ -1991,7 +1991,7 @@ jerry_snapshot_set_offsets (uint8_t *buffer_p, /**< buffer */
{
lit_mem_to_snapshot_id_map_entry_t *current_p = lit_map_p;
if (literal_start_p[i] != MEM_CP_NULL)
if (literal_start_p[i] != JMEM_CP_NULL)
{
while (current_p->literal_id != literal_start_p[i])
{
@ -2007,14 +2007,14 @@ jerry_snapshot_set_offsets (uint8_t *buffer_p, /**< buffer */
for (uint32_t i = register_clear_start; i < register_end; i++)
{
literal_start_p[i] = MEM_CP_NULL;
literal_start_p[i] = JMEM_CP_NULL;
}
for (uint32_t i = register_end; i < const_literal_end; i++)
{
lit_mem_to_snapshot_id_map_entry_t *current_p = lit_map_p;
if (literal_start_p[i] != MEM_CP_NULL)
if (literal_start_p[i] != JMEM_CP_NULL)
{
while (current_p->literal_id != literal_start_p[i])
{
@ -2058,7 +2058,7 @@ jerry_parse_and_save_snapshot (const jerry_api_char_t *source_p, /**< script sou
ecma_compiled_code_t *bytecode_data_p;
snapshot_buffer_write_offset = JERRY_ALIGNUP (sizeof (jerry_snapshot_header_t),
MEM_ALIGNMENT);
JMEM_ALIGNMENT);
snapshot_error_occured = false;
snapshot_buffer_p = buffer_p;
snapshot_buffer_size = buffer_size;
@ -2117,7 +2117,7 @@ jerry_parse_and_save_snapshot (const jerry_api_char_t *source_p, /**< script sou
return 0;
}
jerry_snapshot_set_offsets (buffer_p + JERRY_ALIGNUP (sizeof (jerry_snapshot_header_t), MEM_ALIGNMENT),
jerry_snapshot_set_offsets (buffer_p + JERRY_ALIGNUP (sizeof (jerry_snapshot_header_t), JMEM_ALIGNMENT),
(uint32_t) (header.lit_table_offset - sizeof (jerry_snapshot_header_t)),
lit_map_p);
@ -2131,7 +2131,7 @@ jerry_parse_and_save_snapshot (const jerry_api_char_t *source_p, /**< script sou
if (lit_map_p != NULL)
{
mem_heap_free_block_size_stored (lit_map_p);
jmem_heap_free_block_size_stored (lit_map_p);
}
ecma_bytecode_deref (bytecode_data_p);
@ -2169,7 +2169,7 @@ snapshot_load_compiled_code (const uint8_t *snapshot_data_p, /**< snapshot data
bool copy_bytecode) /**< byte code should be copied to memory */
{
ecma_compiled_code_t *bytecode_p = (ecma_compiled_code_t *) (snapshot_data_p + offset);
uint32_t code_size = ((uint32_t) bytecode_p->size) << MEM_ALIGNMENT_LOG;
uint32_t code_size = ((uint32_t) bytecode_p->size) << JMEM_ALIGNMENT_LOG;
if (!(bytecode_p->status_flags & CBC_CODE_FLAGS_FUNCTION))
{
@ -2218,7 +2218,7 @@ snapshot_load_compiled_code (const uint8_t *snapshot_data_p, /**< snapshot data
if (copy_bytecode
|| (header_size + (literal_end * sizeof (uint16_t)) + BYTECODE_NO_COPY_TRESHOLD > code_size))
{
bytecode_p = (ecma_compiled_code_t *) mem_heap_alloc_block (code_size);
bytecode_p = (ecma_compiled_code_t *) jmem_heap_alloc_block (code_size);
memcpy (bytecode_p, snapshot_data_p + offset, code_size);
}
@ -2227,13 +2227,13 @@ snapshot_load_compiled_code (const uint8_t *snapshot_data_p, /**< snapshot data
code_size = (uint32_t) (header_size + literal_end * sizeof (lit_cpointer_t));
uint8_t *real_bytecode_p = ((uint8_t *) bytecode_p) + code_size;
uint32_t total_size = JERRY_ALIGNUP (code_size + 1 + sizeof (uint8_t *), MEM_ALIGNMENT);
uint32_t total_size = JERRY_ALIGNUP (code_size + 1 + sizeof (uint8_t *), JMEM_ALIGNMENT);
bytecode_p = (ecma_compiled_code_t *) mem_heap_alloc_block (total_size);
bytecode_p = (ecma_compiled_code_t *) jmem_heap_alloc_block (total_size);
memcpy (bytecode_p, snapshot_data_p + offset, code_size);
bytecode_p->size = (uint16_t) (total_size >> MEM_ALIGNMENT_LOG);
bytecode_p->size = (uint16_t) (total_size >> JMEM_ALIGNMENT_LOG);
uint8_t *instructions_p = ((uint8_t *) bytecode_p);
@ -2262,7 +2262,7 @@ snapshot_load_compiled_code (const uint8_t *snapshot_data_p, /**< snapshot data
for (uint32_t i = const_literal_end; i < literal_end; i++)
{
size_t literal_offset = ((size_t) literal_start_p[i]) << MEM_ALIGNMENT_LOG;
size_t literal_offset = ((size_t) literal_start_p[i]) << JMEM_ALIGNMENT_LOG;
if (literal_offset == offset)
{
@ -2350,7 +2350,7 @@ jerry_exec_snapshot (const void *snapshot_p, /**< snapshot */
if (lit_map_p != NULL)
{
mem_heap_free_block_size_stored (lit_map_p);
jmem_heap_free_block_size_stored (lit_map_p);
}
if (bytecode_p == NULL)


@ -13,21 +13,21 @@
* limitations under the License.
*/
#ifndef MEM_ALLOCATOR_INTERNAL_H
#define MEM_ALLOCATOR_INTERNAL_H
#ifndef JMEM_ALLOCATOR_INTERNAL_H
#define JMEM_ALLOCATOR_INTERNAL_H
#ifndef MEM_ALLOCATOR_INTERNAL
#ifndef JMEM_ALLOCATOR_INTERNAL
# error "The header is for internal routines of memory allocator component. Please, don't use the routines directly."
#endif /* !MEM_ALLOCATOR_INTERNAL */
#endif /* !JMEM_ALLOCATOR_INTERNAL */
/** \addtogroup mem Memory allocation
* @{
*/
extern void mem_run_try_to_give_memory_back_callbacks (mem_try_give_memory_back_severity_t);
extern void jmem_run_try_to_give_memory_back_callbacks (jmem_try_give_memory_back_severity_t);
/**
* @}
*/
#endif /* !MEM_ALLOCATOR_INTERNAL_H */
#endif /* !JMEM_ALLOCATOR_INTERNAL_H */


@ -0,0 +1,149 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Allocator implementation
*/
#include "jrt.h"
#include "jrt-libc-includes.h"
#include "jmem-allocator.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
/**
* The 'try to give memory back' callback
*/
static jmem_try_give_memory_back_callback_t jmem_try_give_memory_back_callback = NULL;
/**
* Initialize memory allocators.
*/
void
jmem_init (void)
{
jmem_heap_init ();
jmem_pools_init ();
} /* jmem_init */
/**
* Finalize memory allocators.
*/
void
jmem_finalize (bool is_show_mem_stats) /**< show heap memory stats
before finalization? */
{
jmem_pools_finalize ();
#ifdef JMEM_STATS
if (is_show_mem_stats)
{
jmem_stats_print ();
}
#else /* !JMEM_STATS */
(void) is_show_mem_stats;
#endif /* JMEM_STATS */
jmem_heap_finalize ();
} /* jmem_finalize */
/**
* Compress pointer
*
* @return packed pointer
*/
uintptr_t
jmem_compress_pointer (const void *pointer_p) /**< pointer to compress */
{
JERRY_ASSERT (jmem_is_heap_pointer (pointer_p));
return jmem_heap_compress_pointer (pointer_p);
} /* jmem_compress_pointer */
/**
* Decompress pointer
*
* @return unpacked pointer
*/
void *
jmem_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to decompress */
{
return jmem_heap_decompress_pointer (compressed_pointer);
} /* jmem_decompress_pointer */
/**
* Register specified 'try to give memory back' callback routine
*/
void
jmem_register_a_try_give_memory_back_callback (jmem_try_give_memory_back_callback_t callback) /**< callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (jmem_try_give_memory_back_callback == NULL);
jmem_try_give_memory_back_callback = callback;
} /* jmem_register_a_try_give_memory_back_callback */
/**
* Unregister specified 'try to give memory back' callback routine
*/
void
jmem_unregister_a_try_give_memory_back_callback (jmem_try_give_memory_back_callback_t callback) /**< callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (jmem_try_give_memory_back_callback == callback);
jmem_try_give_memory_back_callback = NULL;
} /* jmem_unregister_a_try_give_memory_back_callback */
/**
* Run 'try to give memory back' callbacks with specified severity
*/
void
jmem_run_try_to_give_memory_back_callbacks (jmem_try_give_memory_back_severity_t severity) /**< severity of
the request */
{
if (jmem_try_give_memory_back_callback != NULL)
{
jmem_try_give_memory_back_callback (severity);
}
jmem_pools_collect_empty ();
} /* jmem_run_try_to_give_memory_back_callbacks */
#ifdef JMEM_STATS
/**
* Reset peak values in memory usage statistics
*/
void
jmem_stats_reset_peak (void)
{
jmem_heap_stats_reset_peak ();
jmem_pools_stats_reset_peak ();
} /* jmem_stats_reset_peak */
/**
* Print memory usage statistics
*/
void
jmem_stats_print (void)
{
jmem_heap_stats_print ();
jmem_pools_stats_print ();
} /* jmem_stats_print */
#endif /* JMEM_STATS */
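For illustration, a minimal sketch of how an engine component might drive the renamed allocator entry points; the demo_* names are hypothetical:
#include "jmem-allocator.h"
/* Hypothetical low-memory handler matching jmem_try_give_memory_back_callback_t. */
static void
demo_on_memory_pressure (jmem_try_give_memory_back_severity_t severity)
{
  (void) severity; /* e.g. run a GC pass, more aggressively on HIGH severity */
} /* demo_on_memory_pressure */
static void
demo_allocator_lifecycle (void)
{
  jmem_init ();
  jmem_register_a_try_give_memory_back_callback (demo_on_memory_pressure);
  /* ... allocations through jmem_heap_* and jmem_pools_* ... */
  jmem_unregister_a_try_give_memory_back_callback (demo_on_memory_pressure);
  jmem_finalize (false); /* false: do not print memory statistics */
} /* demo_allocator_lifecycle */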


@ -16,13 +16,13 @@
/**
* Allocator interface
*/
#ifndef MEM_ALLOCATOR_H
#define MEM_ALLOCATOR_H
#ifndef JMEM_ALLOCATOR_H
#define JMEM_ALLOCATOR_H
#include "jrt.h"
#include "mem-config.h"
#include "mem-heap.h"
#include "mem-poolman.h"
#include "jmem-config.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
/** \addtogroup mem Memory allocation
* @{
@ -31,27 +31,27 @@
/**
* Compressed pointer
*/
typedef uint16_t mem_cpointer_t;
typedef uint16_t jmem_cpointer_t;
/**
* Representation of NULL value for compressed pointers
*/
#define MEM_CP_NULL ((mem_cpointer_t) 0)
#define JMEM_CP_NULL ((jmem_cpointer_t) 0)
/**
* Required alignment for allocated units/blocks
*/
#define MEM_ALIGNMENT (1u << MEM_ALIGNMENT_LOG)
#define JMEM_ALIGNMENT (1u << JMEM_ALIGNMENT_LOG)
/**
* Width of compressed memory pointer
*/
#define MEM_CP_WIDTH (MEM_HEAP_OFFSET_LOG - MEM_ALIGNMENT_LOG)
#define JMEM_CP_WIDTH (JMEM_HEAP_OFFSET_LOG - JMEM_ALIGNMENT_LOG)
/**
* Compressed pointer value mask
*/
#define MEM_CP_MASK ((1ull << MEM_CP_WIDTH) - 1)
#define JMEM_CP_MASK ((1ull << JMEM_CP_WIDTH) - 1)
/**
* Severity of a 'try give memory back' request
@ -64,69 +64,69 @@ typedef uint16_t mem_cpointer_t;
*/
typedef enum
{
MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW, /* 'low' severity */
MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH, /* 'high' severity */
} mem_try_give_memory_back_severity_t;
JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW, /* 'low' severity */
JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH, /* 'high' severity */
} jmem_try_give_memory_back_severity_t;
/**
* A 'try give memory back' callback routine type.
*/
typedef void (*mem_try_give_memory_back_callback_t) (mem_try_give_memory_back_severity_t);
typedef void (*jmem_try_give_memory_back_callback_t) (jmem_try_give_memory_back_severity_t);
/**
* Get value of pointer from specified non-null compressed pointer value
*/
#define MEM_CP_GET_NON_NULL_POINTER(type, cp_value) \
((type *) (mem_decompress_pointer (cp_value)))
#define JMEM_CP_GET_NON_NULL_POINTER(type, cp_value) \
((type *) (jmem_decompress_pointer (cp_value)))
/**
* Get value of pointer from specified compressed pointer value
*/
#define MEM_CP_GET_POINTER(type, cp_value) \
(((unlikely ((cp_value) == MEM_CP_NULL)) ? NULL : MEM_CP_GET_NON_NULL_POINTER (type, cp_value)))
#define JMEM_CP_GET_POINTER(type, cp_value) \
(((unlikely ((cp_value) == JMEM_CP_NULL)) ? NULL : JMEM_CP_GET_NON_NULL_POINTER (type, cp_value)))
/**
* Set value of non-null compressed pointer so that it will correspond
* to specified non_compressed_pointer
*/
#define MEM_CP_SET_NON_NULL_POINTER(cp_value, non_compressed_pointer) \
(cp_value) = (mem_compress_pointer (non_compressed_pointer) & MEM_CP_MASK)
#define JMEM_CP_SET_NON_NULL_POINTER(cp_value, non_compressed_pointer) \
(cp_value) = (jmem_compress_pointer (non_compressed_pointer) & JMEM_CP_MASK)
/**
* Set value of compressed pointer so that it will correspond
* to specified non_compressed_pointer
*/
#define MEM_CP_SET_POINTER(cp_value, non_compressed_pointer) \
#define JMEM_CP_SET_POINTER(cp_value, non_compressed_pointer) \
do \
{ \
void *ptr_value = (void *) non_compressed_pointer; \
\
if (unlikely ((ptr_value) == NULL)) \
{ \
(cp_value) = MEM_CP_NULL; \
(cp_value) = JMEM_CP_NULL; \
} \
else \
{ \
MEM_CP_SET_NON_NULL_POINTER (cp_value, ptr_value); \
JMEM_CP_SET_NON_NULL_POINTER (cp_value, ptr_value); \
} \
} while (false);
extern void mem_init (void);
extern void mem_finalize (bool);
extern void jmem_init (void);
extern void jmem_finalize (bool);
extern uintptr_t mem_compress_pointer (const void *);
extern void *mem_decompress_pointer (uintptr_t);
extern uintptr_t jmem_compress_pointer (const void *);
extern void *jmem_decompress_pointer (uintptr_t);
extern void mem_register_a_try_give_memory_back_callback (mem_try_give_memory_back_callback_t);
extern void mem_unregister_a_try_give_memory_back_callback (mem_try_give_memory_back_callback_t);
extern void jmem_register_a_try_give_memory_back_callback (jmem_try_give_memory_back_callback_t);
extern void jmem_unregister_a_try_give_memory_back_callback (jmem_try_give_memory_back_callback_t);
#ifdef MEM_STATS
extern void mem_stats_reset_peak (void);
extern void mem_stats_print (void);
#endif /* MEM_STATS */
#ifdef JMEM_STATS
extern void jmem_stats_reset_peak (void);
extern void jmem_stats_print (void);
#endif /* JMEM_STATS */
/**
* @}
*/
#endif /* !MEM_ALLOCATOR_H */
#endif /* !JMEM_ALLOCATOR_H */
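For illustration, a sketch of the typical use of the renamed compressed-pointer macros: a 16-bit jmem_cpointer_t field standing in for a full pointer inside a heap-allocated structure. The demo_node_t type is hypothetical, and both nodes are assumed to live on the jmem heap, since jmem_compress_pointer () asserts that:
#include "jmem-allocator.h"
/* Hypothetical node type; next_cp replaces a full pointer with 16 bits. */
typedef struct
{
  uint16_t value;          /**< payload */
  jmem_cpointer_t next_cp; /**< compressed pointer to the next node */
} demo_node_t;
static void
demo_link_nodes (demo_node_t *first_p,  /**< node allocated on the jmem heap */
                 demo_node_t *second_p) /**< node allocated on the jmem heap, or NULL */
{
  JMEM_CP_SET_POINTER (first_p->next_cp, second_p);
  demo_node_t *restored_p = JMEM_CP_GET_POINTER (demo_node_t, first_p->next_cp);
  JERRY_ASSERT (restored_p == second_p);
} /* demo_link_nodes */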


@ -13,29 +13,29 @@
* limitations under the License.
*/
#ifndef MEM_CONFIG_H
#define MEM_CONFIG_H
#ifndef JMEM_CONFIG_H
#define JMEM_CONFIG_H
#include "config.h"
/**
* Log2 of maximum possible offset in the heap
*/
#define MEM_HEAP_OFFSET_LOG (CONFIG_MEM_HEAP_OFFSET_LOG)
#define JMEM_HEAP_OFFSET_LOG (CONFIG_MEM_HEAP_OFFSET_LOG)
/**
* Size of heap
*/
#define MEM_HEAP_SIZE ((size_t) (CONFIG_MEM_HEAP_AREA_SIZE))
#define JMEM_HEAP_SIZE ((size_t) (CONFIG_MEM_HEAP_AREA_SIZE))
/**
* Size of pool chunk
*/
#define MEM_POOL_CHUNK_SIZE ((size_t) (CONFIG_MEM_POOL_CHUNK_SIZE))
#define JMEM_POOL_CHUNK_SIZE ((size_t) (CONFIG_MEM_POOL_CHUNK_SIZE))
/**
* Logarithm of required alignment for allocated units/blocks
*/
#define MEM_ALIGNMENT_LOG 3
#define JMEM_ALIGNMENT_LOG 3
#endif /* !MEM_CONFIG_H */
#endif /* !JMEM_CONFIG_H */
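A short worked example of what the renamed constants imply, assuming the default JMEM_ALIGNMENT_LOG of 3: JMEM_ALIGNMENT is 1u << 3 = 8 bytes, so block sizes are multiples of 8 and a byte count fits a 16-bit size field after shifting, as the snapshot code above relies on. The demo_* name is hypothetical:
#include "jmem-allocator.h"
static uint32_t
demo_size_round_trip (uint32_t code_size) /**< byte count, multiple of JMEM_ALIGNMENT */
{
  uint16_t stored = (uint16_t) (code_size >> JMEM_ALIGNMENT_LOG); /* e.g. 40 -> 5 */
  return ((uint32_t) stored) << JMEM_ALIGNMENT_LOG;               /* 5 -> 40 again */
} /* demo_size_round_trip */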

jerry-core/jmem/jmem-heap.c Normal file

@ -0,0 +1,775 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Heap implementation
*/
#include "jrt.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"
#include "jmem-allocator.h"
#include "jmem-config.h"
#include "jmem-heap.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup heap Heap
* @{
*/
/*
* Valgrind-related options and headers
*/
#ifdef JERRY_VALGRIND
# include "memcheck.h"
# define VALGRIND_NOACCESS_SPACE(p, s) VALGRIND_MAKE_MEM_NOACCESS((p), (s))
# define VALGRIND_UNDEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_UNDEFINED((p), (s))
# define VALGRIND_DEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_DEFINED((p), (s))
#else /* !JERRY_VALGRIND */
# define VALGRIND_NOACCESS_SPACE(p, s)
# define VALGRIND_UNDEFINED_SPACE(p, s)
# define VALGRIND_DEFINED_SPACE(p, s)
#endif /* JERRY_VALGRIND */
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
/**
* Tells whether a pool manager allocator request is in progress.
*/
static bool valgrind_freya_mempool_request = false;
/**
* Called by pool manager before a heap allocation or free.
*/
void jmem_heap_valgrind_freya_mempool_request (void)
{
valgrind_freya_mempool_request = true;
} /* jmem_heap_valgrind_freya_mempool_request */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST \
bool mempool_request = valgrind_freya_mempool_request; \
valgrind_freya_mempool_request = false
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) \
if (!mempool_request) \
{ \
VALGRIND_MALLOCLIKE_BLOCK((p), (s), 0, 0); \
}
# define VALGRIND_FREYA_FREELIKE_SPACE(p) \
if (!mempool_request) \
{ \
VALGRIND_FREELIKE_BLOCK((p), 0); \
}
#else /* !JERRY_VALGRIND_FREYA */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/* Calculate heap area size, leaving space for a pointer to the free list */
#define JMEM_HEAP_AREA_SIZE (JMEM_HEAP_SIZE - JMEM_ALIGNMENT)
#define JMEM_HEAP_END_OF_LIST ((jmem_heap_free_t *const) ~((uint32_t) 0x0))
/**
* Free region node
*/
typedef struct
{
uint32_t next_offset; /**< Offset of next region in list */
uint32_t size; /**< Size of region */
} jmem_heap_free_t;
#if UINTPTR_MAX > UINT32_MAX
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - (uint8_t *) jmem_heap.area))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *) &jmem_heap.area[u])
#else /* UINTPTR_MAX <= UINT32_MAX */
/* In this case we simply store the pointer, since it fits anyway. */
#define JMEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
#define JMEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((jmem_heap_free_t *)(u))
#endif /* UINTPTR_MAX > UINT32_MAX */
/**
* Get end of region
*/
static inline jmem_heap_free_t * __attr_always_inline___ __attr_pure___
jmem_heap_get_region_end (jmem_heap_free_t *curr_p) /**< current region */
{
return (jmem_heap_free_t *)((uint8_t *) curr_p + curr_p->size);
} /* jmem_heap_get_region_end */
/**
* Heap structure
*/
typedef struct
{
/** First node in free region list */
jmem_heap_free_t first;
/**
* Heap area
*/
uint8_t area[JMEM_HEAP_AREA_SIZE] __attribute__ ((aligned (JMEM_ALIGNMENT)));
} jmem_heap_t;
/**
* Heap
*/
#ifndef JERRY_HEAP_SECTION_ATTR
jmem_heap_t jmem_heap;
#else /* JERRY_HEAP_SECTION_ATTR */
jmem_heap_t jmem_heap __attribute__ ((section (JERRY_HEAP_SECTION_ATTR)));
#endif /* !JERRY_HEAP_SECTION_ATTR */
/**
* Check size of heap is corresponding to configuration
*/
JERRY_STATIC_ASSERT (sizeof (jmem_heap) <= JMEM_HEAP_SIZE,
size_of_mem_heap_must_be_less_than_or_equal_to_MEM_HEAP_SIZE);
/**
* Size of allocated regions
*/
size_t jmem_heap_allocated_size;
/**
* Current limit of heap usage; when it is reached, the "try give memory back" callbacks are run
*/
size_t jmem_heap_limit;
/* This is used to speed up deallocation. */
jmem_heap_free_t *jmem_heap_list_skip_p;
#ifdef JMEM_STATS
/**
* Heap's memory usage statistics
*/
static jmem_heap_stats_t jmem_heap_stats;
static void jmem_heap_stat_init (void);
static void jmem_heap_stat_alloc (size_t num);
static void jmem_heap_stat_free (size_t num);
static void jmem_heap_stat_skip ();
static void jmem_heap_stat_nonskip ();
static void jmem_heap_stat_alloc_iter ();
static void jmem_heap_stat_free_iter ();
# define JMEM_HEAP_STAT_INIT() jmem_heap_stat_init ()
# define JMEM_HEAP_STAT_ALLOC(v1) jmem_heap_stat_alloc (v1)
# define JMEM_HEAP_STAT_FREE(v1) jmem_heap_stat_free (v1)
# define JMEM_HEAP_STAT_SKIP() jmem_heap_stat_skip ()
# define JMEM_HEAP_STAT_NONSKIP() jmem_heap_stat_nonskip ()
# define JMEM_HEAP_STAT_ALLOC_ITER() jmem_heap_stat_alloc_iter ()
# define JMEM_HEAP_STAT_FREE_ITER() jmem_heap_stat_free_iter ()
#else /* !JMEM_STATS */
# define JMEM_HEAP_STAT_INIT()
# define JMEM_HEAP_STAT_ALLOC(v1)
# define JMEM_HEAP_STAT_FREE(v1)
# define JMEM_HEAP_STAT_SKIP()
# define JMEM_HEAP_STAT_NONSKIP()
# define JMEM_HEAP_STAT_ALLOC_ITER()
# define JMEM_HEAP_STAT_FREE_ITER()
#endif /* JMEM_STATS */
/**
* Startup initialization of heap
*/
void
jmem_heap_init (void)
{
JERRY_STATIC_ASSERT ((uintptr_t) jmem_heap.area % JMEM_ALIGNMENT == 0,
jmem_heap_area_must_be_multiple_of_MEM_ALIGNMENT);
JERRY_STATIC_ASSERT ((1u << JMEM_HEAP_OFFSET_LOG) >= JMEM_HEAP_SIZE,
two_pow_mem_heap_offset_should_not_be_less_than_mem_heap_size);
jmem_heap_allocated_size = 0;
jmem_heap_limit = CONFIG_MEM_HEAP_DESIRED_LIMIT;
jmem_heap.first.size = 0;
jmem_heap_free_t *const region_p = (jmem_heap_free_t *) jmem_heap.area;
jmem_heap.first.next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);
region_p->size = sizeof (jmem_heap.area);
region_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST);
jmem_heap_list_skip_p = &jmem_heap.first;
VALGRIND_NOACCESS_SPACE (jmem_heap.area, JMEM_HEAP_AREA_SIZE);
JMEM_HEAP_STAT_INIT ();
} /* jmem_heap_init */
/**
* Finalize heap
*/
void jmem_heap_finalize (void)
{
JERRY_ASSERT (jmem_heap_allocated_size == 0);
VALGRIND_NOACCESS_SPACE (&jmem_heap, sizeof (jmem_heap));
} /* jmem_heap_finalize */
/**
* Allocation of memory region.
*
* See also:
* jmem_heap_alloc_block
*
* @return pointer to allocated memory block - if allocation is successful,
* NULL - if there is not enough memory.
*/
static __attribute__((hot))
void *jmem_heap_alloc_block_internal (const size_t size)
{
// Align size
const size_t required_size = ((size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT) * JMEM_ALIGNMENT;
jmem_heap_free_t *data_space_p = NULL;
VALGRIND_DEFINED_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
// Fast path for 8 byte chunks, first region is guaranteed to be sufficient
if (required_size == JMEM_ALIGNMENT
&& likely (jmem_heap.first.next_offset != JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST)))
{
data_space_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset);
JERRY_ASSERT (jmem_is_heap_pointer (data_space_p));
VALGRIND_DEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
jmem_heap_allocated_size += JMEM_ALIGNMENT;
JMEM_HEAP_STAT_ALLOC_ITER ();
if (data_space_p->size == JMEM_ALIGNMENT)
{
jmem_heap.first.next_offset = data_space_p->next_offset;
}
else
{
JERRY_ASSERT (data_space_p->size > JMEM_ALIGNMENT);
jmem_heap_free_t *const remaining_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset) + 1;
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
remaining_p->size = data_space_p->size - JMEM_ALIGNMENT;
remaining_p->next_offset = data_space_p->next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));
jmem_heap.first.next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
}
VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (jmem_heap_free_t));
if (unlikely (data_space_p == jmem_heap_list_skip_p))
{
jmem_heap_list_skip_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset);
}
}
// Slow path for larger regions
else
{
jmem_heap_free_t *current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (jmem_heap.first.next_offset);
jmem_heap_free_t *prev_p = &jmem_heap.first;
while (current_p != JMEM_HEAP_END_OF_LIST)
{
JERRY_ASSERT (jmem_is_heap_pointer (current_p));
VALGRIND_DEFINED_SPACE (current_p, sizeof (jmem_heap_free_t));
JMEM_HEAP_STAT_ALLOC_ITER ();
const uint32_t next_offset = current_p->next_offset;
JERRY_ASSERT (jmem_is_heap_pointer (JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset))
|| next_offset == JMEM_HEAP_GET_OFFSET_FROM_ADDR (JMEM_HEAP_END_OF_LIST));
if (current_p->size >= required_size)
{
// Region is sufficiently big, store address
data_space_p = current_p;
jmem_heap_allocated_size += required_size;
// Region was larger than necessary
if (current_p->size > required_size)
{
// Get address of remaining space
jmem_heap_free_t *const remaining_p = (jmem_heap_free_t *) ((uint8_t *) current_p + required_size);
// Update metadata
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (jmem_heap_free_t));
remaining_p->size = current_p->size - (uint32_t) required_size;
remaining_p->next_offset = next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (jmem_heap_free_t));
// Update list
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
prev_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
}
// Block is an exact fit
else
{
// Remove the region from the list
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
prev_p->next_offset = next_offset;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
}
jmem_heap_list_skip_p = prev_p;
// Found enough space
break;
}
VALGRIND_NOACCESS_SPACE (current_p, sizeof (jmem_heap_free_t));
// Next in list
prev_p = current_p;
current_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset);
}
}
while (jmem_heap_allocated_size >= jmem_heap_limit)
{
jmem_heap_limit += CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
if (unlikely (!data_space_p))
{
return NULL;
}
JERRY_ASSERT ((uintptr_t) data_space_p % JMEM_ALIGNMENT == 0);
VALGRIND_UNDEFINED_SPACE (data_space_p, size);
JMEM_HEAP_STAT_ALLOC (size);
return (void *) data_space_p;
} /* jmem_heap_alloc_block_internal */
/**
* Allocation of memory block, running 'try to give memory back' callbacks, if there is not enough memory.
*
* Note:
* if after running the callbacks, there is still not enough memory, engine is terminated with ERR_OUT_OF_MEMORY.
*
* @return pointer to allocated memory block
*/
void * __attribute__((hot))
jmem_heap_alloc_block (const size_t size)
{
if (unlikely (size == 0))
{
return NULL;
}
VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;
#ifdef JMEM_GC_BEFORE_EACH_ALLOC
jmem_run_try_to_give_memory_back_callbacks (JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */
if (jmem_heap_allocated_size + size >= jmem_heap_limit)
{
jmem_run_try_to_give_memory_back_callbacks (JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW);
}
void *data_space_p = jmem_heap_alloc_block_internal (size);
if (likely (data_space_p != NULL))
{
VALGRIND_FREYA_MALLOCLIKE_SPACE (data_space_p, size);
return data_space_p;
}
for (jmem_try_give_memory_back_severity_t severity = JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW;
severity <= JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH;
severity = (jmem_try_give_memory_back_severity_t) (severity + 1))
{
jmem_run_try_to_give_memory_back_callbacks (severity);
data_space_p = jmem_heap_alloc_block_internal (size);
if (likely (data_space_p != NULL))
{
VALGRIND_FREYA_MALLOCLIKE_SPACE (data_space_p, size);
return data_space_p;
}
}
JERRY_ASSERT (data_space_p == NULL);
jerry_fatal (ERR_OUT_OF_MEMORY);
} /* jmem_heap_alloc_block */
/**
* Allocate block and store block size.
*
* Note: block will only be aligned to 4 bytes.
*/
inline void * __attr_always_inline___
jmem_heap_alloc_block_store_size (size_t size) /**< required size */
{
if (unlikely (size == 0))
{
return NULL;
}
size += sizeof (jmem_heap_free_t);
jmem_heap_free_t *const data_space_p = (jmem_heap_free_t *) jmem_heap_alloc_block (size);
data_space_p->size = (uint32_t) size;
return (void *) (data_space_p + 1);
} /* jmem_heap_alloc_block_store_size */
/**
* Free the memory block.
*/
void __attribute__((hot))
jmem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
const size_t size) /**< size of allocated region */
{
VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;
/* checking that ptr points to the heap */
JERRY_ASSERT (jmem_is_heap_pointer (ptr));
JERRY_ASSERT (size > 0);
JERRY_ASSERT (jmem_heap_limit >= jmem_heap_allocated_size);
VALGRIND_FREYA_FREELIKE_SPACE (ptr);
VALGRIND_NOACCESS_SPACE (ptr, size);
JMEM_HEAP_STAT_FREE_ITER ();
jmem_heap_free_t *block_p = (jmem_heap_free_t *) ptr;
jmem_heap_free_t *prev_p;
jmem_heap_free_t *next_p;
VALGRIND_DEFINED_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
if (block_p > jmem_heap_list_skip_p)
{
prev_p = jmem_heap_list_skip_p;
JMEM_HEAP_STAT_SKIP ();
}
else
{
prev_p = &jmem_heap.first;
JMEM_HEAP_STAT_NONSKIP ();
}
JERRY_ASSERT (jmem_is_heap_pointer (block_p));
const uint32_t block_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
// Find position of region in the list
while (prev_p->next_offset < block_offset)
{
jmem_heap_free_t *const next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
JERRY_ASSERT (jmem_is_heap_pointer (next_p));
VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
prev_p = next_p;
JMEM_HEAP_STAT_FREE_ITER ();
}
next_p = JMEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
/* Realign size */
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
VALGRIND_DEFINED_SPACE (block_p, sizeof (jmem_heap_free_t));
VALGRIND_DEFINED_SPACE (prev_p, sizeof (jmem_heap_free_t));
// Update prev
if (jmem_heap_get_region_end (prev_p) == block_p)
{
// Can be merged
prev_p->size += (uint32_t) aligned_size;
VALGRIND_NOACCESS_SPACE (block_p, sizeof (jmem_heap_free_t));
block_p = prev_p;
}
else
{
block_p->size = (uint32_t) aligned_size;
prev_p->next_offset = block_offset;
}
VALGRIND_DEFINED_SPACE (next_p, sizeof (jmem_heap_free_t));
// Update next
if (jmem_heap_get_region_end (block_p) == next_p)
{
if (unlikely (next_p == jmem_heap_list_skip_p))
{
jmem_heap_list_skip_p = block_p;
}
// Can be merged
block_p->size += next_p->size;
block_p->next_offset = next_p->next_offset;
}
else
{
block_p->next_offset = JMEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
}
jmem_heap_list_skip_p = prev_p;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (jmem_heap_free_t));
VALGRIND_NOACCESS_SPACE (block_p, size);
VALGRIND_NOACCESS_SPACE (next_p, sizeof (jmem_heap_free_t));
JERRY_ASSERT (jmem_heap_allocated_size > 0);
jmem_heap_allocated_size -= aligned_size;
while (jmem_heap_allocated_size + CONFIG_MEM_HEAP_DESIRED_LIMIT <= jmem_heap_limit)
{
jmem_heap_limit -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&jmem_heap.first, sizeof (jmem_heap_free_t));
JERRY_ASSERT (jmem_heap_limit >= jmem_heap_allocated_size);
JMEM_HEAP_STAT_FREE (size);
} /* jmem_heap_free_block */
/**
* Free block with stored size
*/
inline void __attr_always_inline___
jmem_heap_free_block_size_stored (void *ptr) /**< pointer to the memory block */
{
jmem_heap_free_t *const original_p = ((jmem_heap_free_t *) ptr) - 1;
JERRY_ASSERT (original_p + 1 == ptr);
jmem_heap_free_block (original_p, original_p->size);
} /* jmem_heap_free_block_size_stored */
/**
* Compress pointer
*
* @return packed heap pointer
*/
uintptr_t __attr_pure___ __attribute__((hot))
jmem_heap_compress_pointer (const void *pointer_p) /**< pointer to compress */
{
JERRY_ASSERT (pointer_p != NULL);
JERRY_ASSERT (jmem_is_heap_pointer (pointer_p));
uintptr_t int_ptr = (uintptr_t) pointer_p;
const uintptr_t heap_start = (uintptr_t) &jmem_heap;
JERRY_ASSERT (int_ptr % JMEM_ALIGNMENT == 0);
int_ptr -= heap_start;
int_ptr >>= JMEM_ALIGNMENT_LOG;
JERRY_ASSERT ((int_ptr & ~((1u << JMEM_HEAP_OFFSET_LOG) - 1)) == 0);
JERRY_ASSERT (int_ptr != JMEM_CP_NULL);
return int_ptr;
} /* jmem_heap_compress_pointer */
/**
* Decompress pointer
*
* @return unpacked heap pointer
*/
void * __attr_pure___ __attribute__((hot))
jmem_heap_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to decompress */
{
JERRY_ASSERT (compressed_pointer != JMEM_CP_NULL);
uintptr_t int_ptr = compressed_pointer;
const uintptr_t heap_start = (uintptr_t) &jmem_heap;
int_ptr <<= JMEM_ALIGNMENT_LOG;
int_ptr += heap_start;
JERRY_ASSERT (jmem_is_heap_pointer ((void *) int_ptr));
return (void *) int_ptr;
} /* jmem_heap_decompress_pointer */
#ifndef JERRY_NDEBUG
/**
* Check whether the pointer points to the heap
*
* Note:
* the routine should be used only for assertion checks
*
* @return true - if pointer points to the heap,
* false - otherwise
*/
bool
jmem_is_heap_pointer (const void *pointer) /**< pointer */
{
return ((uint8_t *) pointer >= jmem_heap.area
&& (uint8_t *) pointer <= ((uint8_t *) jmem_heap.area + JMEM_HEAP_AREA_SIZE));
} /* jmem_is_heap_pointer */
#endif /* !JERRY_NDEBUG */
#ifdef JMEM_STATS
/**
* Get heap memory usage statistics
*/
void
jmem_heap_get_stats (jmem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats */
{
JERRY_ASSERT (out_heap_stats_p != NULL);
*out_heap_stats_p = jmem_heap_stats;
} /* jmem_heap_get_stats */
/**
* Reset peak values in memory usage statistics
*/
void
jmem_heap_stats_reset_peak (void)
{
jmem_heap_stats.peak_allocated_bytes = jmem_heap_stats.allocated_bytes;
jmem_heap_stats.peak_waste_bytes = jmem_heap_stats.waste_bytes;
} /* jmem_heap_stats_reset_peak */
/**
* Print heap memory usage statistics
*/
void
jmem_heap_stats_print (void)
{
printf ("Heap stats:\n"
" Heap size = %zu bytes\n"
" Allocated = %zu bytes\n"
" Waste = %zu bytes\n"
" Peak allocated = %zu bytes\n"
" Peak waste = %zu bytes\n"
" Skip-ahead ratio = %zu.%04zu\n"
" Average alloc iteration = %zu.%04zu\n"
" Average free iteration = %zu.%04zu\n"
"\n",
jmem_heap_stats.size,
jmem_heap_stats.allocated_bytes,
jmem_heap_stats.waste_bytes,
jmem_heap_stats.peak_allocated_bytes,
jmem_heap_stats.peak_waste_bytes,
jmem_heap_stats.skip_count / jmem_heap_stats.nonskip_count,
jmem_heap_stats.skip_count % jmem_heap_stats.nonskip_count * 10000 / jmem_heap_stats.nonskip_count,
jmem_heap_stats.alloc_iter_count / jmem_heap_stats.alloc_count,
jmem_heap_stats.alloc_iter_count % jmem_heap_stats.alloc_count * 10000 / jmem_heap_stats.alloc_count,
jmem_heap_stats.free_iter_count / jmem_heap_stats.free_count,
jmem_heap_stats.free_iter_count % jmem_heap_stats.free_count * 10000 / jmem_heap_stats.free_count);
} /* jmem_heap_stats_print */
/**
* Initialize heap memory usage statistics account structure
*/
static void
jmem_heap_stat_init ()
{
memset (&jmem_heap_stats, 0, sizeof (jmem_heap_stats));
jmem_heap_stats.size = JMEM_HEAP_AREA_SIZE;
} /* jmem_heap_stat_init */
/**
* Account allocation
*/
static void
jmem_heap_stat_alloc (size_t size) /**< Size of allocated block */
{
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
jmem_heap_stats.allocated_bytes += aligned_size;
jmem_heap_stats.waste_bytes += waste_bytes;
jmem_heap_stats.alloc_count++;
if (jmem_heap_stats.allocated_bytes > jmem_heap_stats.peak_allocated_bytes)
{
jmem_heap_stats.peak_allocated_bytes = jmem_heap_stats.allocated_bytes;
}
if (jmem_heap_stats.allocated_bytes > jmem_heap_stats.global_peak_allocated_bytes)
{
jmem_heap_stats.global_peak_allocated_bytes = jmem_heap_stats.allocated_bytes;
}
if (jmem_heap_stats.waste_bytes > jmem_heap_stats.peak_waste_bytes)
{
jmem_heap_stats.peak_waste_bytes = jmem_heap_stats.waste_bytes;
}
if (jmem_heap_stats.waste_bytes > jmem_heap_stats.global_peak_waste_bytes)
{
jmem_heap_stats.global_peak_waste_bytes = jmem_heap_stats.waste_bytes;
}
} /* jmem_heap_stat_alloc */
/**
* Account freeing
*/
static void
jmem_heap_stat_free (size_t size) /**< Size of freed block */
{
const size_t aligned_size = (size + JMEM_ALIGNMENT - 1) / JMEM_ALIGNMENT * JMEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
jmem_heap_stats.free_count++;
jmem_heap_stats.allocated_bytes -= aligned_size;
jmem_heap_stats.waste_bytes -= waste_bytes;
} /* jmem_heap_stat_free */
/**
* Counts number of skip-aheads during insertion of free block
*/
static void
jmem_heap_stat_skip ()
{
jmem_heap_stats.skip_count++;
} /* jmem_heap_stat_skip */
/**
* Counts number of times we could not skip ahead during free block insertion
*/
static void
jmem_heap_stat_nonskip ()
{
jmem_heap_stats.nonskip_count++;
} /* jmem_heap_stat_nonskip */
/**
* Count number of iterations required for allocations
*/
static void
jmem_heap_stat_alloc_iter ()
{
jmem_heap_stats.alloc_iter_count++;
} /* jmem_heap_stat_alloc_iter */
/**
* Counts number of iterations required for inserting free blocks
*/
static void
jmem_heap_stat_free_iter ()
{
jmem_heap_stats.free_iter_count++;
} /* jmem_heap_stat_free_iter */
#endif /* JMEM_STATS */
/**
* @}
* @}
*/
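For illustration, a sketch pairing the two allocation styles implemented above; jmem_heap_alloc_block () terminates the engine on out-of-memory, so its result only needs a NULL check when a zero size is possible. The demo_* name is hypothetical:
#include "jmem-heap.h"
static void
demo_heap_usage (void)
{
  /* The caller tracks the size and passes it back on free. */
  uint8_t *buffer_p = (uint8_t *) jmem_heap_alloc_block (64);
  buffer_p[0] = 0;
  jmem_heap_free_block (buffer_p, 64);
  /* The size is stored in a hidden header, so the free call needs no size. */
  void *map_p = jmem_heap_alloc_block_store_size (16 * sizeof (uint32_t));
  jmem_heap_free_block_size_stored (map_p);
} /* demo_heap_usage */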


@ -17,8 +17,8 @@
/**
* Heap allocator interface
*/
#ifndef MEM_HEAP_H
#define MEM_HEAP_H
#ifndef JMEM_HEAP_H
#define JMEM_HEAP_H
#include "jrt.h"
@ -29,17 +29,17 @@
* @{
*/
extern void mem_heap_init (void);
extern void mem_heap_finalize (void);
extern void *mem_heap_alloc_block (const size_t);
extern void mem_heap_free_block (void *, const size_t);
extern void *mem_heap_alloc_block_store_size (size_t);
extern void mem_heap_free_block_size_stored (void *);
extern uintptr_t mem_heap_compress_pointer (const void *);
extern void *mem_heap_decompress_pointer (uintptr_t);
extern bool mem_is_heap_pointer (const void *);
extern void jmem_heap_init (void);
extern void jmem_heap_finalize (void);
extern void *jmem_heap_alloc_block (const size_t);
extern void jmem_heap_free_block (void *, const size_t);
extern void *jmem_heap_alloc_block_store_size (size_t);
extern void jmem_heap_free_block_size_stored (void *);
extern uintptr_t jmem_heap_compress_pointer (const void *);
extern void *jmem_heap_decompress_pointer (uintptr_t);
extern bool jmem_is_heap_pointer (const void *);
#ifdef MEM_STATS
#ifdef JMEM_STATS
/**
* Heap memory usage statistics
*/
@ -64,12 +64,12 @@ typedef struct
size_t free_count;
size_t free_iter_count;
} mem_heap_stats_t;
} jmem_heap_stats_t;
extern void mem_heap_get_stats (mem_heap_stats_t *);
extern void mem_heap_stats_reset_peak (void);
extern void mem_heap_stats_print (void);
#endif /* MEM_STATS */
extern void jmem_heap_get_stats (jmem_heap_stats_t *);
extern void jmem_heap_stats_reset_peak (void);
extern void jmem_heap_stats_print (void);
#endif /* JMEM_STATS */
#ifdef JERRY_VALGRIND_FREYA
@ -77,13 +77,13 @@ extern void mem_heap_stats_print (void);
#error Valgrind and valgrind-freya modes are not compatible.
#endif /* JERRY_VALGRIND */
extern void mem_heap_valgrind_freya_mempool_request (void);
extern void jmem_heap_valgrind_freya_mempool_request (void);
#define MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST() mem_heap_valgrind_freya_mempool_request ()
#define JMEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST() jmem_heap_valgrind_freya_mempool_request ()
#else /* !JERRY_VALGRIND_FREYA */
#define MEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST()
#define JMEM_HEAP_VALGRIND_FREYA_MEMPOOL_REQUEST()
#endif /* JERRY_VALGRIND_FREYA */
@ -95,21 +95,21 @@ extern void mem_heap_valgrind_freya_mempool_request (void);
* Warning:
* if there is not enough memory on the heap, shutdown engine with ERR_OUT_OF_MEMORY.
*/
#define MEM_DEFINE_LOCAL_ARRAY(var_name, number, type) \
#define JMEM_DEFINE_LOCAL_ARRAY(var_name, number, type) \
{ \
size_t var_name ## ___size = (size_t) (number) * sizeof (type); \
type *var_name = (type *) (mem_heap_alloc_block (var_name ## ___size));
type *var_name = (type *) (jmem_heap_alloc_block (var_name ## ___size));
/**
* Free the previously defined local array variable, freeing corresponding block on the heap,
* if it was allocated (i.e. if the array's size was non-zero).
*/
#define MEM_FINALIZE_LOCAL_ARRAY(var_name) \
#define JMEM_FINALIZE_LOCAL_ARRAY(var_name) \
if (var_name != NULL) \
{ \
JERRY_ASSERT (var_name ## ___size != 0); \
\
mem_heap_free_block (var_name, var_name ## ___size); \
jmem_heap_free_block (var_name, var_name ## ___size); \
} \
else \
{ \
@ -122,4 +122,4 @@ extern void mem_heap_valgrind_freya_mempool_request (void);
* @}
*/
#endif /* !MEM_HEAP_H */
#endif /* !JMEM_HEAP_H */
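For illustration, a sketch of the renamed local-array helpers; they open and close a block scope, so they must appear as a matched pair in the same function. The demo_* name is hypothetical:
#include "jmem-heap.h"
static void
demo_local_array (uint32_t count) /**< number of bytes to stage on the heap */
{
  JMEM_DEFINE_LOCAL_ARRAY (buffer_p, count, uint8_t);
  for (uint32_t i = 0; i < count; i++)
  {
    buffer_p[i] = (uint8_t) i;
  }
  JMEM_FINALIZE_LOCAL_ARRAY (buffer_p);
} /* demo_local_array */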


@ -0,0 +1,312 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Memory pool manager implementation
*/
#include "jrt.h"
#include "jrt-libc-includes.h"
#include "jmem-allocator.h"
#include "jmem-heap.h"
#include "jmem-poolman.h"
#define JMEM_ALLOCATOR_INTERNAL
#include "jmem-allocator-internal.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup poolman Memory pool manager
* @{
*/
/**
* Node for free chunk list
*/
typedef struct jmem_pools_chunk
{
struct jmem_pools_chunk *next_p; /**< pointer to next pool chunk */
} jmem_pools_chunk_t;
/**
* List of free pool chunks
*/
jmem_pools_chunk_t *jmem_free_chunk_p;
#ifdef JMEM_STATS
/**
* Pools' memory usage statistics
*/
jmem_pools_stats_t jmem_pools_stats;
static void jmem_pools_stat_init (void);
static void jmem_pools_stat_free_pool (void);
static void jmem_pools_stat_new_alloc (void);
static void jmem_pools_stat_reuse (void);
static void jmem_pools_stat_dealloc (void);
# define JMEM_POOLS_STAT_INIT() jmem_pools_stat_init ()
# define JMEM_POOLS_STAT_FREE_POOL() jmem_pools_stat_free_pool ()
# define JMEM_POOLS_STAT_NEW_ALLOC() jmem_pools_stat_new_alloc ()
# define JMEM_POOLS_STAT_REUSE() jmem_pools_stat_reuse ()
# define JMEM_POOLS_STAT_DEALLOC() jmem_pools_stat_dealloc ()
#else /* !JMEM_STATS */
# define JMEM_POOLS_STAT_INIT()
# define JMEM_POOLS_STAT_FREE_POOL()
# define JMEM_POOLS_STAT_NEW_ALLOC()
# define JMEM_POOLS_STAT_REUSE()
# define JMEM_POOLS_STAT_DEALLOC()
#endif /* JMEM_STATS */
/*
* Valgrind-related options and headers
*/
#ifdef JERRY_VALGRIND
# include "memcheck.h"
# define VALGRIND_NOACCESS_SPACE(p, s) VALGRIND_MAKE_MEM_NOACCESS((p), (s))
# define VALGRIND_UNDEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_UNDEFINED((p), (s))
# define VALGRIND_DEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_DEFINED((p), (s))
#else /* !JERRY_VALGRIND */
# define VALGRIND_NOACCESS_SPACE(p, s)
# define VALGRIND_UNDEFINED_SPACE(p, s)
# define VALGRIND_DEFINED_SPACE(p, s)
#endif /* JERRY_VALGRIND */
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) VALGRIND_MALLOCLIKE_BLOCK((p), (s), 0, 0)
# define VALGRIND_FREYA_FREELIKE_SPACE(p) VALGRIND_FREELIKE_BLOCK((p), 0)
#else /* !JERRY_VALGRIND_FREYA */
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/**
* Initialize pool manager
*/
void
jmem_pools_init (void)
{
JERRY_STATIC_ASSERT (sizeof (jmem_pools_chunk_t) <= JMEM_POOL_CHUNK_SIZE,
size_of_mem_pools_chunk_t_must_be_less_than_or_equal_to_MEM_POOL_CHUNK_SIZE);
jmem_free_chunk_p = NULL;
JMEM_POOLS_STAT_INIT ();
} /* jmem_pools_init */
/**
* Finalize pool manager
*/
void
jmem_pools_finalize (void)
{
jmem_pools_collect_empty ();
JERRY_ASSERT (jmem_free_chunk_p == NULL);
} /* jmem_pools_finalize */
/**
* Allocate a chunk of specified size
*
* @return pointer to allocated chunk, if allocation was successful,
* or NULL - if not enough memory.
*/
inline void * __attribute__((hot)) __attr_always_inline___
jmem_pools_alloc (void)
{
#ifdef JMEM_GC_BEFORE_EACH_ALLOC
jmem_run_try_to_give_memory_back_callbacks (JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
#endif /* JMEM_GC_BEFORE_EACH_ALLOC */
if (jmem_free_chunk_p != NULL)
{
const jmem_pools_chunk_t *const chunk_p = jmem_free_chunk_p;
JMEM_POOLS_STAT_REUSE ();
VALGRIND_DEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE);
jmem_free_chunk_p = chunk_p->next_p;
VALGRIND_UNDEFINED_SPACE (chunk_p, JMEM_POOL_CHUNK_SIZE);
return (void *) chunk_p;
}
else
{
JMEM_POOLS_STAT_NEW_ALLOC ();
return (void *) jmem_heap_alloc_block (JMEM_POOL_CHUNK_SIZE);
}
} /* jmem_pools_alloc */
/**
* Free the chunk
*/
void __attribute__((hot))
jmem_pools_free (void *chunk_p) /**< pointer to the chunk */
{
jmem_pools_chunk_t *const chunk_to_free_p = (jmem_pools_chunk_t *) chunk_p;
VALGRIND_DEFINED_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);
chunk_to_free_p->next_p = jmem_free_chunk_p;
jmem_free_chunk_p = chunk_to_free_p;
VALGRIND_NOACCESS_SPACE (chunk_to_free_p, JMEM_POOL_CHUNK_SIZE);
JMEM_POOLS_STAT_FREE_POOL ();
} /* jmem_pools_free */
/**
* Collect empty pool chunks
*/
void
jmem_pools_collect_empty ()
{
while (jmem_free_chunk_p)
{
VALGRIND_DEFINED_SPACE (jmem_free_chunk_p, sizeof (jmem_pools_chunk_t));
jmem_pools_chunk_t *const next_p = jmem_free_chunk_p->next_p;
VALGRIND_NOACCESS_SPACE (jmem_free_chunk_p, sizeof (jmem_pools_chunk_t));
jmem_heap_free_block (jmem_free_chunk_p, JMEM_POOL_CHUNK_SIZE);
JMEM_POOLS_STAT_DEALLOC ();
jmem_free_chunk_p = next_p;
}
} /* jmem_pools_collect_empty */
#ifdef JMEM_STATS
/**
* Get pools memory usage statistics
*/
void
jmem_pools_get_stats (jmem_pools_stats_t *out_pools_stats_p) /**< [out] pools' stats */
{
JERRY_ASSERT (out_pools_stats_p != NULL);
*out_pools_stats_p = jmem_pools_stats;
} /* jmem_pools_get_stats */
/**
* Reset peak values in memory usage statistics
*/
void
jmem_pools_stats_reset_peak (void)
{
jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count;
} /* jmem_pools_stats_reset_peak */
/**
* Print pools memory usage statistics
*/
void
jmem_pools_stats_print (void)
{
printf ("Pools stats:\n"
" Chunk size: %zu\n"
" Pool chunks: %zu\n"
" Peak pool chunks: %zu\n"
" Free chunks: %zu\n"
" Pool reuse ratio: %zu.%04zu\n",
JMEM_POOL_CHUNK_SIZE,
jmem_pools_stats.pools_count,
jmem_pools_stats.peak_pools_count,
jmem_pools_stats.free_chunks,
jmem_pools_stats.reused_count / jmem_pools_stats.new_alloc_count,
jmem_pools_stats.reused_count % jmem_pools_stats.new_alloc_count * 10000 / jmem_pools_stats.new_alloc_count);
} /* jmem_pools_stats_print */
/**
* Initialize pools' memory usage statistics account structure
*/
static void
jmem_pools_stat_init (void)
{
memset (&jmem_pools_stats, 0, sizeof (jmem_pools_stats));
} /* jmem_pools_stat_init */
/**
* Account for allocation of new pool chunk
*/
static void
jmem_pools_stat_new_alloc (void)
{
jmem_pools_stats.pools_count++;
jmem_pools_stats.new_alloc_count++;
if (jmem_pools_stats.pools_count > jmem_pools_stats.peak_pools_count)
{
jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count;
}
if (jmem_pools_stats.pools_count > jmem_pools_stats.global_peak_pools_count)
{
jmem_pools_stats.global_peak_pools_count = jmem_pools_stats.pools_count;
}
} /* jmem_pools_stat_new_alloc */
/**
* Account for reuse of pool chunk
*/
static void
jmem_pools_stat_reuse (void)
{
jmem_pools_stats.pools_count++;
jmem_pools_stats.free_chunks--;
jmem_pools_stats.reused_count++;
if (jmem_pools_stats.pools_count > jmem_pools_stats.peak_pools_count)
{
jmem_pools_stats.peak_pools_count = jmem_pools_stats.pools_count;
}
if (jmem_pools_stats.pools_count > jmem_pools_stats.global_peak_pools_count)
{
jmem_pools_stats.global_peak_pools_count = jmem_pools_stats.pools_count;
}
} /* jmem_pools_stat_reuse */
/**
* Account for freeing a chunk
*/
static void
jmem_pools_stat_free_pool (void)
{
JERRY_ASSERT (jmem_pools_stats.pools_count > 0);
jmem_pools_stats.pools_count--;
jmem_pools_stats.free_chunks++;
} /* jmem_pools_stat_free_pool */
/**
* Account for freeing a chunk
*/
static void
jmem_pools_stat_dealloc (void)
{
jmem_pools_stats.free_chunks--;
} /* jmem_pools_stat_dealloc */
#endif /* JMEM_STATS */
/**
* @}
* @}
*/
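For illustration, a sketch of the renamed pool-manager interface; every chunk has the fixed JMEM_POOL_CHUNK_SIZE, so no size is passed around. The demo_* name is hypothetical:
#include "jmem-poolman.h"
static void
demo_pool_usage (void)
{
  void *chunk_p = jmem_pools_alloc (); /* reuses a cached chunk when one is free */
  /* ... use up to JMEM_POOL_CHUNK_SIZE bytes at chunk_p ... */
  jmem_pools_free (chunk_p);           /* returns the chunk to the free-chunk list */
  jmem_pools_collect_empty ();         /* optionally hand cached chunks back to the heap */
} /* demo_pool_usage */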


@ -17,8 +17,8 @@
/**
* Pool manager interface
*/
#ifndef MEM_POOLMAN_H
#define MEM_POOLMAN_H
#ifndef JMEM_POOLMAN_H
#define JMEM_POOLMAN_H
#include "jrt.h"
@ -29,13 +29,13 @@
* @{
*/
extern void mem_pools_init (void);
extern void mem_pools_finalize (void);
extern void *mem_pools_alloc (void);
extern void mem_pools_free (void *);
extern void mem_pools_collect_empty (void);
extern void jmem_pools_init (void);
extern void jmem_pools_finalize (void);
extern void *jmem_pools_alloc (void);
extern void jmem_pools_free (void *);
extern void jmem_pools_collect_empty (void);
#ifdef MEM_STATS
#ifdef JMEM_STATS
/**
* Pools' memory usage statistics
*/
@ -58,16 +58,16 @@ typedef struct
/* Number of reused pool chunks */
size_t reused_count;
} mem_pools_stats_t;
} jmem_pools_stats_t;
extern void mem_pools_get_stats (mem_pools_stats_t *);
extern void mem_pools_stats_reset_peak (void);
extern void mem_pools_stats_print (void);
#endif /* MEM_STATS */
extern void jmem_pools_get_stats (jmem_pools_stats_t *);
extern void jmem_pools_stats_reset_peak (void);
extern void jmem_pools_stats_print (void);
#endif /* JMEM_STATS */
/**
* @}
* @}
*/
#endif /* !MEM_POOLMAN_H */
#endif /* !JMEM_POOLMAN_H */


@ -27,10 +27,10 @@ lit_cpointer_compress (lit_record_t *pointer) /**< pointer to compress */
{
if (pointer == NULL)
{
return MEM_CP_NULL;
return JMEM_CP_NULL;
}
return (lit_cpointer_t) mem_compress_pointer (pointer);
return (lit_cpointer_t) jmem_compress_pointer (pointer);
} /* lit_cpointer_compress */
/**
@ -41,12 +41,12 @@ lit_cpointer_compress (lit_record_t *pointer) /**< pointer to compress */
inline lit_record_t * __attr_pure___ __attr_always_inline___
lit_cpointer_decompress (lit_cpointer_t compressed_pointer) /**< recordset-specific compressed pointer */
{
if (compressed_pointer == MEM_CP_NULL)
if (compressed_pointer == JMEM_CP_NULL)
{
return NULL;
}
return (lit_record_t *) mem_decompress_pointer (compressed_pointer);
return (lit_record_t *) jmem_decompress_pointer (compressed_pointer);
} /* lit_cpointer_decompress */
/**
@ -57,5 +57,5 @@ lit_cpointer_decompress (lit_cpointer_t compressed_pointer) /**< recordset-speci
inline lit_cpointer_t __attr_pure___ __attr_always_inline___
lit_cpointer_null_cp (void)
{
return MEM_CP_NULL;
return JMEM_CP_NULL;
} /* lit_cpointer_null_cp */


@ -18,16 +18,16 @@
#define LIT_CPOINTER_H
#include "lit-literal-storage.h"
#include "mem-allocator.h"
#include "jmem-allocator.h"
#define LIT_CPOINTER_WIDTH (MEM_CP_WIDTH + MEM_ALIGNMENT_LOG - MEM_ALIGNMENT_LOG)
#define LIT_CPOINTER_WIDTH (JMEM_CP_WIDTH + JMEM_ALIGNMENT_LOG - JMEM_ALIGNMENT_LOG)
/**
* Dynamic storage-specific extended compressed pointer
*
* Note:
* the pointer can represent addresses aligned by lit_DYN_STORAGE_LENGTH_UNIT,
* while mem_cpointer_t can only represent addresses aligned by MEM_ALIGNMENT.
* while jmem_cpointer_t can only represent addresses aligned by JMEM_ALIGNMENT.
*/
typedef uint16_t lit_cpointer_t;


@ -30,7 +30,7 @@ lit_record_t *
lit_create_charset_literal (const lit_utf8_byte_t *str_p, /**< string to be placed into the record */
const lit_utf8_size_t buf_size) /**< size in bytes of the buffer which holds the string */
{
lit_charset_record_t *rec_p = (lit_charset_record_t *) mem_heap_alloc_block (buf_size + LIT_CHARSET_HEADER_SIZE);
lit_charset_record_t *rec_p = (lit_charset_record_t *) jmem_heap_alloc_block (buf_size + LIT_CHARSET_HEADER_SIZE);
rec_p->type = LIT_RECORD_TYPE_CHARSET;
rec_p->next = (uint16_t) lit_cpointer_compress (lit_storage);
@ -52,7 +52,7 @@ lit_create_charset_literal (const lit_utf8_byte_t *str_p, /**< string to be plac
lit_record_t *
lit_create_magic_literal (const lit_magic_string_id_t id) /**< id of magic string */
{
lit_magic_record_t *rec_p = (lit_magic_record_t *) mem_heap_alloc_block (sizeof (lit_magic_record_t));
lit_magic_record_t *rec_p = (lit_magic_record_t *) jmem_heap_alloc_block (sizeof (lit_magic_record_t));
rec_p->type = LIT_RECORD_TYPE_MAGIC_STR;
rec_p->next = (uint16_t) lit_cpointer_compress (lit_storage);
lit_storage = (lit_record_t *) rec_p;
@ -70,7 +70,7 @@ lit_create_magic_literal (const lit_magic_string_id_t id) /**< id of magic strin
lit_record_t *
lit_create_magic_literal_ex (const lit_magic_string_ex_id_t id) /**< id of magic string */
{
lit_magic_record_t *rec_p = (lit_magic_record_t *) mem_heap_alloc_block (sizeof (lit_magic_record_t));
lit_magic_record_t *rec_p = (lit_magic_record_t *) jmem_heap_alloc_block (sizeof (lit_magic_record_t));
rec_p->type = LIT_RECORD_TYPE_MAGIC_STR_EX;
rec_p->next = (uint16_t) lit_cpointer_compress (lit_storage);
lit_storage = (lit_record_t *) rec_p;
@ -88,7 +88,7 @@ lit_create_magic_literal_ex (const lit_magic_string_ex_id_t id) /**< id of magic
lit_record_t *
lit_create_number_literal (const ecma_number_t num) /**< numeric value */
{
lit_number_record_t *rec_p = (lit_number_record_t *) mem_heap_alloc_block (sizeof (lit_number_record_t));
lit_number_record_t *rec_p = (lit_number_record_t *) jmem_heap_alloc_block (sizeof (lit_number_record_t));
rec_p->type = (uint8_t) LIT_RECORD_TYPE_NUMBER;
rec_p->next = (uint16_t) lit_cpointer_compress (lit_storage);
@ -150,7 +150,7 @@ lit_record_t *
lit_free_literal (lit_record_t *lit_p) /**< literal record */
{
lit_record_t *const ret_p = lit_cpointer_decompress (lit_p->next);
mem_heap_free_block (lit_p, lit_get_literal_size (lit_p));
jmem_heap_free_block (lit_p, lit_get_literal_size (lit_p));
return ret_p;
} /* lit_free_literal */


@ -160,7 +160,7 @@ lit_save_literals_for_snapshot (uint8_t *buffer_p, /**< [out] output snapshot bu
size_t id_map_size = sizeof (lit_mem_to_snapshot_id_map_entry_t) * literals_num;
lit_mem_to_snapshot_id_map_entry_t *id_map_p;
id_map_p = (lit_mem_to_snapshot_id_map_entry_t *) mem_heap_alloc_block_store_size (id_map_size);
id_map_p = (lit_mem_to_snapshot_id_map_entry_t *) jmem_heap_alloc_block_store_size (id_map_size);
uint32_t literal_index = 0;
lit_literal_t lit;
@ -207,7 +207,7 @@ lit_save_literals_for_snapshot (uint8_t *buffer_p, /**< [out] output snapshot bu
if (!is_ok)
{
mem_heap_free_block_size_stored (id_map_p);
jmem_heap_free_block_size_stored (id_map_p);
return false;
}
@ -215,7 +215,7 @@ lit_save_literals_for_snapshot (uint8_t *buffer_p, /**< [out] output snapshot bu
*out_map_p = id_map_p;
}
uint32_t aligned_size = JERRY_ALIGNUP (lit_table_size, MEM_ALIGNMENT);
uint32_t aligned_size = JERRY_ALIGNUP (lit_table_size, JMEM_ALIGNMENT);
if (aligned_size != lit_table_size)
{
@ -284,7 +284,7 @@ lit_load_literals_from_snapshot (const uint8_t *lit_table_p, /**< buffer with li
size_t id_map_size = sizeof (lit_mem_to_snapshot_id_map_entry_t) * literals_num;
lit_mem_to_snapshot_id_map_entry_t *id_map_p;
id_map_p = (lit_mem_to_snapshot_id_map_entry_t *) mem_heap_alloc_block_store_size (id_map_size);
id_map_p = (lit_mem_to_snapshot_id_map_entry_t *) jmem_heap_alloc_block_store_size (id_map_size);
bool is_ok = true;
uint32_t lit_index;
@ -395,7 +395,7 @@ lit_load_literals_from_snapshot (const uint8_t *lit_table_p, /**< buffer with li
return true;
}
mem_heap_free_block_size_stored (id_map_p);
jmem_heap_free_block_size_stored (id_map_p);
return false;
} /* lit_load_literals_from_snapshot */


@ -1,149 +0,0 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Allocator implementation
*/
#include "jrt.h"
#include "jrt-libc-includes.h"
#include "mem-allocator.h"
#include "mem-heap.h"
#include "mem-poolman.h"
#define MEM_ALLOCATOR_INTERNAL
#include "mem-allocator-internal.h"
/**
* The 'try to give memory back' callback
*/
static mem_try_give_memory_back_callback_t mem_try_give_memory_back_callback = NULL;
/**
* Initialize memory allocators.
*/
void
mem_init (void)
{
mem_heap_init ();
mem_pools_init ();
} /* mem_init */
/**
* Finalize memory allocators.
*/
void
mem_finalize (bool is_show_mem_stats) /**< show heap memory stats
before finalization? */
{
mem_pools_finalize ();
#ifdef MEM_STATS
if (is_show_mem_stats)
{
mem_stats_print ();
}
#else /* !MEM_STATS */
(void) is_show_mem_stats;
#endif /* MEM_STATS */
mem_heap_finalize ();
} /* mem_finalize */
/**
* Compress pointer
*
* @return packed pointer
*/
uintptr_t
mem_compress_pointer (const void *pointer_p) /**< pointer to compress */
{
JERRY_ASSERT (mem_is_heap_pointer (pointer_p));
return mem_heap_compress_pointer (pointer_p);
} /* mem_compress_pointer */
/**
* Decompress pointer
*
* @return unpacked pointer
*/
void *
mem_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to decompress */
{
return mem_heap_decompress_pointer (compressed_pointer);
} /* mem_decompress_pointer */
/**
* Register specified 'try to give memory back' callback routine
*/
void
mem_register_a_try_give_memory_back_callback (mem_try_give_memory_back_callback_t callback) /* callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (mem_try_give_memory_back_callback == NULL);
mem_try_give_memory_back_callback = callback;
} /* mem_register_a_try_give_memory_back_callback */
/**
* Unregister specified 'try to give memory back' callback routine
*/
void
mem_unregister_a_try_give_memory_back_callback (mem_try_give_memory_back_callback_t callback) /* callback routine */
{
/* Currently only one callback is supported */
JERRY_ASSERT (mem_try_give_memory_back_callback == callback);
mem_try_give_memory_back_callback = NULL;
} /* mem_unregister_a_try_give_memory_back_callback */
/**
* Run 'try to give memory back' callbacks with specified severity
*/
void
mem_run_try_to_give_memory_back_callbacks (mem_try_give_memory_back_severity_t severity) /**< severity of
the request */
{
if (mem_try_give_memory_back_callback != NULL)
{
mem_try_give_memory_back_callback (severity);
}
mem_pools_collect_empty ();
} /* mem_run_try_to_give_memory_back_callbacks */
#ifdef MEM_STATS
/**
* Reset peak values in memory usage statistics
*/
void
mem_stats_reset_peak (void)
{
mem_heap_stats_reset_peak ();
mem_pools_stats_reset_peak ();
} /* mem_stats_reset_peak */
/**
* Print memory usage statistics
*/
void
mem_stats_print (void)
{
mem_heap_stats_print ();
mem_pools_stats_print ();
} /* mem_stats_print */
#endif /* MEM_STATS */


@ -1,775 +0,0 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Heap implementation
*/
#include "jrt.h"
#include "jrt-bit-fields.h"
#include "jrt-libc-includes.h"
#include "mem-allocator.h"
#include "mem-config.h"
#include "mem-heap.h"
#define MEM_ALLOCATOR_INTERNAL
#include "mem-allocator-internal.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup heap Heap
* @{
*/
/*
* Valgrind-related options and headers
*/
#ifdef JERRY_VALGRIND
# include "memcheck.h"
# define VALGRIND_NOACCESS_SPACE(p, s) VALGRIND_MAKE_MEM_NOACCESS((p), (s))
# define VALGRIND_UNDEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_UNDEFINED((p), (s))
# define VALGRIND_DEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_DEFINED((p), (s))
#else /* !JERRY_VALGRIND */
# define VALGRIND_NOACCESS_SPACE(p, s)
# define VALGRIND_UNDEFINED_SPACE(p, s)
# define VALGRIND_DEFINED_SPACE(p, s)
#endif /* JERRY_VALGRIND */
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
/**
* Tells whether a pool manager allocator request is in progress.
*/
static bool valgrind_freya_mempool_request = false;
/**
* Called by pool manager before a heap allocation or free.
*/
void mem_heap_valgrind_freya_mempool_request (void)
{
valgrind_freya_mempool_request = true;
} /* mem_heap_valgrind_freya_mempool_request */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST \
bool mempool_request = valgrind_freya_mempool_request; \
valgrind_freya_mempool_request = false
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) \
if (!mempool_request) \
{ \
VALGRIND_MALLOCLIKE_BLOCK((p), (s), 0, 0); \
}
# define VALGRIND_FREYA_FREELIKE_SPACE(p) \
if (!mempool_request) \
{ \
VALGRIND_FREELIKE_BLOCK((p), 0); \
}
#else /* !JERRY_VALGRIND_FREYA */
# define VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/* Calculate heap area size, leaving space for a pointer to the free list */
#define MEM_HEAP_AREA_SIZE (MEM_HEAP_SIZE - MEM_ALIGNMENT)
#define MEM_HEAP_END_OF_LIST ((mem_heap_free_t *const) ~((uint32_t) 0x0))
/**
* Free region node
*/
typedef struct
{
uint32_t next_offset; /* Offset of next region in list */
uint32_t size; /* Size of region */
} mem_heap_free_t;
#if UINTPTR_MAX > UINT32_MAX
#define MEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) ((uint8_t *) (p) - (uint8_t *) mem_heap.area))
#define MEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((mem_heap_free_t *) &mem_heap.area[u])
#else /* UINTPTR_MAX <= UINT32_MAX */
/* In this case we simply store the pointer, since it fits anyway. */
#define MEM_HEAP_GET_OFFSET_FROM_ADDR(p) ((uint32_t) (p))
#define MEM_HEAP_GET_ADDR_FROM_OFFSET(u) ((mem_heap_free_t *)(u))
#endif /* UINTPTR_MAX > UINT32_MAX */
/**
* Get end of region
*/
static inline mem_heap_free_t * __attr_always_inline___ __attr_pure___
mem_heap_get_region_end (mem_heap_free_t *curr_p) /**< current region */
{
return (mem_heap_free_t *)((uint8_t *) curr_p + curr_p->size);
} /* mem_heap_get_region_end */
/**
* Heap structure
*/
typedef struct
{
/** First node in free region list */
mem_heap_free_t first;
/**
* Heap area
*/
uint8_t area[MEM_HEAP_AREA_SIZE] __attribute__ ((aligned (MEM_ALIGNMENT)));
} mem_heap_t;
/**
* Heap
*/
#ifndef JERRY_HEAP_SECTION_ATTR
mem_heap_t mem_heap;
#else /* JERRY_HEAP_SECTION_ATTR */
mem_heap_t mem_heap __attribute__ ((section (JERRY_HEAP_SECTION_ATTR)));
#endif /* !JERRY_HEAP_SECTION_ATTR */
/**
 * Check that the size of the heap corresponds to the configuration
*/
JERRY_STATIC_ASSERT (sizeof (mem_heap) <= MEM_HEAP_SIZE,
size_of_mem_heap_must_be_less_than_or_equal_to_MEM_HEAP_SIZE);
/**
* Size of allocated regions
*/
size_t mem_heap_allocated_size;
/**
 * Current limit of heap usage; when it is reached, the "try to give memory back" callbacks are invoked
*/
size_t mem_heap_limit;
/* This is used to speed up deallocation. */
mem_heap_free_t *mem_heap_list_skip_p;
#ifdef MEM_STATS
/**
* Heap's memory usage statistics
*/
static mem_heap_stats_t mem_heap_stats;
static void mem_heap_stat_init (void);
static void mem_heap_stat_alloc (size_t num);
static void mem_heap_stat_free (size_t num);
static void mem_heap_stat_skip ();
static void mem_heap_stat_nonskip ();
static void mem_heap_stat_alloc_iter ();
static void mem_heap_stat_free_iter ();
# define MEM_HEAP_STAT_INIT() mem_heap_stat_init ()
# define MEM_HEAP_STAT_ALLOC(v1) mem_heap_stat_alloc (v1)
# define MEM_HEAP_STAT_FREE(v1) mem_heap_stat_free (v1)
# define MEM_HEAP_STAT_SKIP() mem_heap_stat_skip ()
# define MEM_HEAP_STAT_NONSKIP() mem_heap_stat_nonskip ()
# define MEM_HEAP_STAT_ALLOC_ITER() mem_heap_stat_alloc_iter ()
# define MEM_HEAP_STAT_FREE_ITER() mem_heap_stat_free_iter ()
#else /* !MEM_STATS */
# define MEM_HEAP_STAT_INIT()
# define MEM_HEAP_STAT_ALLOC(v1)
# define MEM_HEAP_STAT_FREE(v1)
# define MEM_HEAP_STAT_SKIP()
# define MEM_HEAP_STAT_NONSKIP()
# define MEM_HEAP_STAT_ALLOC_ITER()
# define MEM_HEAP_STAT_FREE_ITER()
#endif /* MEM_STATS */
/**
* Startup initialization of heap
*/
void
mem_heap_init (void)
{
JERRY_STATIC_ASSERT ((uintptr_t) mem_heap.area % MEM_ALIGNMENT == 0,
mem_heap_area_must_be_multiple_of_MEM_ALIGNMENT);
JERRY_STATIC_ASSERT ((1u << MEM_HEAP_OFFSET_LOG) >= MEM_HEAP_SIZE,
two_pow_mem_heap_offset_should_not_be_less_than_mem_heap_size);
mem_heap_allocated_size = 0;
mem_heap_limit = CONFIG_MEM_HEAP_DESIRED_LIMIT;
mem_heap.first.size = 0;
mem_heap_free_t *const region_p = (mem_heap_free_t *) mem_heap.area;
mem_heap.first.next_offset = MEM_HEAP_GET_OFFSET_FROM_ADDR (region_p);
region_p->size = sizeof (mem_heap.area);
region_p->next_offset = MEM_HEAP_GET_OFFSET_FROM_ADDR (MEM_HEAP_END_OF_LIST);
mem_heap_list_skip_p = &mem_heap.first;
VALGRIND_NOACCESS_SPACE (mem_heap.area, MEM_HEAP_AREA_SIZE);
MEM_HEAP_STAT_INIT ();
} /* mem_heap_init */
/**
* Finalize heap
*/
void mem_heap_finalize (void)
{
JERRY_ASSERT (mem_heap_allocated_size == 0);
VALGRIND_NOACCESS_SPACE (&mem_heap, sizeof (mem_heap));
} /* mem_heap_finalize */
/**
* Allocation of memory region.
*
* See also:
* mem_heap_alloc_block
*
* @return pointer to allocated memory block - if allocation is successful,
* NULL - if there is not enough memory.
*/
static __attribute__((hot))
void *mem_heap_alloc_block_internal (const size_t size)
{
// Align size
const size_t required_size = ((size + MEM_ALIGNMENT - 1) / MEM_ALIGNMENT) * MEM_ALIGNMENT;
mem_heap_free_t *data_space_p = NULL;
VALGRIND_DEFINED_SPACE (&mem_heap.first, sizeof (mem_heap_free_t));
// Fast path for 8 byte chunks, first region is guaranteed to be sufficient
if (required_size == MEM_ALIGNMENT
&& likely (mem_heap.first.next_offset != MEM_HEAP_GET_OFFSET_FROM_ADDR (MEM_HEAP_END_OF_LIST)))
{
data_space_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (mem_heap.first.next_offset);
JERRY_ASSERT (mem_is_heap_pointer (data_space_p));
VALGRIND_DEFINED_SPACE (data_space_p, sizeof (mem_heap_free_t));
mem_heap_allocated_size += MEM_ALIGNMENT;
MEM_HEAP_STAT_ALLOC_ITER ();
if (data_space_p->size == MEM_ALIGNMENT)
{
mem_heap.first.next_offset = data_space_p->next_offset;
}
else
{
JERRY_ASSERT (data_space_p->size > MEM_ALIGNMENT);
mem_heap_free_t *const remaining_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (mem_heap.first.next_offset) + 1;
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (mem_heap_free_t));
remaining_p->size = data_space_p->size - MEM_ALIGNMENT;
remaining_p->next_offset = data_space_p->next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (mem_heap_free_t));
mem_heap.first.next_offset = MEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
}
VALGRIND_UNDEFINED_SPACE (data_space_p, sizeof (mem_heap_free_t));
if (unlikely (data_space_p == mem_heap_list_skip_p))
{
mem_heap_list_skip_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (mem_heap.first.next_offset);
}
}
// Slow path for larger regions
else
{
mem_heap_free_t *current_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (mem_heap.first.next_offset);
mem_heap_free_t *prev_p = &mem_heap.first;
while (current_p != MEM_HEAP_END_OF_LIST)
{
JERRY_ASSERT (mem_is_heap_pointer (current_p));
VALGRIND_DEFINED_SPACE (current_p, sizeof (mem_heap_free_t));
MEM_HEAP_STAT_ALLOC_ITER ();
const uint32_t next_offset = current_p->next_offset;
JERRY_ASSERT (mem_is_heap_pointer (MEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset))
|| next_offset == MEM_HEAP_GET_OFFSET_FROM_ADDR (MEM_HEAP_END_OF_LIST));
if (current_p->size >= required_size)
{
// Region is sufficiently big, store address
data_space_p = current_p;
mem_heap_allocated_size += required_size;
// Region was larger than necessary
if (current_p->size > required_size)
{
// Get address of remaining space
mem_heap_free_t *const remaining_p = (mem_heap_free_t *) ((uint8_t *) current_p + required_size);
// Update metadata
VALGRIND_DEFINED_SPACE (remaining_p, sizeof (mem_heap_free_t));
remaining_p->size = current_p->size - (uint32_t) required_size;
remaining_p->next_offset = next_offset;
VALGRIND_NOACCESS_SPACE (remaining_p, sizeof (mem_heap_free_t));
// Update list
VALGRIND_DEFINED_SPACE (prev_p, sizeof (mem_heap_free_t));
prev_p->next_offset = MEM_HEAP_GET_OFFSET_FROM_ADDR (remaining_p);
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (mem_heap_free_t));
}
// Block is an exact fit
else
{
// Remove the region from the list
VALGRIND_DEFINED_SPACE (prev_p, sizeof (mem_heap_free_t));
prev_p->next_offset = next_offset;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (mem_heap_free_t));
}
mem_heap_list_skip_p = prev_p;
// Found enough space
break;
}
VALGRIND_NOACCESS_SPACE (current_p, sizeof (mem_heap_free_t));
// Next in list
prev_p = current_p;
current_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (next_offset);
}
}
while (mem_heap_allocated_size >= mem_heap_limit)
{
mem_heap_limit += CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&mem_heap.first, sizeof (mem_heap_free_t));
if (unlikely (!data_space_p))
{
return NULL;
}
JERRY_ASSERT ((uintptr_t) data_space_p % MEM_ALIGNMENT == 0);
VALGRIND_UNDEFINED_SPACE (data_space_p, size);
MEM_HEAP_STAT_ALLOC (size);
return (void *) data_space_p;
} /* mem_heap_alloc_block_internal */
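/*
 * Note: the requested size is rounded up to MEM_ALIGNMENT before searching,
 * so (assuming the default 8-byte alignment) a 13-byte request consumes a
 * 16-byte region. Minimum-size requests are served straight from the head
 * of the free list; larger requests walk the offset-encoded list first-fit
 * and split the chosen region when it is bigger than needed.
 */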
/**
* Allocation of memory block, running 'try to give memory back' callbacks, if there is not enough memory.
*
* Note:
* if after running the callbacks, there is still not enough memory, engine is terminated with ERR_OUT_OF_MEMORY.
*
* @return pointer to allocated memory block
*/
void * __attribute__((hot))
mem_heap_alloc_block (const size_t size)
{
if (unlikely (size == 0))
{
return NULL;
}
VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;
#ifdef MEM_GC_BEFORE_EACH_ALLOC
mem_run_try_to_give_memory_back_callbacks (MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
#endif /* MEM_GC_BEFORE_EACH_ALLOC */
if (mem_heap_allocated_size + size >= mem_heap_limit)
{
mem_run_try_to_give_memory_back_callbacks (MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW);
}
void *data_space_p = mem_heap_alloc_block_internal (size);
if (likely (data_space_p != NULL))
{
VALGRIND_FREYA_MALLOCLIKE_SPACE (data_space_p, size);
return data_space_p;
}
for (mem_try_give_memory_back_severity_t severity = MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW;
severity <= MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH;
severity = (mem_try_give_memory_back_severity_t) (severity + 1))
{
mem_run_try_to_give_memory_back_callbacks (severity);
data_space_p = mem_heap_alloc_block_internal (size);
if (likely (data_space_p != NULL))
{
VALGRIND_FREYA_MALLOCLIKE_SPACE (data_space_p, size);
return data_space_p;
}
}
JERRY_ASSERT (data_space_p == NULL);
jerry_fatal (ERR_OUT_OF_MEMORY);
} /* mem_heap_alloc_block */
/**
* Allocate block and store block size.
*
* Note: block will only be aligned to 4 bytes.
*/
inline void * __attr_always_inline___
mem_heap_alloc_block_store_size (size_t size) /**< required size */
{
if (unlikely (size == 0))
{
return NULL;
}
size += sizeof (mem_heap_free_t);
mem_heap_free_t *const data_space_p = (mem_heap_free_t *) mem_heap_alloc_block (size);
data_space_p->size = (uint32_t) size;
return (void *) (data_space_p + 1);
} /* mem_heap_alloc_block_store_size */
/**
* Free the memory block.
*/
void __attribute__((hot))
mem_heap_free_block (void *ptr, /**< pointer to beginning of data space of the block */
const size_t size) /**< size of allocated region */
{
VALGRIND_FREYA_CHECK_MEMPOOL_REQUEST;
/* checking that ptr points to the heap */
JERRY_ASSERT (mem_is_heap_pointer (ptr));
JERRY_ASSERT (size > 0);
JERRY_ASSERT (mem_heap_limit >= mem_heap_allocated_size);
VALGRIND_FREYA_FREELIKE_SPACE (ptr);
VALGRIND_NOACCESS_SPACE (ptr, size);
MEM_HEAP_STAT_FREE_ITER ();
mem_heap_free_t *block_p = (mem_heap_free_t *) ptr;
mem_heap_free_t *prev_p;
mem_heap_free_t *next_p;
VALGRIND_DEFINED_SPACE (&mem_heap.first, sizeof (mem_heap_free_t));
if (block_p > mem_heap_list_skip_p)
{
prev_p = mem_heap_list_skip_p;
MEM_HEAP_STAT_SKIP ();
}
else
{
prev_p = &mem_heap.first;
MEM_HEAP_STAT_NONSKIP ();
}
JERRY_ASSERT (mem_is_heap_pointer (block_p));
const uint32_t block_offset = MEM_HEAP_GET_OFFSET_FROM_ADDR (block_p);
VALGRIND_DEFINED_SPACE (prev_p, sizeof (mem_heap_free_t));
// Find position of region in the list
while (prev_p->next_offset < block_offset)
{
mem_heap_free_t *const next_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
JERRY_ASSERT (mem_is_heap_pointer (next_p));
VALGRIND_DEFINED_SPACE (next_p, sizeof (mem_heap_free_t));
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (mem_heap_free_t));
prev_p = next_p;
MEM_HEAP_STAT_FREE_ITER ();
}
next_p = MEM_HEAP_GET_ADDR_FROM_OFFSET (prev_p->next_offset);
VALGRIND_DEFINED_SPACE (next_p, sizeof (mem_heap_free_t));
/* Realign size */
const size_t aligned_size = (size + MEM_ALIGNMENT - 1) / MEM_ALIGNMENT * MEM_ALIGNMENT;
VALGRIND_DEFINED_SPACE (block_p, sizeof (mem_heap_free_t));
VALGRIND_DEFINED_SPACE (prev_p, sizeof (mem_heap_free_t));
// Update prev
if (mem_heap_get_region_end (prev_p) == block_p)
{
// Can be merged
prev_p->size += (uint32_t) aligned_size;
VALGRIND_NOACCESS_SPACE (block_p, sizeof (mem_heap_free_t));
block_p = prev_p;
}
else
{
block_p->size = (uint32_t) aligned_size;
prev_p->next_offset = block_offset;
}
VALGRIND_DEFINED_SPACE (next_p, sizeof (mem_heap_free_t));
// Update next
if (mem_heap_get_region_end (block_p) == next_p)
{
if (unlikely (next_p == mem_heap_list_skip_p))
{
mem_heap_list_skip_p = block_p;
}
// Can be merged
block_p->size += next_p->size;
block_p->next_offset = next_p->next_offset;
}
else
{
block_p->next_offset = MEM_HEAP_GET_OFFSET_FROM_ADDR (next_p);
}
mem_heap_list_skip_p = prev_p;
VALGRIND_NOACCESS_SPACE (prev_p, sizeof (mem_heap_free_t));
VALGRIND_NOACCESS_SPACE (block_p, size);
VALGRIND_NOACCESS_SPACE (next_p, sizeof (mem_heap_free_t));
JERRY_ASSERT (mem_heap_allocated_size > 0);
mem_heap_allocated_size -= aligned_size;
while (mem_heap_allocated_size + CONFIG_MEM_HEAP_DESIRED_LIMIT <= mem_heap_limit)
{
mem_heap_limit -= CONFIG_MEM_HEAP_DESIRED_LIMIT;
}
VALGRIND_NOACCESS_SPACE (&mem_heap.first, sizeof (mem_heap_free_t));
JERRY_ASSERT (mem_heap_limit >= mem_heap_allocated_size);
MEM_HEAP_STAT_FREE (size);
} /* mem_heap_free_block */
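/*
 * Note: freed regions are re-linked in address order and merged with the
 * previous and/or next free region whenever they are adjacent, so the list
 * never holds two touching regions. mem_heap_list_skip_p remembers the
 * insertion point to shorten the next search, and mem_heap_limit is lowered
 * again in CONFIG_MEM_HEAP_DESIRED_LIMIT steps as the allocated size drops.
 */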
/**
* Free block with stored size
*/
inline void __attr_always_inline___
mem_heap_free_block_size_stored (void *ptr) /**< pointer to the memory block */
{
mem_heap_free_t *const original_p = ((mem_heap_free_t *) ptr) - 1;
JERRY_ASSERT (original_p + 1 == ptr);
mem_heap_free_block (original_p, original_p->size);
} /* mem_heap_free_block_size_stored */
/**
* Compress pointer
*
* @return packed heap pointer
*/
uintptr_t __attr_pure___ __attribute__((hot))
mem_heap_compress_pointer (const void *pointer_p) /**< pointer to compress */
{
JERRY_ASSERT (pointer_p != NULL);
JERRY_ASSERT (mem_is_heap_pointer (pointer_p));
uintptr_t int_ptr = (uintptr_t) pointer_p;
const uintptr_t heap_start = (uintptr_t) &mem_heap;
JERRY_ASSERT (int_ptr % MEM_ALIGNMENT == 0);
int_ptr -= heap_start;
int_ptr >>= MEM_ALIGNMENT_LOG;
JERRY_ASSERT ((int_ptr & ~((1u << MEM_HEAP_OFFSET_LOG) - 1)) == 0);
JERRY_ASSERT (int_ptr != MEM_CP_NULL);
return int_ptr;
} /* mem_heap_compress_pointer */
/**
* Decompress pointer
*
* @return unpacked heap pointer
*/
void * __attr_pure___ __attribute__((hot))
mem_heap_decompress_pointer (uintptr_t compressed_pointer) /**< pointer to decompress */
{
JERRY_ASSERT (compressed_pointer != MEM_CP_NULL);
uintptr_t int_ptr = compressed_pointer;
const uintptr_t heap_start = (uintptr_t) &mem_heap;
int_ptr <<= MEM_ALIGNMENT_LOG;
int_ptr += heap_start;
JERRY_ASSERT (mem_is_heap_pointer ((void *) int_ptr));
return (void *) int_ptr;
} /* mem_heap_decompress_pointer */
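/*
 * Worked example, assuming the default 8-byte alignment (MEM_ALIGNMENT_LOG == 3):
 * a block at ((uint8_t *) &mem_heap) + 0x40 compresses to 0x40 >> 3 == 0x8,
 * which fits into MEM_HEAP_OFFSET_LOG bits; decompression shifts left by 3 and
 * adds the heap base back, recovering the original pointer exactly.
 */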
#ifndef JERRY_NDEBUG
/**
* Check whether the pointer points to the heap
*
* Note:
* the routine should be used only for assertion checks
*
* @return true - if pointer points to the heap,
* false - otherwise
*/
bool
mem_is_heap_pointer (const void *pointer) /**< pointer */
{
return ((uint8_t *) pointer >= mem_heap.area
&& (uint8_t *) pointer <= ((uint8_t *) mem_heap.area + MEM_HEAP_AREA_SIZE));
} /* mem_is_heap_pointer */
#endif /* !JERRY_NDEBUG */
#ifdef MEM_STATS
/**
* Get heap memory usage statistics
*/
void
mem_heap_get_stats (mem_heap_stats_t *out_heap_stats_p) /**< [out] heap stats */
{
JERRY_ASSERT (out_heap_stats_p != NULL);
*out_heap_stats_p = mem_heap_stats;
} /* mem_heap_get_stats */
/**
* Reset peak values in memory usage statistics
*/
void
mem_heap_stats_reset_peak (void)
{
mem_heap_stats.peak_allocated_bytes = mem_heap_stats.allocated_bytes;
mem_heap_stats.peak_waste_bytes = mem_heap_stats.waste_bytes;
} /* mem_heap_stats_reset_peak */
/**
* Print heap memory usage statistics
*/
void
mem_heap_stats_print (void)
{
printf ("Heap stats:\n"
" Heap size = %zu bytes\n"
" Allocated = %zu bytes\n"
" Waste = %zu bytes\n"
" Peak allocated = %zu bytes\n"
" Peak waste = %zu bytes\n"
" Skip-ahead ratio = %zu.%04zu\n"
" Average alloc iteration = %zu.%04zu\n"
" Average free iteration = %zu.%04zu\n"
"\n",
mem_heap_stats.size,
mem_heap_stats.allocated_bytes,
mem_heap_stats.waste_bytes,
mem_heap_stats.peak_allocated_bytes,
mem_heap_stats.peak_waste_bytes,
mem_heap_stats.skip_count / mem_heap_stats.nonskip_count,
mem_heap_stats.skip_count % mem_heap_stats.nonskip_count * 10000 / mem_heap_stats.nonskip_count,
mem_heap_stats.alloc_iter_count / mem_heap_stats.alloc_count,
mem_heap_stats.alloc_iter_count % mem_heap_stats.alloc_count * 10000 / mem_heap_stats.alloc_count,
mem_heap_stats.free_iter_count / mem_heap_stats.free_count,
mem_heap_stats.free_iter_count % mem_heap_stats.free_count * 10000 / mem_heap_stats.free_count);
} /* mem_heap_stats_print */
/**
 * Initialize heap memory usage statistics account structure
*/
static void
mem_heap_stat_init ()
{
memset (&mem_heap_stats, 0, sizeof (mem_heap_stats));
mem_heap_stats.size = MEM_HEAP_AREA_SIZE;
} /* mem_heap_stat_init */
/**
* Account allocation
*/
static void
mem_heap_stat_alloc (size_t size) /**< Size of allocated block */
{
const size_t aligned_size = (size + MEM_ALIGNMENT - 1) / MEM_ALIGNMENT * MEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
mem_heap_stats.allocated_bytes += aligned_size;
mem_heap_stats.waste_bytes += waste_bytes;
mem_heap_stats.alloc_count++;
if (mem_heap_stats.allocated_bytes > mem_heap_stats.peak_allocated_bytes)
{
mem_heap_stats.peak_allocated_bytes = mem_heap_stats.allocated_bytes;
}
if (mem_heap_stats.allocated_bytes > mem_heap_stats.global_peak_allocated_bytes)
{
mem_heap_stats.global_peak_allocated_bytes = mem_heap_stats.allocated_bytes;
}
if (mem_heap_stats.waste_bytes > mem_heap_stats.peak_waste_bytes)
{
mem_heap_stats.peak_waste_bytes = mem_heap_stats.waste_bytes;
}
if (mem_heap_stats.waste_bytes > mem_heap_stats.global_peak_waste_bytes)
{
mem_heap_stats.global_peak_waste_bytes = mem_heap_stats.waste_bytes;
}
} /* mem_heap_stat_alloc */
/**
* Account freeing
*/
static void
mem_heap_stat_free (size_t size) /**< Size of freed block */
{
const size_t aligned_size = (size + MEM_ALIGNMENT - 1) / MEM_ALIGNMENT * MEM_ALIGNMENT;
const size_t waste_bytes = aligned_size - size;
mem_heap_stats.free_count++;
mem_heap_stats.allocated_bytes -= aligned_size;
mem_heap_stats.waste_bytes -= waste_bytes;
} /* mem_heap_stat_free */
/**
* Counts number of skip-aheads during insertion of free block
*/
static void
mem_heap_stat_skip ()
{
mem_heap_stats.skip_count++;
} /* mem_heap_stat_skip */
/**
* Counts number of times we could not skip ahead during free block insertion
*/
static void
mem_heap_stat_nonskip ()
{
mem_heap_stats.nonskip_count++;
} /* mem_heap_stat_nonskip */
/**
* Count number of iterations required for allocations
*/
static void
mem_heap_stat_alloc_iter ()
{
mem_heap_stats.alloc_iter_count++;
} /* mem_heap_stat_alloc_iter */
/**
* Counts number of iterations required for inserting free blocks
*/
static void
mem_heap_stat_free_iter ()
{
mem_heap_stats.free_iter_count++;
} /* mem_heap_stat_free_iter */
#endif /* MEM_STATS */
/**
* @}
* @}
*/
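For orientation, a minimal sketch of how this heap API is driven under the post-rename jmem_ names used elsewhere in this diff (the allocator is assumed to be initialized via jmem_init (); the 64-byte size is illustrative only):
#include <stdint.h>
#include <string.h>
#include "jmem-heap.h"
static void
heap_api_sketch (void)
{
  /* Caller-tracked size: the same size must be passed to the free call. */
  uint8_t *block_p = (uint8_t *) jmem_heap_alloc_block (64);
  memset (block_p, 0, 64);
  jmem_heap_free_block (block_p, 64);
  /* Size-storing variant: the size is kept in a small header in front of
   * the returned block, so only the pointer is needed when freeing. */
  uint8_t *stored_p = (uint8_t *) jmem_heap_alloc_block_store_size (64);
  memset (stored_p, 0, 64);
  jmem_heap_free_block_size_stored (stored_p);
}
Note that the block allocator terminates the engine with ERR_OUT_OF_MEMORY instead of returning NULL (NULL is only returned for zero-size requests), so the sketch omits a NULL check.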

View File

@ -1,312 +0,0 @@
/* Copyright 2014-2016 Samsung Electronics Co., Ltd.
* Copyright 2016 University of Szeged.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* Memory pool manager implementation
*/
#include "jrt.h"
#include "jrt-libc-includes.h"
#include "mem-allocator.h"
#include "mem-heap.h"
#include "mem-poolman.h"
#define MEM_ALLOCATOR_INTERNAL
#include "mem-allocator-internal.h"
/** \addtogroup mem Memory allocation
* @{
*
* \addtogroup poolman Memory pool manager
* @{
*/
/**
* Node for free chunk list
*/
typedef struct mem_pools_chunk
{
struct mem_pools_chunk *next_p; /* pointer to next pool chunk */
} mem_pools_chunk_t;
/**
* List of free pool chunks
*/
mem_pools_chunk_t *mem_free_chunk_p;
#ifdef MEM_STATS
/**
* Pools' memory usage statistics
*/
mem_pools_stats_t mem_pools_stats;
static void mem_pools_stat_init (void);
static void mem_pools_stat_free_pool (void);
static void mem_pools_stat_new_alloc (void);
static void mem_pools_stat_reuse (void);
static void mem_pools_stat_dealloc (void);
# define MEM_POOLS_STAT_INIT() mem_pools_stat_init ()
# define MEM_POOLS_STAT_FREE_POOL() mem_pools_stat_free_pool ()
# define MEM_POOLS_STAT_NEW_ALLOC() mem_pools_stat_new_alloc ()
# define MEM_POOLS_STAT_REUSE() mem_pools_stat_reuse ()
# define MEM_POOLS_STAT_DEALLOC() mem_pools_stat_dealloc ()
#else /* !MEM_STATS */
# define MEM_POOLS_STAT_INIT()
# define MEM_POOLS_STAT_FREE_POOL()
# define MEM_POOLS_STAT_NEW_ALLOC()
# define MEM_POOLS_STAT_REUSE()
# define MEM_POOLS_STAT_DEALLOC()
#endif /* MEM_STATS */
/*
* Valgrind-related options and headers
*/
#ifdef JERRY_VALGRIND
# include "memcheck.h"
# define VALGRIND_NOACCESS_SPACE(p, s) VALGRIND_MAKE_MEM_NOACCESS((p), (s))
# define VALGRIND_UNDEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_UNDEFINED((p), (s))
# define VALGRIND_DEFINED_SPACE(p, s) VALGRIND_MAKE_MEM_DEFINED((p), (s))
#else /* !JERRY_VALGRIND */
# define VALGRIND_NOACCESS_SPACE(p, s)
# define VALGRIND_UNDEFINED_SPACE(p, s)
# define VALGRIND_DEFINED_SPACE(p, s)
#endif /* JERRY_VALGRIND */
#ifdef JERRY_VALGRIND_FREYA
# include "memcheck.h"
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s) VALGRIND_MALLOCLIKE_BLOCK((p), (s), 0, 0)
# define VALGRIND_FREYA_FREELIKE_SPACE(p) VALGRIND_FREELIKE_BLOCK((p), 0)
#else /* !JERRY_VALGRIND_FREYA */
# define VALGRIND_FREYA_MALLOCLIKE_SPACE(p, s)
# define VALGRIND_FREYA_FREELIKE_SPACE(p)
#endif /* JERRY_VALGRIND_FREYA */
/**
* Initialize pool manager
*/
void
mem_pools_init (void)
{
JERRY_STATIC_ASSERT (sizeof (mem_pools_chunk_t) <= MEM_POOL_CHUNK_SIZE,
size_of_mem_pools_chunk_t_must_be_less_than_or_equal_to_MEM_POOL_CHUNK_SIZE);
mem_free_chunk_p = NULL;
MEM_POOLS_STAT_INIT ();
} /* mem_pools_init */
/**
* Finalize pool manager
*/
void
mem_pools_finalize (void)
{
mem_pools_collect_empty ();
JERRY_ASSERT (mem_free_chunk_p == NULL);
} /* mem_pools_finalize */
/**
 * Allocate a pool chunk of fixed size (MEM_POOL_CHUNK_SIZE bytes)
*
* @return pointer to allocated chunk, if allocation was successful,
* or NULL - if not enough memory.
*/
inline void * __attribute__((hot)) __attr_always_inline___
mem_pools_alloc (void)
{
#ifdef MEM_GC_BEFORE_EACH_ALLOC
mem_run_try_to_give_memory_back_callbacks (MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
#endif /* MEM_GC_BEFORE_EACH_ALLOC */
if (mem_free_chunk_p != NULL)
{
const mem_pools_chunk_t *const chunk_p = mem_free_chunk_p;
MEM_POOLS_STAT_REUSE ();
VALGRIND_DEFINED_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);
mem_free_chunk_p = chunk_p->next_p;
VALGRIND_UNDEFINED_SPACE (chunk_p, MEM_POOL_CHUNK_SIZE);
return (void *) chunk_p;
}
else
{
MEM_POOLS_STAT_NEW_ALLOC ();
return (void *) mem_heap_alloc_block (MEM_POOL_CHUNK_SIZE);
}
} /* mem_pools_alloc */
/**
* Free the chunk
*/
void __attribute__((hot))
mem_pools_free (void *chunk_p) /**< pointer to the chunk */
{
mem_pools_chunk_t *const chunk_to_free_p = (mem_pools_chunk_t *) chunk_p;
VALGRIND_DEFINED_SPACE (chunk_to_free_p, MEM_POOL_CHUNK_SIZE);
chunk_to_free_p->next_p = mem_free_chunk_p;
mem_free_chunk_p = chunk_to_free_p;
VALGRIND_NOACCESS_SPACE (chunk_to_free_p, MEM_POOL_CHUNK_SIZE);
MEM_POOLS_STAT_FREE_POOL ();
} /* mem_pools_free */
/**
* Collect empty pool chunks
*/
void
mem_pools_collect_empty ()
{
while (mem_free_chunk_p)
{
VALGRIND_DEFINED_SPACE (mem_free_chunk_p, sizeof (mem_pools_chunk_t));
mem_pools_chunk_t *const next_p = mem_free_chunk_p->next_p;
VALGRIND_NOACCESS_SPACE (mem_free_chunk_p, sizeof (mem_pools_chunk_t));
mem_heap_free_block (mem_free_chunk_p, MEM_POOL_CHUNK_SIZE);
MEM_POOLS_STAT_DEALLOC ();
mem_free_chunk_p = next_p;
}
} /* mem_pools_collect_empty */
#ifdef MEM_STATS
/**
* Get pools memory usage statistics
*/
void
mem_pools_get_stats (mem_pools_stats_t *out_pools_stats_p) /**< [out] pools' stats */
{
JERRY_ASSERT (out_pools_stats_p != NULL);
*out_pools_stats_p = mem_pools_stats;
} /* mem_pools_get_stats */
/**
* Reset peak values in memory usage statistics
*/
void
mem_pools_stats_reset_peak (void)
{
mem_pools_stats.peak_pools_count = mem_pools_stats.pools_count;
} /* mem_pools_stats_reset_peak */
/**
* Print pools memory usage statistics
*/
void
mem_pools_stats_print (void)
{
printf ("Pools stats:\n"
" Chunk size: %zu\n"
" Pool chunks: %zu\n"
" Peak pool chunks: %zu\n"
" Free chunks: %zu\n"
" Pool reuse ratio: %zu.%04zu\n",
MEM_POOL_CHUNK_SIZE,
mem_pools_stats.pools_count,
mem_pools_stats.peak_pools_count,
mem_pools_stats.free_chunks,
mem_pools_stats.reused_count / mem_pools_stats.new_alloc_count,
mem_pools_stats.reused_count % mem_pools_stats.new_alloc_count * 10000 / mem_pools_stats.new_alloc_count);
} /* mem_pools_stats_print */
/**
 * Initialize pools' memory usage statistics account structure
*/
static void
mem_pools_stat_init (void)
{
memset (&mem_pools_stats, 0, sizeof (mem_pools_stats));
} /* mem_pools_stat_init */
/**
* Account for allocation of new pool chunk
*/
static void
mem_pools_stat_new_alloc (void)
{
mem_pools_stats.pools_count++;
mem_pools_stats.new_alloc_count++;
if (mem_pools_stats.pools_count > mem_pools_stats.peak_pools_count)
{
mem_pools_stats.peak_pools_count = mem_pools_stats.pools_count;
}
if (mem_pools_stats.pools_count > mem_pools_stats.global_peak_pools_count)
{
mem_pools_stats.global_peak_pools_count = mem_pools_stats.pools_count;
}
} /* mem_pools_stat_new_alloc */
/**
* Account for reuse of pool chunk
*/
static void
mem_pools_stat_reuse (void)
{
mem_pools_stats.pools_count++;
mem_pools_stats.free_chunks--;
mem_pools_stats.reused_count++;
if (mem_pools_stats.pools_count > mem_pools_stats.peak_pools_count)
{
mem_pools_stats.peak_pools_count = mem_pools_stats.pools_count;
}
if (mem_pools_stats.pools_count > mem_pools_stats.global_peak_pools_count)
{
mem_pools_stats.global_peak_pools_count = mem_pools_stats.pools_count;
}
} /* mem_pools_stat_reuse */
/**
* Account for freeing a chunk
*/
static void
mem_pools_stat_free_pool (void)
{
JERRY_ASSERT (mem_pools_stats.pools_count > 0);
mem_pools_stats.pools_count--;
mem_pools_stats.free_chunks++;
} /* mem_pools_stat_free_pool */
/**
 * Account for giving a free chunk back to the heap
*/
static void
mem_pools_stat_dealloc (void)
{
mem_pools_stats.free_chunks--;
} /* mem_pools_stat_dealloc */
#endif /* MEM_STATS */
/**
* @}
* @}
*/
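Similarly, a minimal sketch of the pool-manager side with the jmem_ names (again assuming jmem_init () has been called; the chunk contents are illustrative only):
#include <stdint.h>
#include <string.h>
#include "jmem-poolman.h"
static void
pool_api_sketch (void)
{
  /* Pool chunks have a fixed size of JMEM_POOL_CHUNK_SIZE bytes and are
   * recycled through a LIFO free list. */
  uint8_t *chunk_p = (uint8_t *) jmem_pools_alloc ();
  if (chunk_p != NULL)
  {
    memset (chunk_p, 0, JMEM_POOL_CHUNK_SIZE);
    jmem_pools_free (chunk_p);
  }
  /* Return any cached, unused chunks to the heap. */
  jmem_pools_collect_empty ();
}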

View File

@ -38,7 +38,7 @@ util_free_literal (lexer_literal_t *literal_p) /**< literal */
{
if (!(literal_p->status_flags & LEXER_FLAG_SOURCE_PTR))
{
mem_heap_free_block_size_stored ((void *) literal_p->u.char_p);
jmem_heap_free_block_size_stored ((void *) literal_p->u.char_p);
}
}
else if ((literal_p->type == LEXER_FUNCTION_LITERAL)

View File

@ -45,7 +45,7 @@
#include "ecma-globals.h"
#include "ecma-regexp-object.h"
#include "lit-literal.h"
#include "mem-heap.h"
#include "jmem-heap.h"
/* Immediate management. */

View File

@ -1202,7 +1202,7 @@ lexer_process_char_literal (parser_context_t *context_p, /**< context */
if (has_escape)
{
literal_p->u.char_p = (uint8_t *) mem_heap_alloc_block_store_size (length);
literal_p->u.char_p = (uint8_t *) jmem_heap_alloc_block_store_size (length);
memcpy ((uint8_t *) literal_p->u.char_p, char_p, length);
}
else

View File

@ -54,7 +54,7 @@
/* Maximum code size.
* Limit: 16777215. Recommended: 65535, 16777215. */
#ifndef PARSER_MAXIMUM_CODE_SIZE
#define PARSER_MAXIMUM_CODE_SIZE (65535 << (MEM_ALIGNMENT_LOG))
#define PARSER_MAXIMUM_CODE_SIZE (65535 << (JMEM_ALIGNMENT_LOG))
#endif /* !PARSER_MAXIMUM_CODE_SIZE */
/* Maximum number of values pushed onto the stack by a function.

View File

@ -39,7 +39,7 @@ parser_malloc (parser_context_t *context_p, /**< context */
void *result;
JERRY_ASSERT (size > 0);
result = mem_heap_alloc_block (size);
result = jmem_heap_alloc_block (size);
if (result == 0)
{
parser_raise_error (context_p, PARSER_ERR_OUT_OF_MEMORY);
@ -53,7 +53,7 @@ parser_malloc (parser_context_t *context_p, /**< context */
void parser_free (void *ptr, /**< pointer to free */
size_t size) /**< size of the memory block */
{
mem_heap_free_block (ptr, size);
jmem_heap_free_block (ptr, size);
} /* parser_free */
/**
@ -68,7 +68,7 @@ parser_malloc_local (parser_context_t *context_p, /**< context */
void *result;
JERRY_ASSERT (size > 0);
result = mem_heap_alloc_block (size);
result = jmem_heap_alloc_block (size);
if (result == 0)
{
parser_raise_error (context_p, PARSER_ERR_OUT_OF_MEMORY);
@ -82,7 +82,7 @@ parser_malloc_local (parser_context_t *context_p, /**< context */
void parser_free_local (void *ptr, /**< pointer to free */
size_t size) /**< size of the memory */
{
mem_heap_free_block (ptr, size);
jmem_heap_free_block (ptr, size);
} /* parser_free_local */
/**********************************************************************/

View File

@ -105,7 +105,7 @@ parser_compute_indicies (parser_context_t *context_p, /**< context */
if (!(literal_p->status_flags & LEXER_FLAG_SOURCE_PTR))
{
mem_heap_free_block_size_stored ((void *) char_p);
jmem_heap_free_block_size_stored ((void *) char_p);
}
}
}
@ -527,7 +527,7 @@ parser_generate_initializers (parser_context_t *context_p, /**< context */
if (!context_p->is_show_opcodes
&& !(literal_p->status_flags & LEXER_FLAG_SOURCE_PTR))
{
mem_heap_free_block_size_stored ((void *) literal_p->u.char_p);
jmem_heap_free_block_size_stored ((void *) literal_p->u.char_p);
}
#else /* !PARSER_DUMP_BYTE_CODE */
literal_pool_p[literal_p->prop.index] = literal_p->u.value;
@ -1460,12 +1460,12 @@ parser_post_processing (parser_context_t *context_p) /**< context */
}
total_size += length + context_p->literal_count * sizeof (lit_cpointer_t);
total_size = JERRY_ALIGNUP (total_size, MEM_ALIGNMENT);
total_size = JERRY_ALIGNUP (total_size, JMEM_ALIGNMENT);
compiled_code_p = (ecma_compiled_code_t *) parser_malloc (context_p, total_size);
byte_code_p = (uint8_t *) compiled_code_p;
compiled_code_p->size = (uint16_t) (total_size >> MEM_ALIGNMENT_LOG);
compiled_code_p->size = (uint16_t) (total_size >> JMEM_ALIGNMENT_LOG);
compiled_code_p->refs = 1;
compiled_code_p->status_flags = CBC_CODE_FLAGS_FUNCTION;
@ -1695,7 +1695,7 @@ parser_post_processing (parser_context_t *context_p) /**< context */
if ((literal_p->type == LEXER_IDENT_LITERAL || literal_p->type == LEXER_STRING_LITERAL)
&& !(literal_p->status_flags & LEXER_FLAG_SOURCE_PTR))
{
mem_heap_free_block_size_stored ((void *) literal_p->u.char_p);
jmem_heap_free_block_size_stored ((void *) literal_p->u.char_p);
}
}
}

View File

@ -54,11 +54,11 @@ re_realloc_regexp_bytecode_block (re_bytecode_ctx_t *bc_ctx_p) /**< RegExp bytec
JERRY_ASSERT (bc_ctx_p->current_p >= bc_ctx_p->block_start_p);
size_t current_ptr_offset = (size_t) (bc_ctx_p->current_p - bc_ctx_p->block_start_p);
uint8_t *new_block_start_p = (uint8_t *) mem_heap_alloc_block (new_block_size);
uint8_t *new_block_start_p = (uint8_t *) jmem_heap_alloc_block (new_block_size);
if (bc_ctx_p->current_p)
{
memcpy (new_block_start_p, bc_ctx_p->block_start_p, (size_t) (current_ptr_offset));
mem_heap_free_block (bc_ctx_p->block_start_p, old_size);
jmem_heap_free_block (bc_ctx_p->block_start_p, old_size);
}
bc_ctx_p->block_start_p = new_block_start_p;
bc_ctx_p->block_end_p = new_block_start_p + new_block_size;
@ -109,10 +109,10 @@ re_bytecode_list_insert (re_bytecode_ctx_t *bc_ctx_p, /**< RegExp bytecode conte
{
uint8_t *dest_p = src_p + length;
uint8_t *tmp_block_start_p;
tmp_block_start_p = (uint8_t *) mem_heap_alloc_block (re_get_bytecode_length (bc_ctx_p) - offset);
tmp_block_start_p = (uint8_t *) jmem_heap_alloc_block (re_get_bytecode_length (bc_ctx_p) - offset);
memcpy (tmp_block_start_p, src_p, (size_t) (re_get_bytecode_length (bc_ctx_p) - offset));
memcpy (dest_p, tmp_block_start_p, (size_t) (re_get_bytecode_length (bc_ctx_p) - offset));
mem_heap_free_block (tmp_block_start_p, re_get_bytecode_length (bc_ctx_p) - offset);
jmem_heap_free_block (tmp_block_start_p, re_get_bytecode_length (bc_ctx_p) - offset);
}
memcpy (src_p, bytecode_p, length);

View File

@ -86,7 +86,7 @@ typedef enum
typedef struct
{
ecma_compiled_code_t header; /**< compiled code header */
mem_cpointer_t pattern_cp; /**< original RegExp pattern */
jmem_cpointer_t pattern_cp; /**< original RegExp pattern */
uint32_t num_of_captures; /**< number of capturing brackets */
uint32_t num_of_non_captures; /**< number of non capturing brackets */
} re_compiled_code_t;

View File

@ -19,7 +19,7 @@
#include "ecma-regexp-object.h"
#include "ecma-try-catch-macro.h"
#include "jrt-libc-includes.h"
#include "mem-heap.h"
#include "jmem-heap.h"
#include "re-bytecode.h"
#include "re-compiler.h"
#include "re-parser.h"
@ -597,7 +597,7 @@ re_compile_bytecode (const re_compiled_code_t **out_bytecode_p, /**< [out] point
{
/* Compilation failed, free bytecode. */
JERRY_DDLOG ("RegExp compilation failed!\n");
mem_heap_free_block (bc_ctx.block_start_p, byte_code_size);
jmem_heap_free_block (bc_ctx.block_start_p, byte_code_size);
*out_bytecode_p = NULL;
}
else
@ -610,7 +610,7 @@ re_compile_bytecode (const re_compiled_code_t **out_bytecode_p, /**< [out] point
JERRY_ASSERT (bc_ctx.block_start_p != NULL);
*out_bytecode_p = (re_compiled_code_t *) bc_ctx.block_start_p;
((re_compiled_code_t *) bc_ctx.block_start_p)->header.size = (uint16_t) (byte_code_size >> MEM_ALIGNMENT_LOG);
((re_compiled_code_t *) bc_ctx.block_start_p)->header.size = (uint16_t) (byte_code_size >> JMEM_ALIGNMENT_LOG);
if (cache_idx == RE_CACHE_SIZE)
{

View File

@ -68,12 +68,12 @@ vm_stack_context_abort (vm_frame_ctx_t *frame_ctx_p, /**< frame context */
}
case VM_CONTEXT_FOR_IN:
{
mem_cpointer_t current = (uint16_t) vm_stack_top_p[-2];
jmem_cpointer_t current = (uint16_t) vm_stack_top_p[-2];
while (current != MEM_CP_NULL)
while (current != JMEM_CP_NULL)
{
ecma_collection_chunk_t *chunk_p = MEM_CP_GET_NON_NULL_POINTER (ecma_collection_chunk_t,
current);
ecma_collection_chunk_t *chunk_p = JMEM_CP_GET_NON_NULL_POINTER (ecma_collection_chunk_t,
current);
lit_utf8_byte_t *data_ptr = chunk_p->data;
ecma_free_value (*(ecma_value_t *) data_ptr);

View File

@ -1998,7 +1998,7 @@ vm_loop (vm_frame_ctx_t *frame_ctx_p) /**< frame context */
case VM_OC_FOR_IN_GET_NEXT:
{
ecma_value_t *context_top_p = frame_ctx_p->registers_p + register_end + frame_ctx_p->context_depth;
ecma_collection_chunk_t *chunk_p = MEM_CP_GET_NON_NULL_POINTER (ecma_collection_chunk_t, context_top_p[-2]);
ecma_collection_chunk_t *chunk_p = JMEM_CP_GET_NON_NULL_POINTER (ecma_collection_chunk_t, context_top_p[-2]);
JERRY_ASSERT (VM_GET_CONTEXT_TYPE (context_top_p[-1]) == VM_CONTEXT_FOR_IN);
@ -2015,7 +2015,7 @@ vm_loop (vm_frame_ctx_t *frame_ctx_p) /**< frame context */
while (true)
{
if (stack_top_p[-2] == MEM_CP_NULL)
if (stack_top_p[-2] == JMEM_CP_NULL)
{
ecma_free_value (stack_top_p[-3]);
@ -2024,7 +2024,7 @@ vm_loop (vm_frame_ctx_t *frame_ctx_p) /**< frame context */
break;
}
ecma_collection_chunk_t *chunk_p = MEM_CP_GET_NON_NULL_POINTER (ecma_collection_chunk_t, stack_top_p[-2]);
ecma_collection_chunk_t *chunk_p = JMEM_CP_GET_NON_NULL_POINTER (ecma_collection_chunk_t, stack_top_p[-2]);
lit_utf8_byte_t *data_ptr = chunk_p->data;
ecma_string_t *prop_name_p = ecma_get_string_from_value (*(ecma_value_t *) data_ptr);

View File

@ -14,7 +14,7 @@
* limitations under the License.
*/
#include "mem-allocator.h"
#include "jmem-allocator.h"
#include "test-common.h"
@ -35,17 +35,17 @@ size_t sizes[test_sub_iters];
bool is_one_chunked[test_sub_iters];
static void
test_heap_give_some_memory_back (mem_try_give_memory_back_severity_t severity)
test_heap_give_some_memory_back (jmem_try_give_memory_back_severity_t severity)
{
int p;
if (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
if (severity == JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_LOW)
{
p = 8;
}
else
{
JERRY_ASSERT (severity == MEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
JERRY_ASSERT (severity == JMEM_TRY_GIVE_MEMORY_BACK_SEVERITY_HIGH);
p = 1;
}
@ -61,7 +61,7 @@ test_heap_give_some_memory_back (mem_try_give_memory_back_severity_t severity)
JERRY_ASSERT (ptrs[i][k] == 0);
}
mem_heap_free_block_size_stored (ptrs[i]);
jmem_heap_free_block_size_stored (ptrs[i]);
ptrs[i] = NULL;
}
}
@ -74,27 +74,27 @@ main (int __attr_unused___ argc,
{
TEST_INIT ();
mem_heap_init ();
jmem_heap_init ();
mem_register_a_try_give_memory_back_callback (test_heap_give_some_memory_back);
jmem_register_a_try_give_memory_back_callback (test_heap_give_some_memory_back);
#ifdef MEM_STATS
mem_heap_stats_print ();
#endif /* MEM_STATS */
#ifdef JMEM_STATS
jmem_heap_stats_print ();
#endif /* JMEM_STATS */
for (uint32_t i = 0; i < test_iters; i++)
{
for (uint32_t j = 0; j < test_sub_iters; j++)
{
size_t size = (size_t) rand () % test_threshold_block_size;
ptrs[j] = (uint8_t *) mem_heap_alloc_block_store_size (size);
ptrs[j] = (uint8_t *) jmem_heap_alloc_block_store_size (size);
sizes[j] = size;
JERRY_ASSERT (sizes[j] == 0 || ptrs[j] != NULL);
memset (ptrs[j], 0, sizes[j]);
}
// mem_heap_print (true);
/* jmem_heap_print (true); */
for (uint32_t j = 0; j < test_sub_iters; j++)
{
@ -105,16 +105,16 @@ main (int __attr_unused___ argc,
JERRY_ASSERT (ptrs[j][k] == 0);
}
mem_heap_free_block_size_stored (ptrs[j]);
jmem_heap_free_block_size_stored (ptrs[j]);
ptrs[j] = NULL;
}
}
}
#ifdef MEM_STATS
mem_heap_stats_print ();
#endif /* MEM_STATS */
#ifdef JMEM_STATS
jmem_heap_stats_print ();
#endif /* JMEM_STATS */
return 0;
} /* main */

View File

@ -28,7 +28,7 @@ main (int __attr_unused___ argc,
{
TEST_INIT ();
mem_init ();
jmem_init ();
lit_init ();
ecma_init ();
@ -78,7 +78,7 @@ main (int __attr_unused___ argc,
ecma_finalize ();
lit_finalize ();
mem_finalize (true);
jmem_finalize (true);
return 0;
} /* main */

View File

@ -91,7 +91,7 @@ main (int __attr_unused___ argc,
lit_utf8_byte_t strings[test_sub_iters][max_characters_in_string + 1];
lit_utf8_size_t lengths[test_sub_iters];
mem_init ();
jmem_init ();
lit_init ();
for (uint32_t i = 0; i < test_iters; i++)
@ -159,6 +159,6 @@ main (int __attr_unused___ argc,
}
lit_finalize ();
mem_finalize (true);
jmem_finalize (true);
return 0;
} /* main */

View File

@ -20,8 +20,8 @@
#define JERRY_MEM_POOL_INTERNAL
#include "mem-allocator.h"
#include "mem-poolman.h"
#include "jmem-allocator.h"
#include "jmem-poolman.h"
#include "test-common.h"
@ -32,7 +32,7 @@ const uint32_t test_iters = 1024;
#define TEST_MAX_SUB_ITERS 1024
uint8_t *ptrs[TEST_MAX_SUB_ITERS];
uint8_t data[TEST_MAX_SUB_ITERS][MEM_POOL_CHUNK_SIZE];
uint8_t data[TEST_MAX_SUB_ITERS][JMEM_POOL_CHUNK_SIZE];
int
main (int __attr_unused___ argc,
@ -40,7 +40,7 @@ main (int __attr_unused___ argc,
{
TEST_INIT ();
mem_init ();
jmem_init ();
for (uint32_t i = 0; i < test_iters; i++)
{
@ -48,42 +48,42 @@ main (int __attr_unused___ argc,
for (size_t j = 0; j < subiters; j++)
{
ptrs[j] = (uint8_t *) mem_pools_alloc ();
ptrs[j] = (uint8_t *) jmem_pools_alloc ();
if (ptrs[j] != NULL)
{
for (size_t k = 0; k < MEM_POOL_CHUNK_SIZE; k++)
for (size_t k = 0; k < JMEM_POOL_CHUNK_SIZE; k++)
{
ptrs[j][k] = (uint8_t) (rand () % 256);
}
memcpy (data[j], ptrs[j], MEM_POOL_CHUNK_SIZE);
memcpy (data[j], ptrs[j], JMEM_POOL_CHUNK_SIZE);
}
}
// mem_heap_print (false);
/* jmem_heap_print (false); */
for (size_t j = 0; j < subiters; j++)
{
if (rand () % 256 == 0)
{
mem_pools_collect_empty ();
jmem_pools_collect_empty ();
}
if (ptrs[j] != NULL)
{
JERRY_ASSERT (!memcmp (data[j], ptrs[j], MEM_POOL_CHUNK_SIZE));
JERRY_ASSERT (!memcmp (data[j], ptrs[j], JMEM_POOL_CHUNK_SIZE));
mem_pools_free (ptrs[j]);
jmem_pools_free (ptrs[j]);
}
}
}
#ifdef MEM_STATS
mem_pools_stats_print ();
#endif /* MEM_STATS */
#ifdef JMEM_STATS
jmem_pools_stats_print ();
#endif /* JMEM_STATS */
mem_finalize (false);
jmem_finalize (false);
return 0;
} /* main */

View File

@ -107,7 +107,7 @@ main (int __attr_unused___ argc,
{
TEST_INIT ();
mem_init ();
jmem_init ();
lit_init ();
ecma_init ();
@ -221,7 +221,7 @@ main (int __attr_unused___ argc,
ecma_finalize ();
lit_finalize ();
mem_finalize (true);
jmem_finalize (true);
return 0;
} /* main */