#include "../libnetdata.h"
#include "aral.h"
#ifdef NETDATA_TRACE_ALLOCATIONS
#define TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS , const char *file, const char *function, size_t line
#define TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS , file, function, line
#else
#define TRACE_ALLOCATIONS_FUNCTION_DEFINITION_PARAMS
#define TRACE_ALLOCATIONS_FUNCTION_CALL_PARAMS
#endif

// tolerate this many free elements of difference before rearranging the pages list
#define ARAL_FREE_PAGES_DELTA_TO_REARRANGE_LIST 5

// max file size, for mmap-backed pages
#define ARAL_MAX_PAGE_SIZE_MMAP (1*1024*1024*1024)

// max malloc size per page
// the optimal for current versions of libc is up to 256KiB;
// to have the same overhead as libc itself, the ideal is 4KiB
#define ARAL_MAX_PAGE_SIZE_MALLOC (65*1024)
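
// the free-list node: it is stored inside the free space of the page itself,
// so tracking free elements costs no extra memory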
typedef struct aral_free {
    size_t size;
    struct aral_free *next;
} ARAL_FREE;
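
// a page is a contiguous memory area (malloc-ed, or mmap-ed to a file)
// that is sliced into max_elements equal-sized elements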
typedef struct aral_page {
    size_t size;                            // the allocation size of the page
    const char *filename;                   // the backing file of this page, when mmap is used
    uint8_t *data;                          // the memory area of the page

    uint32_t free_elements_to_move_first;   // the free elements threshold at which this page is moved first on the list
    uint32_t max_elements;                  // the number of elements that can fit on this page

    struct {
        // protected by the ARAL spinlock
        uint32_t used_elements;             // the number of used elements on this page
        uint32_t free_elements;             // the number of free elements on this page

        struct aral_page *prev;             // the prev page on the list
        struct aral_page *next;             // the next page on the list
    } aral_lock;

    struct {
        // protected by this spinlock
        SPINLOCK spinlock;
        ARAL_FREE *list;                    // the free elements of this page
    } free;
} ARAL_PAGE;

typedef enum {
    ARAL_LOCKLESS        = (1 << 0), // the caller guarantees single-threaded access, so no locks are taken
    ARAL_DEFRAGMENT      = (1 << 1), // keep the pages list sorted, packing new allocations into the most used pages
    ARAL_ALLOCATED_STATS = (1 << 2), // the stats structure is owned by this ARAL and freed together with it
} ARAL_OPTIONS;
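
// the allocator itself; config is written once at creation and is read-only
// afterwards; the members grouped under aral_lock and adders are protected by
// their respective spinlocks, and atomic members are accessed with atomics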
struct aral {
    struct {
        char name[ARAL_MAX_NAME + 1];
        ARAL_OPTIONS options;

        size_t element_size;            // calculated to take into account ARAL overheads
        size_t max_allocation_size;     // calculated in bytes
        size_t max_page_elements;       // calculated
        size_t page_ptr_offset;         // calculated
        size_t natural_page_size;       // calculated

        size_t initial_page_elements;   // as requested by the caller
        size_t requested_element_size;  // as requested by the caller
        size_t requested_max_page_size; // as requested by the caller

        struct {
            bool enabled;               // true when pages are mmap-ed to files
            const char *filename;       // the base name for the backing files
            char **cache_dir;           // the directory to create the files in
        } mmap;
    } config;

    struct {
        SPINLOCK spinlock;
        size_t file_number;             // for mmap
        struct aral_page *pages;        // linked list of pages

        // statistics, protected by the same spinlock
        size_t user_malloc_operations;
        size_t user_free_operations;
        size_t defragment_operations;
        size_t defragment_linked_list_traversals;
    } aral_lock;

    struct {
        SPINLOCK spinlock;
        size_t allocating_elements;     // currently allocating elements
        size_t allocation_size;         // current / next allocation size
    } adders;

    struct {
        size_t allocators;              // the number of threads currently trying to allocate memory
    } atomic;

    struct aral_statistics *stats;
};

// the memory used by the ARAL management structures themselves
size_t aral_structures_from_stats(struct aral_statistics *stats) {
    return __atomic_load_n(&stats->structures.allocated_bytes, __ATOMIC_RELAXED);
}

// bytes allocated with malloc but not currently handed out to callers
size_t aral_overhead_from_stats(struct aral_statistics *stats) {
    return __atomic_load_n(&stats->malloc.allocated_bytes, __ATOMIC_RELAXED) -
           __atomic_load_n(&stats->malloc.used_bytes, __ATOMIC_RELAXED);
}

size_t aral_overhead(ARAL *ar) {
    return aral_overhead_from_stats(ar->stats);
}

size_t aral_structures(ARAL *ar) {
    return aral_structures_from_stats(ar->stats);
}

struct aral_statistics *aral_statistics(ARAL *ar) {
    return ar->stats;
}
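
// A minimal usage sketch for the accessors above - illustrative only.
// It assumes the aral_create()/aral_mallocz()/aral_freez() API declared
// in aral.h; MY_ITEM is a hypothetical caller-side type:
//
//     ARAL *ar = aral_create("my-items", sizeof(MY_ITEM),
//                            0,          // initial page elements (0 = default)
//                            65536,      // max page size in bytes
//                            NULL,       // stats (NULL = allocate internally)
//                            NULL, NULL, // mmap filename and cache dir
//                            false,      // don't use mmap
//                            false);     // not lockless
//     MY_ITEM *item = aral_mallocz(ar);
//     // ... use item ...
//     aral_freez(ar, item);
//     fprintf(stderr, "overhead: %zu bytes, structures: %zu bytes\n",
//             aral_overhead(ar), aral_structures(ar));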

// align all elements to twice the pointer size (16 bytes on 64-bit)
#define ARAL_NATURAL_ALIGNMENT (sizeof(uintptr_t) * 2)

// round size up to the next multiple of alignment
static inline size_t natural_alignment(size_t size, size_t alignment) {