mirror of
https://github.com/morgan9e/systemd
synced 2026-04-15 17:06:39 +09:00
Before we had the following scheme: mempool_enabled() would check mempool_use_allowed, and libsystemd-shared would be linked with a .c file that provides mempool_use_allowed=true, while other things would be linked with a different .c file with mempool_use_allowed=false. In the new scheme, mempool_enabled() itself is a weak symbol. If it's not found, we assume false. So it only needs to be provided for libsystemd-shared, where it can return false or true. test-set-disable-mempool is libshared, so it gets the symbol. But then we actually disable the mempool via envvar. mempool_enabled() is called to check its return value directly.
84 lines
2.0 KiB
C
84 lines
2.0 KiB
C
/* SPDX-License-Identifier: LGPL-2.1-or-later */
|
|
|
|
#include <stdint.h>
|
|
#include <stdlib.h>
|
|
|
|
#include "macro.h"
|
|
#include "memory-util.h"
|
|
#include "mempool.h"
|
|
|
|
/* One contiguous allocation providing storage for a batch of tiles. The
 * tiles themselves follow the header at ALIGN(sizeof(struct pool)). */
struct pool {
        struct pool *next;     /* singly-linked list of pools; newest pool is mp->first_pool */
        size_t n_tiles;        /* total number of tiles this pool can hold */
        size_t n_used;         /* tiles handed out so far (bump-allocation index) */
};
|
|
|
|
void* mempool_alloc_tile(struct mempool *mp) {
|
|
size_t i;
|
|
|
|
/* When a tile is released we add it to the list and simply
|
|
* place the next pointer at its offset 0. */
|
|
|
|
assert(mp->tile_size >= sizeof(void*));
|
|
assert(mp->at_least > 0);
|
|
|
|
if (mp->freelist) {
|
|
void *r;
|
|
|
|
r = mp->freelist;
|
|
mp->freelist = * (void**) mp->freelist;
|
|
return r;
|
|
}
|
|
|
|
if (_unlikely_(!mp->first_pool) ||
|
|
_unlikely_(mp->first_pool->n_used >= mp->first_pool->n_tiles)) {
|
|
size_t size, n;
|
|
struct pool *p;
|
|
|
|
n = mp->first_pool ? mp->first_pool->n_tiles : 0;
|
|
n = MAX(mp->at_least, n * 2);
|
|
size = PAGE_ALIGN(ALIGN(sizeof(struct pool)) + n*mp->tile_size);
|
|
n = (size - ALIGN(sizeof(struct pool))) / mp->tile_size;
|
|
|
|
p = malloc(size);
|
|
if (!p)
|
|
return NULL;
|
|
|
|
p->next = mp->first_pool;
|
|
p->n_tiles = n;
|
|
p->n_used = 0;
|
|
|
|
mp->first_pool = p;
|
|
}
|
|
|
|
i = mp->first_pool->n_used++;
|
|
|
|
return ((uint8_t*) mp->first_pool) + ALIGN(sizeof(struct pool)) + i*mp->tile_size;
|
|
}
|
|
|
|
void* mempool_alloc0_tile(struct mempool *mp) {
|
|
void *p;
|
|
|
|
p = mempool_alloc_tile(mp);
|
|
if (p)
|
|
memzero(p, mp->tile_size);
|
|
return p;
|
|
}
|
|
|
|
void mempool_free_tile(struct mempool *mp, void *p) {
|
|
* (void**) p = mp->freelist;
|
|
mp->freelist = p;
|
|
}
|
|
|
|
#if VALGRIND
/* Frees every pool of the mempool. Compiled only for valgrind runs so
 * that pool memory does not show up as "still reachable" at exit; note
 * that mp->first_pool is deliberately left untouched, matching the
 * teardown-only usage of this helper. */
void mempool_drop(struct mempool *mp) {
        struct pool *p = mp->first_pool;

        while (p) {
                struct pool *next = p->next;

                free(p);
                p = next;
        }
}
#endif
|