
Commit 3ae6cbb

EvgenyMekhanik authored and kyukhin committed
small: changed small allocator pool management
In the previous version, the allocator created a new pool when necessary and inserted it into the pool tree. Now we allocate all pools at allocator creation time, according to alloc_factor. We use the small_class size evaluator for this purpose, and we also use it to find the necessary pool when allocating memory. This is faster than the previous allocator behavior and also fixes #5216.

Closes #5216
1 parent 59b5ccc · commit 3ae6cbb
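
To make the change easier to follow, here is a minimal standalone sketch of the scheme the commit message describes. The names (toy_alloc, toy_pool, TOY_POOL_MAX) are illustrative only and not part of the library, and a binary search stands in for the constant-time small_class lookup used in the real code: all pools are created up front, each roughly alloc_factor times larger than the previous one, and an allocation simply picks the first pool large enough for its size.

/*
 * Sketch only: pre-create exponentially sized pools, then find a pool
 * by search instead of building pools on demand in a tree.
 */
#include <assert.h>
#include <stddef.h>
#include <stdio.h>

enum { TOY_POOL_MAX = 1024 };

struct toy_pool {
	size_t objsize;	/* largest object size this pool serves */
};

struct toy_alloc {
	struct toy_pool pools[TOY_POOL_MAX];
	unsigned pool_count;
	size_t objsize_max;
};

/* Create all pools at allocator creation time, driven by factor. */
static void
toy_alloc_create(struct toy_alloc *a, size_t objsize_min,
		 size_t objsize_max, float factor)
{
	size_t objsize = 0;
	a->pool_count = 0;
	while (objsize < objsize_max && a->pool_count < TOY_POOL_MAX) {
		size_t next = objsize < objsize_min ? objsize_min :
			      (size_t)(objsize * factor);
		if (next <= objsize)
			next = objsize + 1;	/* guarantee growth */
		if (next > objsize_max)
			next = objsize_max;
		objsize = next;
		a->pools[a->pool_count++].objsize = objsize;
	}
	a->objsize_max = objsize;
}

/* Find the smallest pool whose objsize covers the request. */
static struct toy_pool *
toy_pool_search(struct toy_alloc *a, size_t size)
{
	if (size > a->objsize_max)
		return NULL;	/* caller would fall back to large slabs */
	unsigned lo = 0, hi = a->pool_count - 1;
	while (lo < hi) {	/* binary search stands in for small_class */
		unsigned mid = lo + (hi - lo) / 2;
		if (a->pools[mid].objsize < size)
			lo = mid + 1;
		else
			hi = mid;
	}
	return &a->pools[lo];
}

int
main(void)
{
	struct toy_alloc a;
	toy_alloc_create(&a, 8, 4096, 1.05f);
	struct toy_pool *p = toy_pool_search(&a, 100);
	assert(p != NULL && p->objsize >= 100);
	printf("size 100 -> pool objsize %zu (%u pools)\n",
	       p->objsize, a.pool_count);
	return 0;
}

The real allocator replaces the binary search with small_class_calc_offset_by_size(), which maps a size to its pool index in O(1), as seen in factor_pool_search() in the small/small.c diff below.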

File tree

7 files changed: +94, -212 lines


perf/small_alloc_perf.c

Lines changed: 8 additions & 3 deletions
@@ -209,8 +209,10 @@ small_alloc_basic(unsigned int slab_size)
 	slab_arena_create(&arena, &quota, 0, slab_size, MAP_PRIVATE);
 	slab_cache_create(&cache, &arena);
 	for (unsigned int i = 0; i < SZR(slab_alloc_factor); i++) {
+		float actual_alloc_factor;
 		small_alloc_create(&alloc, &cache,
-				   OBJSIZE_MIN, slab_alloc_factor[i]);
+				   OBJSIZE_MIN, slab_alloc_factor[i],
+				   &actual_alloc_factor);
 		int size_min = OBJSIZE_MIN;
 		int size_max = (int)alloc.objsize_max - 1;
 		fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm1) == 0);
@@ -248,8 +250,10 @@ small_alloc_basic(unsigned int slab_size)
 		print_json_test_header("exponent");
 	}
 	for (unsigned int i = 0; i < SZR(slab_alloc_factor); i++) {
+		float actual_alloc_factor;
 		small_alloc_create(&alloc, &cache,
-				   OBJSIZE_MIN, slab_alloc_factor[i]);
+				   OBJSIZE_MIN, slab_alloc_factor[i],
+				   &actual_alloc_factor);
 		int size_min = OBJSIZE_MIN;
 		int size_max = (int)alloc.objsize_max - 1;
 		fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm1) == 0);
@@ -295,8 +299,9 @@ small_alloc_large()
 		print_json_test_header("large");
 	}
 	for (unsigned int i = 0; i < SZR(slab_alloc_factor); i++) {
+		float actual_alloc_factor;
 		small_alloc_create(&alloc, &cache, OBJSIZE_MIN,
-				   slab_alloc_factor[i]);
+				   slab_alloc_factor[i], &actual_alloc_factor);
 		fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm1) == 0);
 		small_alloc_test(large_size_min, large_size_max, 200, 1, 25);
 		fail_unless(clock_gettime (CLOCK_MONOTONIC, &tm2) == 0);
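
Note the signature change visible above: small_alloc_create() now takes an extra out parameter through which the allocator reports the alloc_factor it actually uses (the requested factor may be adjusted by small_class_create). Below is a rough usage sketch of the new call pattern; the header paths and the quota/arena setup are assumed boilerplate in the spirit of the surrounding test, not something introduced by this commit.

/*
 * Usage sketch of the new small_alloc_create() signature. The setup
 * (headers, quota_init, slab sizes) is assumed, not taken from this diff.
 */
#include <stdio.h>
#include <sys/mman.h>		/* MAP_PRIVATE */
#include <small/quota.h>
#include <small/slab_arena.h>
#include <small/slab_cache.h>
#include <small/small.h>

int
main(void)
{
	struct quota quota;
	struct slab_arena arena;
	struct slab_cache cache;
	struct small_alloc alloc;

	quota_init(&quota, QUOTA_MAX);
	slab_arena_create(&arena, &quota, 0, 4 * 1024 * 1024, MAP_PRIVATE);
	slab_cache_create(&cache, &arena);

	/* New: the allocator reports the factor it actually uses. */
	float actual_alloc_factor;
	small_alloc_create(&alloc, &cache, 64, 1.05f, &actual_alloc_factor);
	printf("requested factor 1.05, actual %f\n", actual_alloc_factor);

	void *p = smalloc(&alloc, 100);
	if (p != NULL)
		smfree(&alloc, p, 100);
	return 0;	/* teardown omitted for brevity */
}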

small/small.c

Lines changed: 54 additions & 165 deletions
@@ -33,122 +33,59 @@
 #include <string.h>
 #include <stdio.h>
 
-enum {
-	/** Step size for stepped pools, in bytes */
-	STEP_SIZE = 8,
-	/**
-	 * LB stands for logarithm with binary base, this constant
-	 * is used for bit shifts, when we need to divide by
-	 * STEP_SIZE.
-	 */
-	STEP_SIZE_LB = 3,
-};
-
-rb_proto(, factor_tree_, factor_tree_t, struct factor_pool)
-
-/** Used for search in the tree. */
-static inline int
-factor_pool_cmp(const struct factor_pool *a, const struct factor_pool *b)
+static inline struct factor_pool *
+factor_pool_search(struct small_alloc *alloc, size_t size)
 {
-	return a->pool.objsize > b->pool.objsize ? 1 :
-		a->pool.objsize < b->pool.objsize ? -1 : 0;
+	if (size > alloc->objsize_max)
+		return NULL;
+	unsigned cls = small_class_calc_offset_by_size(&alloc->small_class, size);
+	struct factor_pool *pool = &alloc->factor_pool_cache[cls];
+	return pool;
 }
 
-rb_gen(, factor_tree_, factor_tree_t, struct factor_pool, node,
-       factor_pool_cmp)
-
-static inline struct factor_pool *
-factor_pool_create(struct small_alloc *alloc,
-		   struct factor_pool *upper_bound,
-		   size_t size)
+static inline void
+factor_pool_create(struct small_alloc *alloc)
 {
-	assert(size > alloc->step_pool_objsize_max);
-	assert(size <= alloc->objsize_max);
-
-	if (alloc->factor_pool_next == NULL) {
-		/**
-		 * Too many factored pools already, fall back
-		 * to an imperfect one.
-		 */
-		return upper_bound;
+	size_t objsize = 0;
+	for (alloc->factor_pool_cache_size = 0;
+	     objsize < alloc->objsize_max && alloc->factor_pool_cache_size < FACTOR_POOL_MAX;
+	     alloc->factor_pool_cache_size++) {
+		size_t prevsize = objsize;
+		objsize = small_class_calc_size_by_offset(&alloc->small_class,
+			alloc->factor_pool_cache_size);
+		if (objsize > alloc->objsize_max)
+			objsize = alloc->objsize_max;
+		struct factor_pool *pool =
+			&alloc->factor_pool_cache[alloc->factor_pool_cache_size];
+		mempool_create(&pool->pool, alloc->cache, objsize);
+		pool->objsize_min = prevsize + 1;
 	}
-	size_t objsize = alloc->step_pool_objsize_max;
-	size_t prevsize;
-	do {
-		prevsize = objsize;
-		/*
-		 * Align objsize after each multiplication to
-		 * ensure that the distance between objsizes of
-		 * factored pools is a multiple of STEP_SIZE.
-		 */
-		objsize = small_align(objsize * alloc->factor,
-				      sizeof(intptr_t));
-		assert(objsize > alloc->step_pool_objsize_max);
-	} while (objsize < size);
-	if (objsize > alloc->objsize_max)
-		objsize = alloc->objsize_max;
-	struct factor_pool *pool = alloc->factor_pool_next;
-	alloc->factor_pool_next = pool->next;
-	mempool_create(&pool->pool, alloc->cache, objsize);
-	pool->objsize_min = prevsize + 1;
-	factor_tree_insert(&alloc->factor_pools, pool);
-	return pool;
+	alloc->objsize_max = objsize;
 }
 
 /** Initialize the small allocator. */
 void
 small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
-		   uint32_t objsize_min, float alloc_factor)
+		   uint32_t objsize_min, float alloc_factor,
+		   float *actual_alloc_factor)
 {
 	alloc->cache = cache;
 	/* Align sizes. */
-	objsize_min = small_align(objsize_min, STEP_SIZE);
-	alloc->step_pool0_step_count = (objsize_min - 1) >> STEP_SIZE_LB;
+	objsize_min = small_align(objsize_min, sizeof(intptr_t));
 	/* Make sure at least 4 largest objects can fit in a slab. */
 	alloc->objsize_max =
 		mempool_objsize_max(slab_order_size(cache, cache->order_max));
 
-	if (!(alloc->objsize_max > objsize_min + STEP_POOL_MAX * STEP_SIZE)) {
-		fprintf(stderr, "Can't create small alloc, small "
-			"object min size should not be greather than %u\n",
-			alloc->objsize_max - (STEP_POOL_MAX + 1) * STEP_SIZE);
-		abort();
-	}
+	assert(alloc_factor > 1. && alloc_factor <= 2.);
 
-	struct mempool *step_pool;
-	for (step_pool = alloc->step_pools;
-	     step_pool < alloc->step_pools + STEP_POOL_MAX;
-	     step_pool++) {
-		mempool_create(step_pool, alloc->cache, objsize_min);
-		objsize_min += STEP_SIZE;
-	}
-	alloc->step_pool_objsize_max = (step_pool - 1)->objsize;
-	if (alloc_factor > 2.0)
-		alloc_factor = 2.0;
+	alloc->factor = alloc_factor;
 	/*
-	 * Correct the user-supplied alloc_factor to ensure that
-	 * it actually produces growing object sizes.
+	 * Second parameter (uintptr_t) - granularity,
+	 * determines alignment.
 	 */
-	if (alloc->step_pool_objsize_max * alloc_factor <
-	    alloc->step_pool_objsize_max + STEP_SIZE) {
-
-		alloc_factor =
-			(alloc->step_pool_objsize_max + STEP_SIZE + 0.5)/
-			alloc->step_pool_objsize_max;
-	}
-	alloc->factor = alloc_factor;
-
-	/* Initialize the factored pool cache. */
-	struct factor_pool *factor_pool = alloc->factor_pool_cache;
-	do {
-		factor_pool->next = factor_pool + 1;
-		factor_pool++;
-	} while (factor_pool !=
-		 alloc->factor_pool_cache + FACTOR_POOL_MAX - 1);
-	factor_pool->next = NULL;
-	alloc->factor_pool_next = alloc->factor_pool_cache;
-	factor_tree_new(&alloc->factor_pools);
-	(void) factor_pool_create(alloc, NULL, alloc->objsize_max);
+	small_class_create(&alloc->small_class, sizeof(intptr_t),
+			   alloc->factor, objsize_min, actual_alloc_factor);
+	factor_pool_create(alloc);
 
 	lifo_init(&alloc->delayed);
 	lifo_init(&alloc->delayed_large);
@@ -225,72 +162,27 @@ smalloc(struct small_alloc *alloc, size_t size)
 {
 	small_collect_garbage(alloc);
 
-	struct mempool *pool;
-	int idx = (size - 1) >> STEP_SIZE_LB;
-	idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0;
-	if (idx < STEP_POOL_MAX) {
-		/* Allocate in a stepped pool. */
-		pool = &alloc->step_pools[idx];
-		assert(size <= pool->objsize &&
-		       (size + STEP_SIZE > pool->objsize || idx == 0));
-	} else {
-		struct factor_pool pattern;
-		pattern.pool.objsize = size;
-		struct factor_pool *upper_bound =
-			factor_tree_nsearch(&alloc->factor_pools, &pattern);
-		if (upper_bound == NULL) {
-			/* Object is too large, fallback to slab_cache */
-			struct slab *slab = slab_get_large(alloc->cache, size);
-			if (slab == NULL)
-				return NULL;
-			return slab_data(slab);
-		}
-
-		if (size < upper_bound->objsize_min)
-			upper_bound = factor_pool_create(alloc, upper_bound,
-							 size);
-		pool = &upper_bound->pool;
+	struct factor_pool *upper_bound = factor_pool_search(alloc, size);
+	if (upper_bound == NULL) {
+		/* Object is too large, fallback to slab_cache */
+		struct slab *slab = slab_get_large(alloc->cache, size);
+		if (slab == NULL)
+			return NULL;
+		return slab_data(slab);
 	}
+	struct mempool *pool = &upper_bound->pool;
 	assert(size <= pool->objsize);
 	return mempool_alloc(pool);
 }
 
-static void
-small_recycle_pool(struct small_alloc *alloc, struct mempool *pool)
-{
-	if (mempool_used(pool) == 0 &&
-	    pool->objsize > alloc->step_pool_objsize_max &&
-	    alloc->factor_pool_next == NULL) {
-		struct factor_pool *factor_pool = (struct factor_pool *)
-			((char *) pool - (intptr_t)
-			 &((struct factor_pool *) NULL)->pool);
-		factor_tree_remove(&alloc->factor_pools, factor_pool);
-		mempool_destroy(pool);
-		alloc->factor_pool_next = factor_pool;
-	}
-}
-
 static inline struct mempool *
 mempool_find(struct small_alloc *alloc, size_t size)
 {
-	struct mempool *pool;
-	int idx = (size - 1) >> STEP_SIZE_LB;
-	idx = (idx > (int) alloc->step_pool0_step_count) ? idx - alloc->step_pool0_step_count : 0;
-	if (idx < STEP_POOL_MAX) {
-		/* Allocated in a stepped pool. */
-		pool = &alloc->step_pools[idx];
-		assert((size + STEP_SIZE > pool->objsize) || (idx == 0));
-	} else {
-		/* Allocated in a factor pool. */
-		struct factor_pool pattern;
-		pattern.pool.objsize = size;
-		struct factor_pool *upper_bound =
-			factor_tree_nsearch(&alloc->factor_pools, &pattern);
-		if (upper_bound == NULL)
-			return NULL; /* Allocated by slab_cache. */
-		assert(size >= upper_bound->objsize_min);
-		pool = &upper_bound->pool;
-	}
+	struct factor_pool *upper_bound = factor_pool_search(alloc, size);
+	if (upper_bound == NULL)
+		return NULL; /* Allocated by slab_cache. */
+	assert(size >= upper_bound->objsize_min);
+	struct mempool *pool = &upper_bound->pool;
 	assert(size <= pool->objsize);
 	return pool;
 }
@@ -319,8 +211,6 @@ smfree(struct small_alloc *alloc, void *ptr, size_t size)
 
 	/* Regular allocation in mempools */
 	mempool_free(pool, ptr);
-	if (mempool_used(pool) == 0)
-		small_recycle_pool(alloc, pool);
 }
 
 /**
@@ -351,28 +241,27 @@ smfree_delayed(struct small_alloc *alloc, void *ptr, size_t size)
 struct mempool_iterator
 {
 	struct small_alloc *alloc;
-	struct mempool *step_pool;
-	struct factor_tree_iterator factor_iterator;
+	uint32_t factor_iterator;
 };
 
 void
 mempool_iterator_create(struct mempool_iterator *it,
 			struct small_alloc *alloc)
 {
 	it->alloc = alloc;
-	it->step_pool = alloc->step_pools;
-	factor_tree_ifirst(&alloc->factor_pools, &it->factor_iterator);
+	it->factor_iterator = 0;
}
 
 struct mempool *
 mempool_iterator_next(struct mempool_iterator *it)
 {
-	if (it->step_pool < it->alloc->step_pools + STEP_POOL_MAX)
-		return it->step_pool++;
-	struct factor_pool *factor_pool = factor_tree_inext(&it->factor_iterator);
-	if (factor_pool) {
+	struct factor_pool *factor_pool = NULL;
+	if (it->factor_iterator < it->alloc->factor_pool_cache_size)
+		factor_pool =
+			&it->alloc->factor_pool_cache[(it->factor_iterator)++];
+	if (factor_pool)
 		return &(factor_pool->pool);
-	}
+
 	return NULL;
 }
 
0 commit comments