 #include <string.h>
 #include <stdio.h>

-enum {
-	/** Step size for stepped pools, in bytes */
-	STEP_SIZE = 8,
-	/**
-	 * LB stands for logarithm with binary base, this constant
-	 * is used for bit shifts, when we need to divide by
-	 * STEP_SIZE.
-	 */
-	STEP_SIZE_LB = 3,
-};
-
-rb_proto(, factor_tree_, factor_tree_t, struct factor_pool)
-
-/** Used for search in the tree. */
-static inline int
-factor_pool_cmp(const struct factor_pool *a, const struct factor_pool *b)
+static inline struct factor_pool *
+factor_pool_search(struct small_alloc *alloc, size_t size)
 {
-	return a->pool.objsize > b->pool.objsize ? 1 :
-		a->pool.objsize < b->pool.objsize ? -1 : 0;
+	if (size > alloc->objsize_max)
+		return NULL;
+	unsigned cls = small_class_calc_offset_by_size(&alloc->small_class, size);
+	struct factor_pool *pool = &alloc->factor_pool_cache[cls];
+	return pool;
 }

-rb_gen(, factor_tree_, factor_tree_t, struct factor_pool, node,
-	factor_pool_cmp)
-
-static inline struct factor_pool *
-factor_pool_create(struct small_alloc *alloc,
-		   struct factor_pool *upper_bound,
-		   size_t size)
+static inline void
+factor_pool_create(struct small_alloc *alloc)
 {
-	assert(size > alloc->step_pool_objsize_max);
-	assert(size <= alloc->objsize_max);
-
-	if (alloc->factor_pool_next == NULL) {
-		/**
-		 * Too many factored pools already, fall back
-		 * to an imperfect one.
-		 */
-		return upper_bound;
+	size_t objsize = 0;
+	for (alloc->factor_pool_cache_size = 0;
+	     objsize < alloc->objsize_max &&
+	     alloc->factor_pool_cache_size < FACTOR_POOL_MAX;
+	     alloc->factor_pool_cache_size++) {
+		size_t prevsize = objsize;
+		objsize = small_class_calc_size_by_offset(&alloc->small_class,
+			alloc->factor_pool_cache_size);
+		if (objsize > alloc->objsize_max)
+			objsize = alloc->objsize_max;
+		struct factor_pool *pool =
+			&alloc->factor_pool_cache[alloc->factor_pool_cache_size];
+		mempool_create(&pool->pool, alloc->cache, objsize);
+		pool->objsize_min = prevsize + 1;
 	}
-	size_t objsize = alloc->step_pool_objsize_max;
-	size_t prevsize;
-	do {
-		prevsize = objsize;
-		/*
-		 * Align objsize after each multiplication to
-		 * ensure that the distance between objsizes of
-		 * factored pools is a multiple of STEP_SIZE.
-		 */
-		objsize = small_align(objsize * alloc->factor,
-				      sizeof(intptr_t));
-		assert(objsize > alloc->step_pool_objsize_max);
-	} while (objsize < size);
-	if (objsize > alloc->objsize_max)
-		objsize = alloc->objsize_max;
-	struct factor_pool *pool = alloc->factor_pool_next;
-	alloc->factor_pool_next = pool->next;
-	mempool_create(&pool->pool, alloc->cache, objsize);
-	pool->objsize_min = prevsize + 1;
-	factor_tree_insert(&alloc->factor_pools, pool);
-	return pool;
+	alloc->objsize_max = objsize;
 }

 /** Initialize the small allocator. */
 void
 small_alloc_create(struct small_alloc *alloc, struct slab_cache *cache,
-		   uint32_t objsize_min, float alloc_factor)
+		   uint32_t objsize_min, float alloc_factor,
+		   float *actual_alloc_factor)
 {
 	alloc->cache = cache;
 	/* Align sizes. */
-	objsize_min = small_align(objsize_min, STEP_SIZE);
-	alloc->step_pool0_step_count = (objsize_min - 1) >> STEP_SIZE_LB;
+	objsize_min = small_align(objsize_min, sizeof(intptr_t));
 	/* Make sure at least 4 largest objects can fit in a slab. */
 	alloc->objsize_max =
 		mempool_objsize_max(slab_order_size(cache, cache->order_max));

-	if (!(alloc->objsize_max > objsize_min + STEP_POOL_MAX * STEP_SIZE)) {
-		fprintf(stderr, "Can't create small alloc, small "
-			"object min size should not be greater than %u\n",
-			alloc->objsize_max - (STEP_POOL_MAX + 1) * STEP_SIZE);
-		abort();
-	}
+	assert(alloc_factor > 1. && alloc_factor <= 2.);

-	struct mempool *step_pool;
-	for (step_pool = alloc->step_pools;
-	     step_pool < alloc->step_pools + STEP_POOL_MAX;
-	     step_pool++) {
-		mempool_create(step_pool, alloc->cache, objsize_min);
-		objsize_min += STEP_SIZE;
-	}
-	alloc->step_pool_objsize_max = (step_pool - 1)->objsize;
-	if (alloc_factor > 2.0)
-		alloc_factor = 2.0;
+	alloc->factor = alloc_factor;
 	/*
-	 * Correct the user-supplied alloc_factor to ensure that
-	 * it actually produces growing object sizes.
+	 * The second argument of small_class_create() is the
+	 * granularity; it determines the alignment of class sizes.
 	 */
-	if (alloc->step_pool_objsize_max * alloc_factor <
-	    alloc->step_pool_objsize_max + STEP_SIZE) {
-
-		alloc_factor =
-			(alloc->step_pool_objsize_max + STEP_SIZE + 0.5) /
-			alloc->step_pool_objsize_max;
-	}
-	alloc->factor = alloc_factor;
-
-	/* Initialize the factored pool cache. */
-	struct factor_pool *factor_pool = alloc->factor_pool_cache;
-	do {
-		factor_pool->next = factor_pool + 1;
-		factor_pool++;
-	} while (factor_pool !=
-		 alloc->factor_pool_cache + FACTOR_POOL_MAX - 1);
-	factor_pool->next = NULL;
-	alloc->factor_pool_next = alloc->factor_pool_cache;
-	factor_tree_new(&alloc->factor_pools);
-	(void) factor_pool_create(alloc, NULL, alloc->objsize_max);
+	small_class_create(&alloc->small_class, sizeof(intptr_t),
+			   alloc->factor, objsize_min, actual_alloc_factor);
+	factor_pool_create(alloc);

 	lifo_init(&alloc->delayed);
 	lifo_init(&alloc->delayed_large);
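
The hunk above replaces the stepped pools and the red-black tree of factor pools with a flat cache, factor_pool_cache, indexed by a size class that small_class computes from the requested size. Pool lookup therefore becomes constant-time arithmetic instead of a tree search, and all pools are created up front in factor_pool_create() rather than lazily on allocation. Below is a minimal standalone sketch of the size-class idea, not the library's small_class implementation (which avoids floating point on the hot path): class k covers sizes up to roughly objsize_min * factor^k, so the class of a size is recovered with a logarithm.

#include <math.h>
#include <stdio.h>

/* Upper bound of class cls: objsize_min grown cls times by factor. */
static double
demo_size_by_class(unsigned cls, double objsize_min, double factor)
{
	return objsize_min * pow(factor, cls);
}

/* Smallest class whose upper bound fits the requested size. */
static unsigned
demo_class_by_size(double size, double objsize_min, double factor)
{
	if (size <= objsize_min)
		return 0;
	return (unsigned)ceil(log(size / objsize_min) / log(factor));
}

int
main(void)
{
	double objsize_min = 8.0, factor = 1.05;
	for (unsigned size = 8; size <= 1024; size *= 2) {
		unsigned cls = demo_class_by_size(size, objsize_min, factor);
		printf("size %4u -> class %3u (pool objsize ~%.1f)\n", size,
		       cls, demo_size_by_class(cls, objsize_min, factor));
	}
	return 0;
}
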
@@ -225,72 +162,27 @@ smalloc(struct small_alloc *alloc, size_t size)
 {
 	small_collect_garbage(alloc);

-	struct mempool *pool;
-	int idx = (size - 1) >> STEP_SIZE_LB;
-	idx = (idx > (int) alloc->step_pool0_step_count) ?
-	      idx - alloc->step_pool0_step_count : 0;
-	if (idx < STEP_POOL_MAX) {
-		/* Allocate in a stepped pool. */
-		pool = &alloc->step_pools[idx];
-		assert(size <= pool->objsize &&
-		       (size + STEP_SIZE > pool->objsize || idx == 0));
-	} else {
-		struct factor_pool pattern;
-		pattern.pool.objsize = size;
-		struct factor_pool *upper_bound =
-			factor_tree_nsearch(&alloc->factor_pools, &pattern);
-		if (upper_bound == NULL) {
-			/* Object is too large, fall back to slab_cache. */
-			struct slab *slab = slab_get_large(alloc->cache, size);
-			if (slab == NULL)
-				return NULL;
-			return slab_data(slab);
-		}
-
-		if (size < upper_bound->objsize_min)
-			upper_bound = factor_pool_create(alloc, upper_bound,
-							 size);
-		pool = &upper_bound->pool;
+	struct factor_pool *upper_bound = factor_pool_search(alloc, size);
+	if (upper_bound == NULL) {
+		/* Object is too large, fall back to slab_cache. */
+		struct slab *slab = slab_get_large(alloc->cache, size);
+		if (slab == NULL)
+			return NULL;
+		return slab_data(slab);
 	}
+	struct mempool *pool = &upper_bound->pool;
 	assert(size <= pool->objsize);
 	return mempool_alloc(pool);
 }

-static void
-small_recycle_pool(struct small_alloc *alloc, struct mempool *pool)
-{
-	if (mempool_used(pool) == 0 &&
-	    pool->objsize > alloc->step_pool_objsize_max &&
-	    alloc->factor_pool_next == NULL) {
-		struct factor_pool *factor_pool = (struct factor_pool *)
-			((char *) pool - (intptr_t)
-			 &((struct factor_pool *) NULL)->pool);
-		factor_tree_remove(&alloc->factor_pools, factor_pool);
-		mempool_destroy(pool);
-		alloc->factor_pool_next = factor_pool;
-	}
-}
-
 static inline struct mempool *
 mempool_find(struct small_alloc *alloc, size_t size)
 {
-	struct mempool *pool;
-	int idx = (size - 1) >> STEP_SIZE_LB;
-	idx = (idx > (int) alloc->step_pool0_step_count) ?
-	      idx - alloc->step_pool0_step_count : 0;
-	if (idx < STEP_POOL_MAX) {
-		/* Allocated in a stepped pool. */
-		pool = &alloc->step_pools[idx];
-		assert((size + STEP_SIZE > pool->objsize) || (idx == 0));
-	} else {
-		/* Allocated in a factor pool. */
-		struct factor_pool pattern;
-		pattern.pool.objsize = size;
-		struct factor_pool *upper_bound =
-			factor_tree_nsearch(&alloc->factor_pools, &pattern);
-		if (upper_bound == NULL)
-			return NULL; /* Allocated by slab_cache. */
-		assert(size >= upper_bound->objsize_min);
-		pool = &upper_bound->pool;
-	}
+	struct factor_pool *upper_bound = factor_pool_search(alloc, size);
+	if (upper_bound == NULL)
+		return NULL; /* Allocated by slab_cache. */
+	assert(size >= upper_bound->objsize_min);
+	struct mempool *pool = &upper_bound->pool;
 	assert(size <= pool->objsize);
 	return pool;
 }
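
After this hunk, allocation in smalloc() and lookup in mempool_find() share the same O(1) factor_pool_search(), and the only special case left is the slab_get_large() fallback for objects above objsize_max. The smfree() hunk below likewise loses its pool-recycling step, since the class pools now live in a fixed cache for the allocator's lifetime. The caller contract is unchanged: smfree() must be given the same size that was passed to smalloc(), because the size is what identifies the pool. A round-trip sketch using the functions from this diff (allocator bootstrap is omitted and assumed done via small_alloc_create(); the small/small.h header path is an assumption):

#include <string.h>
#include <small/small.h>	/* assumed header path for the library */

/* Allocate, touch, and free one object through the refactored paths. */
static int
small_roundtrip(struct small_alloc *alloc, size_t size)
{
	void *p = smalloc(alloc, size);	/* O(1) class lookup */
	if (p == NULL)
		return -1;	/* out of memory */
	memset(p, 0, size);	/* the requested size is fully usable */
	smfree(alloc, p, size);	/* must match the smalloc() size */
	return 0;
}
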
@@ -319,8 +211,6 @@ smfree(struct small_alloc *alloc, void *ptr, size_t size)

 	/* Regular allocation in mempools */
 	mempool_free(pool, ptr);
-	if (mempool_used(pool) == 0)
-		small_recycle_pool(alloc, pool);
 }

 /**
@@ -351,28 +241,27 @@ smfree_delayed(struct small_alloc *alloc, void *ptr, size_t size)
 struct mempool_iterator
 {
 	struct small_alloc *alloc;
-	struct mempool *step_pool;
-	struct factor_tree_iterator factor_iterator;
+	uint32_t factor_iterator;
 };

 void
 mempool_iterator_create(struct mempool_iterator *it,
 			struct small_alloc *alloc)
 {
 	it->alloc = alloc;
-	it->step_pool = alloc->step_pools;
-	factor_tree_ifirst(&alloc->factor_pools, &it->factor_iterator);
+	it->factor_iterator = 0;
 }

 struct mempool *
 mempool_iterator_next(struct mempool_iterator *it)
 {
-	if (it->step_pool < it->alloc->step_pools + STEP_POOL_MAX)
-		return it->step_pool++;
-	struct factor_pool *factor_pool = factor_tree_inext(&it->factor_iterator);
-	if (factor_pool) {
+	struct factor_pool *factor_pool = NULL;
+	if (it->factor_iterator < it->alloc->factor_pool_cache_size)
+		factor_pool =
+			&it->alloc->factor_pool_cache[(it->factor_iterator)++];
+	if (factor_pool)
 		return &(factor_pool->pool);
-	}
+
 	return NULL;
 }
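
Iteration gets simpler as well: instead of first walking the stepped pools and then the tree, mempool_iterator_next() just indexes the cache array up to factor_pool_cache_size. A sketch of how the iterator might be used, e.g. to aggregate per-pool statistics (mempool_used() is taken from elsewhere in this file; treat the aggregate it reports as illustrative):

/* Walk every class pool and sum what mempool_used() reports for each. */
static size_t
small_pools_used(struct small_alloc *alloc, size_t *n_pools)
{
	struct mempool_iterator it;
	mempool_iterator_create(&it, alloc);

	size_t used = 0;
	*n_pools = 0;
	struct mempool *pool;
	while ((pool = mempool_iterator_next(&it)) != NULL) {
		(*n_pools)++;
		used += mempool_used(pool);
	}
	return used;
}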