Commit 21bb132

Mikulas Patocka authored and Mike Snitzer (snitm) committed
dm bufio: remove code that merges slab caches
All slab allocators can merge duplicate caches. So dm-bufio doesn't need
extra slab merging logic. Instead it can just allocate one slab cache per
client and let the allocator merge them.

Signed-off-by: Mikulas Patocka <[email protected]>
Signed-off-by: Mike Snitzer <[email protected]>
1 parent eeb67a0 commit 21bb132
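
The commit message leans on a real property of the kernel's slab layer: caches
created with the same object size, alignment and flags, and without a
constructor, are candidates for transparent merging (the default unless the
kernel is booted with slab_nomerge). The module below is a minimal
illustrative sketch of that pattern, not part of this commit; the
merge_demo_*/demo_cache_* names are hypothetical.

#include <linux/module.h>
#include <linux/slab.h>

static struct kmem_cache *cache_a, *cache_b;

static int __init merge_demo_init(void)
{
        /*
         * Identical size, alignment and flags, no constructor: a merging
         * allocator is free to back both handles with one underlying cache.
         */
        cache_a = kmem_cache_create("demo_cache_a", 512, 512, 0, NULL);
        cache_b = kmem_cache_create("demo_cache_b", 512, 512, 0, NULL);
        if (!cache_a || !cache_b) {
                kmem_cache_destroy(cache_a);    /* destroying NULL is a no-op */
                kmem_cache_destroy(cache_b);
                return -ENOMEM;
        }
        return 0;
}

static void __exit merge_demo_exit(void)
{
        /* Each owner destroys only its own handle, as dm-bufio now does. */
        kmem_cache_destroy(cache_a);
        kmem_cache_destroy(cache_b);
}

module_init(merge_demo_init);
module_exit(merge_demo_exit);
MODULE_LICENSE("GPL");

The NULL-tolerance of kmem_cache_destroy() is also why the error path and
dm_bufio_client_destroy() in the diff below can call it unconditionally:
c->slab_cache stays NULL for clients whose block size is at least PAGE_SIZE.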

File tree: 1 file changed (+14, -39 lines)

drivers/md/dm-bufio.c

Lines changed: 14 additions & 39 deletions
@@ -57,10 +57,9 @@
 #define DM_BUFIO_INLINE_VECS 16
 
 /*
- * Don't try to use kmem_cache_alloc for blocks larger than this.
+ * Don't try to use alloc_pages for blocks larger than this.
  * For explanation, see alloc_buffer_data below.
  */
-#define DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT (PAGE_SIZE >> 1)
 #define DM_BUFIO_BLOCK_SIZE_GFP_LIMIT (PAGE_SIZE << (MAX_ORDER - 1))
 
 /*
@@ -101,11 +100,11 @@ struct dm_bufio_client {
         unsigned block_size;
         unsigned char sectors_per_block_bits;
         unsigned char pages_per_block_bits;
-        unsigned char blocks_per_page_bits;
         unsigned aux_size;
         void (*alloc_callback)(struct dm_buffer *);
         void (*write_callback)(struct dm_buffer *);
 
+        struct kmem_cache *slab_cache;
         struct dm_io_client *dm_io;
 
         struct list_head reserved_buffers;
@@ -172,19 +171,6 @@ struct dm_buffer {
 
 /*----------------------------------------------------------------*/
 
-static struct kmem_cache *dm_bufio_caches[PAGE_SHIFT - SECTOR_SHIFT];
-
-static inline int dm_bufio_cache_index(struct dm_bufio_client *c)
-{
-        unsigned ret = c->blocks_per_page_bits - 1;
-
-        BUG_ON(ret >= ARRAY_SIZE(dm_bufio_caches));
-
-        return ret;
-}
-
-#define DM_BUFIO_CACHE(c) (dm_bufio_caches[dm_bufio_cache_index(c)])
-
 #define dm_bufio_in_request() (!!current->bio_list)
 
 static void dm_bufio_lock(struct dm_bufio_client *c)
@@ -384,9 +370,9 @@ static void __cache_size_refresh(void)
 static void *alloc_buffer_data(struct dm_bufio_client *c, gfp_t gfp_mask,
                                enum data_mode *data_mode)
 {
-        if (c->block_size <= DM_BUFIO_BLOCK_SIZE_SLAB_LIMIT) {
+        if (unlikely(c->slab_cache != NULL)) {
                 *data_mode = DATA_MODE_SLAB;
-                return kmem_cache_alloc(DM_BUFIO_CACHE(c), gfp_mask);
+                return kmem_cache_alloc(c->slab_cache, gfp_mask);
         }
 
         if (c->block_size <= DM_BUFIO_BLOCK_SIZE_GFP_LIMIT &&
@@ -426,7 +412,7 @@ static void free_buffer_data(struct dm_bufio_client *c,
 {
         switch (data_mode) {
         case DATA_MODE_SLAB:
-                kmem_cache_free(DM_BUFIO_CACHE(c), data);
+                kmem_cache_free(c->slab_cache, data);
                 break;
 
         case DATA_MODE_GET_FREE_PAGES:
@@ -1672,8 +1658,6 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
         c->sectors_per_block_bits = __ffs(block_size) - SECTOR_SHIFT;
         c->pages_per_block_bits = (__ffs(block_size) >= PAGE_SHIFT) ?
                                   __ffs(block_size) - PAGE_SHIFT : 0;
-        c->blocks_per_page_bits = (__ffs(block_size) < PAGE_SHIFT ?
-                                  PAGE_SHIFT - __ffs(block_size) : 0);
 
         c->aux_size = aux_size;
         c->alloc_callback = alloc_callback;
@@ -1699,20 +1683,15 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
                 goto bad_dm_io;
         }
 
-        mutex_lock(&dm_bufio_clients_lock);
-        if (c->blocks_per_page_bits) {
-                if (!DM_BUFIO_CACHE(c)) {
-                        char name[26];
-                        snprintf(name, sizeof name, "dm_bufio_cache-%u", c->block_size);
-                        DM_BUFIO_CACHE(c) = kmem_cache_create(name, c->block_size, c->block_size, 0, NULL);
-                        if (!DM_BUFIO_CACHE(c)) {
-                                r = -ENOMEM;
-                                mutex_unlock(&dm_bufio_clients_lock);
-                                goto bad;
-                        }
+        if (block_size < PAGE_SIZE) {
+                char name[26];
+                snprintf(name, sizeof name, "dm_bufio_cache-%u", c->block_size);
+                c->slab_cache = kmem_cache_create(name, c->block_size, c->block_size, 0, NULL);
+                if (!c->slab_cache) {
+                        r = -ENOMEM;
+                        goto bad;
                 }
         }
-        mutex_unlock(&dm_bufio_clients_lock);
 
         while (c->need_reserved_buffers) {
                 struct dm_buffer *b = alloc_buffer(c, GFP_KERNEL);
@@ -1747,6 +1726,7 @@ struct dm_bufio_client *dm_bufio_client_create(struct block_device *bdev, unsign
                 list_del(&b->lru_list);
                 free_buffer(b);
         }
+        kmem_cache_destroy(c->slab_cache);
         dm_io_client_destroy(c->dm_io);
 bad_dm_io:
         mutex_destroy(&c->lock);
@@ -1793,6 +1773,7 @@ void dm_bufio_client_destroy(struct dm_bufio_client *c)
         for (i = 0; i < LIST_SIZE; i++)
                 BUG_ON(c->n_buffers[i]);
 
+        kmem_cache_destroy(c->slab_cache);
         dm_io_client_destroy(c->dm_io);
         mutex_destroy(&c->lock);
         kfree(c);
@@ -1896,8 +1877,6 @@ static int __init dm_bufio_init(void)
         dm_bufio_allocated_vmalloc = 0;
         dm_bufio_current_allocated = 0;
 
-        memset(&dm_bufio_caches, 0, sizeof dm_bufio_caches);
-
         mem = (__u64)mult_frac(totalram_pages - totalhigh_pages,
                                DM_BUFIO_MEMORY_PERCENT, 100) << PAGE_SHIFT;
 
@@ -1932,14 +1911,10 @@ static int __init dm_bufio_init(void)
 static void __exit dm_bufio_exit(void)
 {
         int bug = 0;
-        int i;
 
         cancel_delayed_work_sync(&dm_bufio_work);
         destroy_workqueue(dm_bufio_wq);
 
-        for (i = 0; i < ARRAY_SIZE(dm_bufio_caches); i++)
-                kmem_cache_destroy(dm_bufio_caches[i]);
-
         if (dm_bufio_client_count) {
                 DMCRIT("%s: dm_bufio_client_count leaked: %d",
                        __func__, dm_bufio_client_count);
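
As a worked example of the sizing logic that remains in
dm_bufio_client_create(), here is a small userspace sketch; it is not part of
the commit, __builtin_ctzl() stands in for the kernel's __ffs(), and the
4 KiB PAGE_SIZE is an assumption for illustration:

#include <stdio.h>

#define SECTOR_SHIFT   9
#define PAGE_SHIFT_SIM 12                     /* assumed 4 KiB pages */
#define PAGE_SIZE_SIM  (1UL << PAGE_SHIFT_SIM)

int main(void)
{
        unsigned long block_sizes[] = { 512, 4096, 65536 };

        for (int i = 0; i < 3; i++) {
                unsigned long bs = block_sizes[i];
                unsigned ffs_bs = __builtin_ctzl(bs);  /* __ffs() stand-in */
                unsigned sectors_bits = ffs_bs - SECTOR_SHIFT;
                unsigned pages_bits = ffs_bs >= PAGE_SHIFT_SIM ?
                                      ffs_bs - PAGE_SHIFT_SIM : 0;

                printf("block %6lu: sectors_per_block_bits=%u pages_per_block_bits=%u -> %s\n",
                       bs, sectors_bits, pages_bits,
                       bs < PAGE_SIZE_SIM ? "per-client slab cache"
                                          : "page-based allocation");
        }
        return 0;
}

Under these assumptions a 512-byte client gets its own "dm_bufio_cache-512"
slab (merged with any other 512-byte client's cache by the allocator), while
4 KiB and larger blocks skip the slab path entirely.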
