dma_buf: try to use kmem_cache pool for dmabuf allocations

These get allocated and freed millions of times on this kernel tree.
Use a dedicated kmem_cache pool and avoid costly dynamic memory allocations.

Most allocations' size is:
(sizeof(struct dma_buf) + sizeof(struct reservation_object)).

Put those under a dedicated kmem_cache pool and distinguish them with the
dmabuf->from_kmem flag.

Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
[@0ctobot: Adapted for 4.19]
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
This commit is contained in:
Park Ju Hyung 2021-03-14 09:19:23 -04:00 committed by spakkkk
parent 774d7449d5
commit 26d295a1b4
2 changed files with 26 additions and 3 deletions

View File

@ -49,10 +49,15 @@
static atomic_long_t name_counter; static atomic_long_t name_counter;
static struct kmem_cache *kmem_attach_pool; static struct kmem_cache *kmem_attach_pool;
static struct kmem_cache *kmem_dma_buf_pool;
/*
 * Create the slab caches used by the dma-buf core.
 *
 * kmem_dma_buf_pool serves the common allocation layout: a struct dma_buf
 * with its struct reservation_object placed inline right behind it.
 */
void __init init_dma_buf_kmem_pool(void)
{
	kmem_attach_pool = KMEM_CACHE(dma_buf_attachment,
				      SLAB_HWCACHE_ALIGN | SLAB_PANIC);
	/*
	 * The third argument of kmem_cache_create() is the alignment, not
	 * the size: the original passed the object size there, which is
	 * invalid (alignment must be 0 or a power of two).  Pass 0 and let
	 * SLAB_HWCACHE_ALIGN select cache-line alignment.
	 */
	kmem_dma_buf_pool = kmem_cache_create("dma_buf",
			sizeof(struct dma_buf) +
			sizeof(struct reservation_object),
			0, SLAB_HWCACHE_ALIGN | SLAB_PANIC, NULL);
}
static inline int is_dma_buf_file(struct file *); static inline int is_dma_buf_file(struct file *);
@ -81,6 +86,9 @@ static void dmabuf_dent_put(struct dma_buf *dmabuf)
{ {
if (atomic_dec_and_test(&dmabuf->dent_count)) { if (atomic_dec_and_test(&dmabuf->dent_count)) {
kfree(dmabuf->name); kfree(dmabuf->name);
if (dmabuf->from_kmem)
kmem_cache_free(kmem_dma_buf_pool, dmabuf);
else
kfree(dmabuf); kfree(dmabuf);
} }
} }
@ -606,6 +614,7 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
char *bufname; char *bufname;
int ret; int ret;
long cnt; long cnt;
bool from_kmem;
if (!exp_info->resv) if (!exp_info->resv)
alloc_size += sizeof(struct reservation_object); alloc_size += sizeof(struct reservation_object);
@ -633,7 +642,16 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
goto err_module; goto err_module;
} }
from_kmem = (alloc_size ==
(sizeof(struct dma_buf) + sizeof(struct reservation_object)));
if (from_kmem) {
dmabuf = kmem_cache_zalloc(kmem_dma_buf_pool, GFP_KERNEL);
dmabuf->from_kmem = true;
} else {
dmabuf = kzalloc(alloc_size, GFP_KERNEL); dmabuf = kzalloc(alloc_size, GFP_KERNEL);
}
if (!dmabuf) { if (!dmabuf) {
ret = -ENOMEM; ret = -ENOMEM;
goto err_name; goto err_name;
@ -681,6 +699,9 @@ struct dma_buf *dma_buf_export(const struct dma_buf_export_info *exp_info)
return dmabuf; return dmabuf;
err_dmabuf:
	/* Release the buffer via whichever allocator produced it. */
	if (from_kmem)
		kmem_cache_free(kmem_dma_buf_pool, dmabuf);
	else
		kfree(dmabuf);
err_name: err_name:
kfree(bufname); kfree(bufname);

View File

@ -463,6 +463,8 @@ struct dma_buf {
dma_buf_destructor dtor; dma_buf_destructor dtor;
void *dtor_data; void *dtor_data;
atomic_t dent_count; atomic_t dent_count;
bool from_kmem;
}; };
/** /**