kernfs: use kmem_cache pool for struct kernfs_open_node/file

These objects are allocated and freed millions of times on kernels built from this tree.

Use dedicated kmem_cache pools for them and avoid the cost of repeated dynamic memory allocations.

Signed-off-by: Park Ju Hyung <qkrwngud825@gmail.com>
Signed-off-by: Adam W. Willis <return.of.octobot@gmail.com>
Author:    Park Ju Hyung, 2019-07-12 00:42:24 +09:00
Committer: spakkkk
Parent:    4c2bbf266f
Commit:    4c374f8565
3 changed files with 20 additions and 7 deletions
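For context, a minimal sketch of the slab-pool pattern this commit applies (struct example_obj, example_pool and the example_* functions are illustrative placeholders, not identifiers from this patch): a dedicated kmem_cache is created once at init time, and every allocation and free of the structure then goes through that cache instead of kzalloc()/kfree().

#include <linux/slab.h>

struct example_obj {			/* stand-in for kernfs_open_node/file */
	int payload;
};

static struct kmem_cache *example_pool;

/*
 * Create the dedicated pool once, early in init (cf. init_kernfs_file_pool()
 * in the patch below).  KMEM_CACHE() derives the cache name, object size and
 * alignment from the struct itself; it is roughly equivalent to
 * kmem_cache_create("example_obj", sizeof(struct example_obj),
 *                   __alignof__(struct example_obj), flags, NULL).
 */
static void __init example_pool_init(void)
{
	example_pool = KMEM_CACHE(example_obj, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
}

/* Allocate a zeroed object from the pool instead of kzalloc(). */
static struct example_obj *example_alloc(void)
{
	return kmem_cache_zalloc(example_pool, GFP_KERNEL);
}

/* Return the object to the pool instead of kfree(). */
static void example_free(struct example_obj *obj)
{
	if (obj)
		kmem_cache_free(example_pool, obj);
}

Fixed-size objects that churn heavily tend to benefit from a dedicated cache: allocations are served from per-CPU slabs sized exactly for the object, and the cache appears under its own name in /proc/slabinfo (unless the allocator merges it with a compatible cache), which makes the usage easy to monitor.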

fs/kernfs/file.c

@@ -39,6 +39,15 @@ struct kernfs_open_node {
 	struct list_head	files; /* goes through kernfs_open_file.list */
 };
 
+static struct kmem_cache *kmem_open_node_pool;
+static struct kmem_cache *kmem_open_file_pool;
+
+void __init init_kernfs_file_pool(void)
+{
+	kmem_open_node_pool = KMEM_CACHE(kernfs_open_node, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+	kmem_open_file_pool = KMEM_CACHE(kernfs_open_file, SLAB_HWCACHE_ALIGN | SLAB_PANIC);
+}
+
 /*
  * kernfs_notify() may be called from any context and bounces notifications
  * through a work item. To minimize space overhead in kernfs_node, the
@@ -570,12 +579,13 @@ static int kernfs_get_open_node(struct kernfs_node *kn,
 	mutex_unlock(&kernfs_open_file_mutex);
 
 	if (on) {
-		kfree(new_on);
+		if (new_on)
+			kmem_cache_free(kmem_open_node_pool, new_on);
 		return 0;
 	}
 
 	/* not there, initialize a new one and retry */
-	new_on = kmalloc(sizeof(*new_on), GFP_KERNEL);
+	new_on = kmem_cache_alloc(kmem_open_node_pool, GFP_KERNEL);
 	if (!new_on)
 		return -ENOMEM;
@@ -617,7 +627,8 @@ static void kernfs_put_open_node(struct kernfs_node *kn,
 	spin_unlock_irqrestore(&kernfs_open_node_lock, flags);
 	mutex_unlock(&kernfs_open_file_mutex);
 
-	kfree(on);
+	if (on)
+		kmem_cache_free(kmem_open_node_pool, on);
 }
 
 static int kernfs_fop_open(struct inode *inode, struct file *file)
@@ -651,7 +662,7 @@ static int kernfs_fop_open(struct inode *inode, struct file *file)
 	/* allocate a kernfs_open_file for the file */
 	error = -ENOMEM;
-	of = kzalloc(sizeof(struct kernfs_open_file), GFP_KERNEL);
+	of = kmem_cache_zalloc(kmem_open_file_pool, GFP_KERNEL);
 	if (!of)
 		goto err_out;
@@ -742,7 +753,7 @@ err_seq_release:
 	seq_release(inode, file);
 err_free:
 	kfree(of->prealloc_buf);
-	kfree(of);
+	kmem_cache_free(kmem_open_file_pool, of);
 err_out:
 	kernfs_put_active(kn);
 	return error;
@@ -786,7 +797,8 @@ static int kernfs_fop_release(struct inode *inode, struct file *filp)
 	kernfs_put_open_node(kn, of);
 	seq_release(inode, filp);
 	kfree(of->prealloc_buf);
-	kfree(of);
+	if (of)
+		kmem_cache_free(kmem_open_file_pool, of);
 
 	return 0;
 }

fs/kernfs/kernfs-internal.h

@@ -115,6 +115,7 @@ struct kernfs_node *kernfs_find_and_get_node_by_ino(struct kernfs_root *root,
  */
 extern const struct file_operations kernfs_file_fops;
 
+void __init init_kernfs_file_pool(void);
 void kernfs_drain_open_files(struct kernfs_node *kn);
 
 /*

fs/kernfs/mount.c

@@ -406,7 +406,7 @@ struct super_block *kernfs_pin_sb(struct kernfs_root *root, const void *ns)
 
 void __init kernfs_init(void)
 {
-
+	init_kernfs_file_pool();
 	/*
	 * the slab is freed in RCU context, so kernfs_find_and_get_node_by_ino
	 * can access the slab lock free. This could introduce stale nodes,