UPSTREAM: locking/percpu-rwsem: Remove preempt_disable variants

Effective revert of commit:

  87709e28dc ("fs/locks: Use percpu_down_read_preempt_disable()")

This is causing major pain for PREEMPT_RT.

Sebastian did a lot of lockperf runs on 2 and 4 node machines with all
preemption modes (PREEMPT=n should be an obvious NOP for this patch
and thus serves as a good control) and no results showed significance
over 2-sigma (the PREEMPT=n results were almost empty at 1-sigma).
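
On PREEMPT_RT the pain comes from the calling convention these helpers
impose: the read lock returns with preemption disabled and the unlock
re-enables it, while spinlock_t is a sleeping lock on RT. Sketched from
the fs/locks.c hunks below, every caller therefore looked like:

	percpu_down_read_preempt_disable(&file_rwsem);	/* returns with preemption off */
	spin_lock(&ctx->flc_lock);			/* spinlock_t sleeps on PREEMPT_RT */
	/* ... walk or modify the file_lock lists ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read_preempt_enable(&file_rwsem);	/* preemption enabled again */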

Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Sebastian Andrzej Siewior <bigeasy@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: Will Deacon <will.deacon@arm.com>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Change-Id: Ic81171045eba8a615163e97d27bc282bb0e998a2
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
Author:    Peter Zijlstra, 2019-02-21 15:38:40 +01:00
Committer: spakkkk
Commit:    e3a6fa2b16 (parent: 7d41c747f3)

2 changed files with 20 additions and 36 deletions

--- a/fs/locks.c
+++ b/fs/locks.c
@@ -936,7 +936,7 @@ static int flock_lock_inode(struct inode *inode, struct file_lock *request)
 			return -ENOMEM;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	if (request->fl_flags & FL_ACCESS)
 		goto find_conflict;
@@ -977,7 +977,7 @@ find_conflict:
 
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	if (new_fl)
 		locks_free_lock(new_fl);
 	locks_dispose_list(&dispose);
@@ -1015,7 +1015,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 		new_fl2 = locks_alloc_lock();
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	/*
 	 * New lock request. Walk all POSIX locks and look for conflicts. If
@@ -1187,7 +1187,7 @@ static int posix_lock_inode(struct inode *inode, struct file_lock *request,
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	/*
 	 * Free any unused locks.
 	 */
@@ -1462,7 +1462,7 @@ int __break_lease(struct inode *inode, unsigned int mode, unsigned int type)
 		return error;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 
 	time_out_leases(inode, &dispose);
@@ -1514,13 +1514,13 @@ restart:
 	locks_insert_block(fl, new_fl);
 	trace_break_lease_block(inode, new_fl);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 
 	locks_dispose_list(&dispose);
 	error = wait_event_interruptible_timeout(new_fl->fl_wait,
 						!new_fl->fl_next, break_time);
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	trace_break_lease_unblock(inode, new_fl);
 	locks_delete_block(new_fl);
@@ -1537,7 +1537,7 @@ restart:
 	}
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	locks_free_lock(new_fl);
 	return error;
@@ -1609,7 +1609,7 @@ int fcntl_getlease(struct file *filp)
 
 	ctx = smp_load_acquire(&inode->i_flctx);
 	if (ctx && !list_empty_careful(&ctx->flc_lease)) {
-		percpu_down_read_preempt_disable(&file_rwsem);
+		percpu_down_read(&file_rwsem);
 		spin_lock(&ctx->flc_lock);
 		time_out_leases(inode, &dispose);
 		list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
@@ -1619,7 +1619,7 @@ int fcntl_getlease(struct file *filp)
 			break;
 		}
 		spin_unlock(&ctx->flc_lock);
-		percpu_up_read_preempt_enable(&file_rwsem);
+		percpu_up_read(&file_rwsem);
 
 		locks_dispose_list(&dispose);
 	}
@@ -1693,7 +1693,7 @@ generic_add_lease(struct file *filp, long arg, struct file_lock **flp, void **priv)
 		return -EINVAL;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	time_out_leases(inode, &dispose);
 	error = check_conflicting_open(dentry, arg, lease->fl_flags);
@@ -1764,7 +1764,7 @@ out_setup:
 		lease->fl_lmops->lm_setup(lease, priv);
 out:
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	if (is_deleg)
 		inode_unlock(inode);
@@ -1787,7 +1787,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 		return error;
 	}
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry(fl, &ctx->flc_lease, fl_list) {
 		if (fl->fl_file == filp &&
@@ -1800,7 +1800,7 @@ static int generic_delete_lease(struct file *filp, void *owner)
 	if (victim)
 		error = fl->fl_lmops->lm_change(victim, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 	locks_dispose_list(&dispose);
 	return error;
 }
@@ -2531,13 +2531,13 @@ locks_remove_lease(struct file *filp, struct file_lock_context *ctx)
 	if (list_empty(&ctx->flc_lease))
 		return;
 
-	percpu_down_read_preempt_disable(&file_rwsem);
+	percpu_down_read(&file_rwsem);
 	spin_lock(&ctx->flc_lock);
 	list_for_each_entry_safe(fl, tmp, &ctx->flc_lease, fl_list)
 		if (filp == fl->fl_file)
 			lease_modify(fl, F_UNLCK, &dispose);
 	spin_unlock(&ctx->flc_lock);
-	percpu_up_read_preempt_enable(&file_rwsem);
+	percpu_up_read(&file_rwsem);
 
 	locks_dispose_list(&dispose);
 }
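
After these hunks, every fs/locks.c path simply pairs the plain read-side
helpers with the per-context spinlock; condensed from the hunks above, the
resulting pattern is:

	percpu_down_read(&file_rwsem);
	spin_lock(&ctx->flc_lock);
	/* ... examine or update the lock lists ... */
	spin_unlock(&ctx->flc_lock);
	percpu_up_read(&file_rwsem);
	locks_dispose_list(&dispose);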

--- a/include/linux/percpu-rwsem.h
+++ b/include/linux/percpu-rwsem.h
@@ -29,7 +29,7 @@ static struct percpu_rw_semaphore name = { \
 extern int __percpu_down_read(struct percpu_rw_semaphore *, int);
 extern void __percpu_up_read(struct percpu_rw_semaphore *);
 
-static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *sem)
+static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
 {
 	might_sleep();
 
@@ -47,16 +47,10 @@ static inline void percpu_down_read_preempt_disable(struct percpu_rw_semaphore *
 	__this_cpu_inc(*sem->read_count);
 	if (unlikely(!rcu_sync_is_idle(&sem->rss)))
 		__percpu_down_read(sem, false); /* Unconditional memory barrier */
-	barrier();
 	/*
-	 * The barrier() prevents the compiler from
+	 * The preempt_enable() prevents the compiler from
 	 * bleeding the critical section out.
 	 */
-}
-
-static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
-{
-	percpu_down_read_preempt_disable(sem);
 	preempt_enable();
 }
 
@@ -83,13 +77,9 @@ static inline int percpu_down_read_trylock(struct percpu_rw_semaphore *sem)
 	return ret;
 }
 
-static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
+static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
 {
-	/*
-	 * The barrier() prevents the compiler from
-	 * bleeding the critical section out.
-	 */
-	barrier();
+	preempt_disable();
 	/*
 	 * Same as in percpu_down_read().
 	 */
@@ -102,12 +92,6 @@ static inline void percpu_up_read_preempt_enable(struct percpu_rw_semaphore *sem)
 	rwsem_release(&sem->rw_sem.dep_map, 1, _RET_IP_);
 }
 
-static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
-{
-	preempt_disable();
-	percpu_up_read_preempt_enable(sem);
-}
-
 extern void percpu_down_write(struct percpu_rw_semaphore *);
 extern void percpu_up_write(struct percpu_rw_semaphore *);
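
For readability, this is roughly how the two read-side helpers read after
the patch; lines outside the hunks' context above (the lockdep annotations
and the longer RCU comments) are filled in from the surrounding upstream
header and trimmed here:

	static inline void percpu_down_read(struct percpu_rw_semaphore *sem)
	{
		might_sleep();

		preempt_disable();
		__this_cpu_inc(*sem->read_count);
		if (unlikely(!rcu_sync_is_idle(&sem->rss)))
			__percpu_down_read(sem, false);	/* unconditional memory barrier */
		/*
		 * The preempt_enable() prevents the compiler from
		 * bleeding the critical section out.
		 */
		preempt_enable();
	}

	static inline void percpu_up_read(struct percpu_rw_semaphore *sem)
	{
		preempt_disable();
		/* same as in percpu_down_read() */
		if (likely(rcu_sync_is_idle(&sem->rss)))
			__this_cpu_dec(*sem->read_count);
		else
			__percpu_up_read(sem);		/* unconditional memory barrier */
		preempt_enable();
	}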