Revert "locking/rwsem: for rwsem prio aware enhancement"

This reverts commit c4b6927bb6.

Change-Id: I0633e8c330f5370b59c33d1e8767f7da9bd94230
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
UtsavBalar1231 authored on 2022-09-30 14:05:30 +05:30, committed by spakkkk
parent 75878f65ca
commit df70227b90
4 changed files with 16 additions and 100 deletions

include/linux/rwsem.h

@@ -47,10 +47,6 @@ struct rw_semaphore {
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map	dep_map;
 #endif
-#ifdef CONFIG_RWSEM_PRIO_AWARE
-	/* count for waiters preempt to queue in wait list */
-	long m_count;
-#endif
 };
 
 /*
@@ -82,20 +78,13 @@ static inline int rwsem_is_locked(struct rw_semaphore *sem)
 #define __RWSEM_OPT_INIT(lockname)
 #endif
 
-#ifdef CONFIG_RWSEM_PRIO_AWARE
-#define __RWSEM_PRIO_AWARE_INIT(lockname) .m_count = 0
-#else
-#define __RWSEM_PRIO_AWARE_INIT(lockname)
-#endif
-
 #define __RWSEM_INITIALIZER(name)				\
	{ __RWSEM_INIT_COUNT(name),				\
	  .owner = NULL,					\
	  .wait_list = LIST_HEAD_INIT((name).wait_list),	\
	  .wait_lock = __RAW_SPIN_LOCK_UNLOCKED(name.wait_lock) \
	  __RWSEM_OPT_INIT(name)				\
-	  __RWSEM_DEP_MAP_INIT(name),				\
-	  __RWSEM_PRIO_AWARE_INIT(name) }
+	  __RWSEM_DEP_MAP_INIT(name) }
 
 #define DECLARE_RWSEM(name) \
	struct rw_semaphore name = __RWSEM_INITIALIZER(name)

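Note: with m_count and __RWSEM_PRIO_AWARE_INIT() gone, the initializer is back
to its stock form. The public rwsem API is untouched by this revert; as a
minimal usage sketch (my_data_lock and the two functions are illustrative
names, not part of this change):

#include <linux/rwsem.h>

/* Static initialization through the restored __RWSEM_INITIALIZER. */
static DECLARE_RWSEM(my_data_lock);

static void reader_path(void)
{
	down_read(&my_data_lock);	/* shared; concurrent readers allowed */
	/* ... read the protected state ... */
	up_read(&my_data_lock);
}

static void writer_path(void)
{
	down_write(&my_data_lock);	/* exclusive; blocks readers and writers */
	/* ... modify the protected state ... */
	up_write(&my_data_lock);
}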
kernel/Kconfig.locks

@@ -248,7 +248,3 @@ config ARCH_USE_QUEUED_RWLOCKS
 config QUEUED_RWLOCKS
	def_bool y if ARCH_USE_QUEUED_RWLOCKS
	depends on SMP
-
-config RWSEM_PRIO_AWARE
-	def_bool y
-	depends on RWSEM_XCHGADD_ALGORITHM

kernel/locking/rwsem-xadd.c

@@ -90,13 +90,21 @@ void __init_rwsem(struct rw_semaphore *sem, const char *name,
 #ifdef CONFIG_RWSEM_SPIN_ON_OWNER
	osq_lock_init(&sem->osq);
 #endif
-#ifdef CONFIG_RWSEM_PRIO_AWARE
-	sem->m_count = 0;
-#endif
 }
 
 EXPORT_SYMBOL(__init_rwsem);
 
+enum rwsem_waiter_type {
+	RWSEM_WAITING_FOR_WRITE,
+	RWSEM_WAITING_FOR_READ
+};
+
+struct rwsem_waiter {
+	struct list_head list;
+	struct task_struct *task;
+	enum rwsem_waiter_type type;
+};
+
 enum rwsem_wake_type {
	RWSEM_WAKE_ANY,		/* Wake whatever's at head of wait list */
	RWSEM_WAKE_READERS,	/* Wake readers only */
@@ -419,7 +427,6 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
	long count, adjustment = -RWSEM_ACTIVE_READ_BIAS;
	struct rwsem_waiter waiter;
	DEFINE_WAKE_Q(wake_q);
-	bool is_first_waiter = false;
 
	waiter.task = current;
	waiter.type = RWSEM_WAITING_FOR_READ;
@@ -440,9 +447,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
		}
		adjustment += RWSEM_WAITING_BIAS;
	}
-
-	/* is_first_waiter == true means we are first in the queue */
-	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);
+	list_add_tail(&waiter.list, &sem->wait_list);
 
	/* we're now waiting on the lock, but no longer actively locking */
	count = atomic_long_add_return(adjustment, &sem->count);
@@ -455,8 +460,7 @@ __rwsem_down_read_failed_common(struct rw_semaphore *sem, int state)
	 */
	if (count == RWSEM_WAITING_BIAS ||
	    (count > RWSEM_WAITING_BIAS &&
-	     (adjustment != -RWSEM_ACTIVE_READ_BIAS ||
-	     is_first_waiter)))
+	     adjustment != -RWSEM_ACTIVE_READ_BIAS))
		__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);
 
	raw_spin_unlock_irq(&sem->wait_lock);
@@ -516,7 +520,6 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
	struct rwsem_waiter waiter;
	struct rw_semaphore *ret = sem;
	DEFINE_WAKE_Q(wake_q);
-	bool is_first_waiter = false;
 
	/* undo write bias from down_write operation, stop active locking */
	count = atomic_long_sub_return(RWSEM_ACTIVE_WRITE_BIAS, &sem->count);
@@ -538,11 +541,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
	if (list_empty(&sem->wait_list))
		waiting = false;
 
-	/*
-	 * is_first_waiter == true means we are first in the queue,
-	 * so there is no read locks that were queued ahead of us.
-	 */
-	is_first_waiter = rwsem_list_add_per_prio(&waiter, sem);
+	list_add_tail(&waiter.list, &sem->wait_list);
 
	/* we're now waiting on the lock, but no longer actively locking */
	if (waiting) {
@@ -553,7 +552,7 @@ __rwsem_down_write_failed_common(struct rw_semaphore *sem, int state)
		 * no active writers, the lock must be read owned; so we try to
		 * wake any read locks that were queued ahead of us.
		 */
-		if (!is_first_waiter && count > RWSEM_WAITING_BIAS) {
+		if (count > RWSEM_WAITING_BIAS) {
			__rwsem_mark_wake(sem, RWSEM_WAKE_READERS, &wake_q);
			/*
			 * The wakeup is normally called _after_ the wait_lock

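Note: with FIFO queueing restored, a reader in the slowpath adds
RWSEM_WAITING_BIAS only when the wait list was empty, so the adjustment check
alone already identifies the first waiter and the separate is_first_waiter
flag becomes redundant. A comment-annotated restatement of the restored reader
wake decision (same logic as the hunk above; the comments are added here):

/*
 * Wake waiters when either:
 *  - count == RWSEM_WAITING_BIAS: waiters are queued but nobody holds
 *    the lock any more, so wake whoever sits at the head; or
 *  - count > RWSEM_WAITING_BIAS and adjustment != -RWSEM_ACTIVE_READ_BIAS:
 *    the lock is still actively held, but we queued onto an empty list
 *    (we added the waiting bias ourselves), so if the owner is a reader
 *    we can wake ourselves to join it.
 */
if (count == RWSEM_WAITING_BIAS ||
    (count > RWSEM_WAITING_BIAS &&
     adjustment != -RWSEM_ACTIVE_READ_BIAS))
	__rwsem_mark_wake(sem, RWSEM_WAKE_ANY, &wake_q);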
kernel/locking/rwsem.h

@@ -41,17 +41,6 @@
 # define DEBUG_RWSEMS_WARN_ON(c, sem)
 #endif
 
-enum rwsem_waiter_type {
-	RWSEM_WAITING_FOR_WRITE,
-	RWSEM_WAITING_FOR_READ
-};
-
-struct rwsem_waiter {
-	struct list_head list;
-	struct task_struct *task;
-	enum rwsem_waiter_type type;
-};
-
 /*
  * R/W semaphores originally for PPC using the stuff in lib/rwsem.c.
  * Adapted largely from include/asm-i386/rwsem.h
@@ -150,63 +139,6 @@ static inline void rwsem_clear_reader_owned(struct rw_semaphore *sem)
 }
 #endif
 
-#ifdef CONFIG_RWSEM_PRIO_AWARE
-#define RWSEM_MAX_PREEMPT_ALLOWED 3000
-
-/*
- * Return true if current waiter is added in the front of the rwsem wait list.
- */
-static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
-					   struct rw_semaphore *sem)
-{
-	struct list_head *pos;
-	struct list_head *head;
-	struct rwsem_waiter *waiter = NULL;
-
-	pos = head = &sem->wait_list;
-
-	/*
-	 * Rules for task prio aware rwsem wait list queueing:
-	 * 1: Only try to preempt waiters with which task priority
-	 * which is higher than DEFAULT_PRIO.
-	 * 2: To avoid starvation, add count to record
-	 * how many high priority waiters preempt to queue in wait
-	 * list.
-	 * If preempt count is exceed RWSEM_MAX_PREEMPT_ALLOWED,
-	 * use simple fifo until wait list is empty.
-	 */
-
-	if (list_empty(head)) {
-		list_add_tail(&waiter_in->list, head);
-		sem->m_count = 0;
-		return true;
-	}
-
-	if (waiter_in->task->prio < DEFAULT_PRIO
-		&& sem->m_count < RWSEM_MAX_PREEMPT_ALLOWED) {
-
-		list_for_each(pos, head) {
-			waiter = list_entry(pos, struct rwsem_waiter, list);
-			if (waiter->task->prio > waiter_in->task->prio) {
-				list_add(&waiter_in->list, pos->prev);
-				sem->m_count++;
-				return &waiter_in->list == head->next;
-			}
-		}
-	}
-
-	list_add_tail(&waiter_in->list, head);
-	return false;
-}
-#else
-static inline bool rwsem_list_add_per_prio(struct rwsem_waiter *waiter_in,
-					   struct rw_semaphore *sem)
-{
-	list_add_tail(&waiter_in->list, &sem->wait_list);
-	return false;
-}
-#endif
-
 extern struct rw_semaphore *rwsem_down_read_failed(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_read_failed_killable(struct rw_semaphore *sem);
 extern struct rw_semaphore *rwsem_down_write_failed(struct rw_semaphore *sem);
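Note: for context, the queueing policy deleted above can be illustrated
outside the kernel. This is a minimal, self-contained userspace sketch of the
same insertion rules (simplified singly linked list; waiter, enqueue_per_prio
and the two constants are illustrative stand-ins, not kernel code):

#include <stdio.h>
#include <stdlib.h>

#define DEFAULT_PRIO		120	/* kernel default prio; lower value = higher priority */
#define MAX_PREEMPT_ALLOWED	3000	/* starvation bound from the reverted patch */

/* Simplified stand-in for the kernel's rwsem_waiter. */
struct waiter {
	int prio;
	struct waiter *next;
};

static struct waiter *head;	/* wait-list head, FIFO order */
static long m_count;		/* waiters that have jumped the queue so far */

/*
 * Mirror of the reverted policy: a waiter whose prio is numerically below
 * DEFAULT_PRIO is inserted ahead of the first lower-priority waiter, but
 * only while m_count stays under the starvation bound; otherwise the
 * insertion is plain FIFO at the tail.
 */
static void enqueue_per_prio(struct waiter *w)
{
	struct waiter **pos = &head;

	if (!head)
		m_count = 0;	/* an empty list resets the preempt budget */

	if (w->prio < DEFAULT_PRIO && m_count < MAX_PREEMPT_ALLOWED) {
		for (; *pos; pos = &(*pos)->next) {
			if ((*pos)->prio > w->prio) {
				m_count++;	/* we preempt this waiter */
				break;
			}
		}
	} else {
		while (*pos)
			pos = &(*pos)->next;	/* FIFO tail insert */
	}
	w->next = *pos;
	*pos = w;
}

int main(void)
{
	int prios[] = { 120, 120, 100, 130, 110 };

	for (unsigned int i = 0; i < sizeof(prios) / sizeof(prios[0]); i++) {
		struct waiter *w = malloc(sizeof(*w));

		w->prio = prios[i];
		w->next = NULL;
		enqueue_per_prio(w);
	}

	/* Prints 100 110 120 120 130: both high-prio waiters queue-jumped. */
	for (struct waiter *w = head; w; w = w->next)
		printf("%d ", w->prio);
	printf("\n");
	return 0;
}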