block: cfq-iosched: Port Samsung optimizations from SM-N986B

Change-Id: I9409e7cce91344f1f3b454e213081d436f12ee1f
Signed-off-by: UtsavBalar1231 <utsavbalar1231@gmail.com>
Commit: f88627492e (parent: 197dc69c14)
Author: UtsavBalar1231, 2022-02-20 16:51:40 +05:30
Committed-by: spakkkk

block/cfq-iosched.c

@@ -34,8 +34,12 @@ static u64 cfq_slice_async = NSEC_PER_SEC / 25;
static const int cfq_slice_async_rq = 2;
static u64 cfq_slice_idle = NSEC_PER_SEC / 125;
static u64 cfq_group_idle = NSEC_PER_SEC / 125;
/* IOPP-cfq_rt_idle_only-v1.0.k4.19 */
static int cfq_rt_idle_only = 1;
static const u64 cfq_target_latency = (u64)NSEC_PER_SEC * 3/10; /* 300 ms */
static const int cfq_hist_divisor = 4;
/* IOPP-cfq_max_async_dispatch-v1.0.4.4 */
static int cfq_max_async_dispatch = 4;
/*
* offset from end of queue service tree for idle class
@@ -387,6 +391,8 @@ struct cfq_data {
unsigned int cfq_back_max;
unsigned int cfq_slice_async_rq;
unsigned int cfq_latency;
unsigned int cfq_max_async_dispatch;
unsigned int cfq_rt_idle_only;
u64 cfq_fifo_expire[2];
u64 cfq_slice[2];
u64 cfq_slice_idle;
@@ -2951,6 +2957,9 @@ static bool cfq_should_idle(struct cfq_data *cfqd, struct cfq_queue *cfqq)
if (wl_class == IDLE_WORKLOAD)
return false;
if (cfqd->cfq_rt_idle_only && wl_class != RT_WORKLOAD)
return false;
/* We do for queues that were marked with idle window flag. */
if (cfq_cfqq_idle_window(cfqq) &&
!(blk_queue_nonrot(cfqd->queue) && cfqd->hw_tag))
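The rt_idle_only gate above is the core of the first optimization: when it is enabled (the default in this port), only real-time queues are allowed to idle between requests. Below is a minimal standalone C sketch of that decision, not part of the patch; the enum, struct, and harness are invented for illustration and the remaining upstream checks are omitted.

/*
 * Illustrative userspace model of the rt_idle_only check added to
 * cfq_should_idle() above; not kernel code, types simplified.
 */
#include <stdbool.h>
#include <stdio.h>

enum wl_class { BE_WORKLOAD, RT_WORKLOAD, IDLE_WORKLOAD };

struct model_cfqd {
	int cfq_rt_idle_only;		/* defaults to 1 in this port */
};

static bool model_should_idle(const struct model_cfqd *cfqd, enum wl_class wl)
{
	if (wl == IDLE_WORKLOAD)
		return false;
	/* New gate: with rt_idle_only set, non-RT queues never idle. */
	if (cfqd->cfq_rt_idle_only && wl != RT_WORKLOAD)
		return false;
	return true;			/* remaining upstream checks omitted */
}

int main(void)
{
	struct model_cfqd cfqd = { .cfq_rt_idle_only = 1 };

	printf("BE queue idles: %d\n", model_should_idle(&cfqd, BE_WORKLOAD));
	printf("RT queue idles: %d\n", model_should_idle(&cfqd, RT_WORKLOAD));
	return 0;
}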
@@ -3457,6 +3466,9 @@ static inline bool cfq_slice_used_soon(struct cfq_data *cfqd,
return false;
}
#define CFQ_MAY_DISPATCH_ASYNC(cfqd) \
((cfqd)->rq_in_flight[BLK_RW_ASYNC] < (cfqd)->cfq_max_async_dispatch)
static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
{
unsigned int max_dispatch;
@@ -3472,8 +3484,16 @@ static bool cfq_may_dispatch(struct cfq_data *cfqd, struct cfq_queue *cfqq)
/*
* If this is an async queue and we have sync IO in flight, let it wait
* but only if device does not support hw_tag.
*/
if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq))
if (cfqd->rq_in_flight[BLK_RW_SYNC] && !cfq_cfqq_sync(cfqq)
&& !cfqd->hw_tag)
return false;
/*
* Let it wait if too many async requests are dispatched.
*/
if (!cfq_cfqq_sync(cfqq) && !CFQ_MAY_DISPATCH_ASYNC(cfqd))
return false;
max_dispatch = max_t(unsigned int, cfqd->cfq_quantum / 2, 1);
@@ -3556,7 +3576,7 @@ static bool cfq_dispatch_request(struct cfq_data *cfqd, struct cfq_queue *cfqq)
BUG_ON(RB_EMPTY_ROOT(&cfqq->sort_list));
rq = cfq_check_fifo(cfqq);
if (rq)
if (rq && (cfq_cfqq_sync(cfqq) || CFQ_MAY_DISPATCH_ASYNC(cfqd)))
cfq_mark_cfqq_must_dispatch(cfqq);
if (!cfq_may_dispatch(cfqd, cfqq))
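Both of the hunks above rely on CFQ_MAY_DISPATCH_ASYNC() to cap the number of in-flight async requests at cfq_max_async_dispatch (default 4), so background writeback cannot monopolize the dispatch path. The following standalone C sketch models that throttle; the harness and the BLK_RW_ASYNC index are assumptions, only the comparison mirrors the macro from the patch.

/*
 * Userspace model of the async dispatch cap; not part of the patch.
 */
#include <stdbool.h>
#include <stdio.h>

#define BLK_RW_ASYNC 0			/* assumed array index for async I/O */

struct model_cfqd {
	unsigned int rq_in_flight[2];
	unsigned int cfq_max_async_dispatch;	/* default 4 in this port */
};

#define MODEL_MAY_DISPATCH_ASYNC(cfqd) \
	((cfqd)->rq_in_flight[BLK_RW_ASYNC] < (cfqd)->cfq_max_async_dispatch)

int main(void)
{
	struct model_cfqd cfqd = { .cfq_max_async_dispatch = 4 };
	unsigned int i;

	/* Keep dispatching async requests until the cap defers them. */
	for (i = 0; i < 6; i++) {
		bool ok = MODEL_MAY_DISPATCH_ASYNC(&cfqd);

		printf("async in flight=%u -> %s\n",
		       cfqd.rq_in_flight[BLK_RW_ASYNC],
		       ok ? "dispatch" : "defer");
		if (ok)
			cfqd.rq_in_flight[BLK_RW_ASYNC]++;
	}
	return 0;
}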
@@ -3801,7 +3821,8 @@ static void cfq_init_cfqq(struct cfq_data *cfqd, struct cfq_queue *cfqq,
cfq_mark_cfqq_prio_changed(cfqq);
if (is_sync) {
if (!cfq_class_idle(cfqq))
if (!cfq_class_idle(cfqq) &&
(!cfqd->cfq_rt_idle_only || cfq_class_rt(cfqq)))
cfq_mark_cfqq_idle_window(cfqq);
cfq_mark_cfqq_sync(cfqq);
}
@@ -3993,6 +4014,9 @@ cfq_update_idle_window(struct cfq_data *cfqd, struct cfq_queue *cfqq,
if (!cfq_cfqq_sync(cfqq) || cfq_class_idle(cfqq))
return;
if (cfqd->cfq_rt_idle_only && !cfq_class_rt(cfqq))
return;
enable_idle = old_idle = cfq_cfqq_idle_window(cfqq);
if (cfqq->queued[0] + cfqq->queued[1] >= 4)
@@ -4232,8 +4256,14 @@ static void cfq_update_hw_tag(struct cfq_data *cfqd)
if (cfqd->hw_tag_samples++ < 50)
return;
if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN)
if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) {
cfqd->hw_tag = 1;
/*
* for queueing devices, such as UFS and eMMC CQ,
* set slice_idle to 0 if no one touched it yet.
*/
cfqd->cfq_slice_idle = 0;
}
else
cfqd->hw_tag = 0;
}
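The hunk above extends the existing hw_tag heuristic: once roughly 50 samples suggest the device keeps CFQ_HW_QUEUE_MIN or more requests queued (as UFS and eMMC CQ parts do), slice idling is switched off. The sketch below is a loose userspace model of that sampling, not the real cfq_update_hw_tag() (which derives its depth estimate from rq_in_driver); the harness and sample values are made up.

/*
 * Loose userspace model of the hw_tag detection path touched above.
 */
#include <stdio.h>

#define CFQ_HW_QUEUE_MIN 5

struct model_cfqd {
	int hw_tag_samples;
	int hw_tag_est_depth;
	int hw_tag;
	unsigned long long cfq_slice_idle;
};

static void model_update_hw_tag(struct model_cfqd *cfqd, int cur_depth)
{
	if (cur_depth > cfqd->hw_tag_est_depth)
		cfqd->hw_tag_est_depth = cur_depth;

	if (cfqd->hw_tag_samples++ < 50)
		return;

	if (cfqd->hw_tag_est_depth >= CFQ_HW_QUEUE_MIN) {
		cfqd->hw_tag = 1;
		/* Queueing device detected: stop idling between requests. */
		cfqd->cfq_slice_idle = 0;
	} else {
		cfqd->hw_tag = 0;
	}
}

int main(void)
{
	struct model_cfqd cfqd = { .cfq_slice_idle = 8000000ULL /* 8 ms */ };
	int i;

	for (i = 0; i <= 50; i++)
		model_update_hw_tag(&cfqd, 6 /* observed depth */);

	printf("hw_tag=%d slice_idle=%llu\n", cfqd.hw_tag, cfqd.cfq_slice_idle);
	return 0;
}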
@@ -4711,7 +4741,9 @@ static int cfq_init_queue(struct request_queue *q, struct elevator_type *e)
cfqd->cfq_target_latency = cfq_target_latency;
cfqd->cfq_slice_async_rq = cfq_slice_async_rq;
cfqd->cfq_slice_idle = cfq_slice_idle;
cfqd->cfq_max_async_dispatch = cfq_max_async_dispatch;
cfqd->cfq_group_idle = cfq_group_idle;
cfqd->cfq_rt_idle_only = cfq_rt_idle_only;
cfqd->cfq_latency = 1;
cfqd->hw_tag = -1;
/*
@@ -4735,7 +4767,7 @@ static void cfq_registered_queue(struct request_queue *q)
/*
* Default to IOPS mode with no idling for SSDs
*/
if (blk_queue_nonrot(q))
if (blk_queue_nonrot(q) && !cfqd->cfq_rt_idle_only)
cfqd->cfq_slice_idle = 0;
wbt_disable_default(q);
}
@@ -4772,7 +4804,9 @@ SHOW_FUNCTION(cfq_fifo_expire_async_show, cfqd->cfq_fifo_expire[0], 1);
SHOW_FUNCTION(cfq_back_seek_max_show, cfqd->cfq_back_max, 0);
SHOW_FUNCTION(cfq_back_seek_penalty_show, cfqd->cfq_back_penalty, 0);
SHOW_FUNCTION(cfq_slice_idle_show, cfqd->cfq_slice_idle, 1);
SHOW_FUNCTION(cfq_max_async_dispatch_show, cfqd->cfq_max_async_dispatch, 0);
SHOW_FUNCTION(cfq_group_idle_show, cfqd->cfq_group_idle, 1);
SHOW_FUNCTION(cfq_rt_idle_only_show, cfqd->cfq_rt_idle_only, 0);
SHOW_FUNCTION(cfq_slice_sync_show, cfqd->cfq_slice[1], 1);
SHOW_FUNCTION(cfq_slice_async_show, cfqd->cfq_slice[0], 1);
SHOW_FUNCTION(cfq_slice_async_rq_show, cfqd->cfq_slice_async_rq, 0);
@@ -4821,7 +4855,9 @@ STORE_FUNCTION(cfq_back_seek_max_store, &cfqd->cfq_back_max, 0, UINT_MAX, 0);
STORE_FUNCTION(cfq_back_seek_penalty_store, &cfqd->cfq_back_penalty, 1,
UINT_MAX, 0);
STORE_FUNCTION(cfq_slice_idle_store, &cfqd->cfq_slice_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_max_async_dispatch_store, &cfqd->cfq_max_async_dispatch, 1, UINT_MAX, 0);
STORE_FUNCTION(cfq_group_idle_store, &cfqd->cfq_group_idle, 0, UINT_MAX, 1);
STORE_FUNCTION(cfq_rt_idle_only_store, &cfqd->cfq_rt_idle_only, 0, 1, 0);
STORE_FUNCTION(cfq_slice_sync_store, &cfqd->cfq_slice[1], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_store, &cfqd->cfq_slice[0], 1, UINT_MAX, 1);
STORE_FUNCTION(cfq_slice_async_rq_store, &cfqd->cfq_slice_async_rq, 1,
@@ -4869,9 +4905,11 @@ static struct elv_fs_entry cfq_attrs[] = {
CFQ_ATTR(slice_idle_us),
CFQ_ATTR(group_idle),
CFQ_ATTR(group_idle_us),
CFQ_ATTR(rt_idle_only),
CFQ_ATTR(low_latency),
CFQ_ATTR(target_latency),
CFQ_ATTR(target_latency_us),
CFQ_ATTR(max_async_dispatch),
__ATTR_NULL
};
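With the two CFQ_ATTR() entries above, both knobs become writable sysfs attributes in the scheduler's iosched directory. The store functions above clamp rt_idle_only to 0 or 1 and require max_async_dispatch to be at least 1. A hedged usage sketch follows, assuming a device named sda whose active scheduler is cfq (equivalent to "echo 1 > /sys/block/sda/queue/iosched/rt_idle_only"); the device name and values are assumptions for illustration.

/*
 * Illustrative tuner for the two sysfs knobs added by this patch.
 */
#include <stdio.h>

static int write_tunable(const char *path, const char *val)
{
	FILE *f = fopen(path, "w");

	if (!f) {
		perror(path);		/* e.g. wrong device or scheduler */
		return -1;
	}
	fputs(val, f);
	fclose(f);
	return 0;
}

int main(void)
{
	/* "sda" is an assumption; adjust for the target block device. */
	write_tunable("/sys/block/sda/queue/iosched/rt_idle_only", "1");
	write_tunable("/sys/block/sda/queue/iosched/max_async_dispatch", "4");
	return 0;
}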