scsi: ufs: Add simple IRQ-affined PM QoS operations

Qualcomm's PM QoS solution suffers from a number of issues: applying
PM QoS to all CPUs, convoluted spaghetti code that wastes CPU cycles,
and keeping PM QoS applied for 10 ms after all requests finish
processing.

This implements a simple IRQ-affined PM QoS mechanism for each UFS
adapter. It uses atomics to elide locking on the issue path, and it
enqueues a worker that applies PM QoS to the IRQ's target CPU as soon
as a command request is issued.
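
Roughly, the fast path is just an atomic counter (hba->pm_qos.count):
only the 0<->1 transitions queue work, and the workers re-check the
counter under hba->pm_qos.lock before touching the PM QoS request, so
get/put workers that run late or out of order settle on whatever the
final count dictates. Below is a minimal userspace sketch of that
counting pattern (illustrative names; the workers are called inline
here, whereas the driver queues them on system_unbound_wq and votes
via pm_qos_update_request()):

/* Userspace model of the counting scheme; printf stands in for the QoS vote. */
#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_int count;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static bool active;

static void get_worker(void)
{
	if (!atomic_load(&count))
		return;			/* everything already completed */

	pthread_mutex_lock(&lock);
	if (atomic_load(&count) && !active) {
		printf("apply latency vote for the IRQ's CPU\n");
		active = true;
	}
	pthread_mutex_unlock(&lock);
}

static void put_worker(void)
{
	if (atomic_load(&count))
		return;			/* new commands arrived; keep the vote */

	pthread_mutex_lock(&lock);
	if (!atomic_load(&count) && active) {
		printf("drop latency vote\n");
		active = false;
	}
	pthread_mutex_unlock(&lock);
}

/* Fast paths: lock-free; only the 0<->1 transitions reach a worker. */
static void qos_get(void)
{
	if (atomic_fetch_add(&count, 1) == 0)
		get_worker();
}

static void qos_put(void)
{
	if (atomic_fetch_sub(&count, 1) == 1)
		put_worker();
}

int main(void)
{
	qos_get();	/* first command issued: vote applied */
	qos_get();	/* overlapping command: no extra work */
	qos_put();
	qos_put();	/* last completion: vote dropped */
	return 0;
}

The double check under the mutex is what makes the lock-free fast path
safe: whichever worker runs last simply conforms the active/inactive
state to the current count.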

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: alk3pInjection <webmaster@raspii.tech>
Author:    Sultan Alsawaf <sultan@kerneltoast.com>
Date:      2021-02-22 00:43:24 -08:00
Committer: spakkkk
Parent:    67322ffa74
Commit:    a8052f1777

2 changed files with 83 additions and 6 deletions

drivers/scsi/ufs/ufshcd.c

@@ -3714,6 +3714,48 @@ static inline void ufshcd_put_read_lock(struct ufs_hba *hba)
up_read(&hba->lock);
}

static void ufshcd_pm_qos_get_worker(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.get_work);

	if (!atomic_read(&hba->pm_qos.count))
		return;

	mutex_lock(&hba->pm_qos.lock);
	if (atomic_read(&hba->pm_qos.count) && !hba->pm_qos.active) {
		pm_qos_update_request(&hba->pm_qos.req, 100);
		hba->pm_qos.active = true;
	}
	mutex_unlock(&hba->pm_qos.lock);
}

static void ufshcd_pm_qos_put_worker(struct work_struct *work)
{
	struct ufs_hba *hba = container_of(work, typeof(*hba), pm_qos.put_work);

	if (atomic_read(&hba->pm_qos.count))
		return;

	mutex_lock(&hba->pm_qos.lock);
	if (!atomic_read(&hba->pm_qos.count) && hba->pm_qos.active) {
		pm_qos_update_request(&hba->pm_qos.req, PM_QOS_DEFAULT_VALUE);
		hba->pm_qos.active = false;
	}
	mutex_unlock(&hba->pm_qos.lock);
}

static void ufshcd_pm_qos_get(struct ufs_hba *hba)
{
	if (atomic_inc_return(&hba->pm_qos.count) == 1)
		queue_work(system_unbound_wq, &hba->pm_qos.get_work);
}

static void ufshcd_pm_qos_put(struct ufs_hba *hba)
{
	if (atomic_dec_return(&hba->pm_qos.count) == 0)
		queue_work(system_unbound_wq, &hba->pm_qos.put_work);
}

/**
* ufshcd_queuecommand - main entry point for SCSI requests
* @host: SCSI host pointer
@@ -3729,12 +3771,16 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
int tag;
int err = 0;
bool has_read_lock = false;
bool cmd_sent = false;
hba = shost_priv(host);
if (!cmd || !cmd->request || !hba)
return -EINVAL;
/* Wake the CPU managing the IRQ as soon as possible */
ufshcd_pm_qos_get(hba);
tag = cmd->request->tag;
if (!ufshcd_valid_tag(hba, tag)) {
dev_err(hba->dev,
@@ -3748,12 +3794,12 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (err == -EPERM) {
set_host_byte(cmd, DID_ERROR);
cmd->scsi_done(cmd);
return 0;
err = 0;
goto out_pm_qos;
}
if (err == -EAGAIN) {
hba->ufs_stats.scsi_blk_reqs.ts = ktime_get();
hba->ufs_stats.scsi_blk_reqs.busy_ctx = SCALING_BUSY;
return SCSI_MLQUEUE_HOST_BUSY;
err = SCSI_MLQUEUE_HOST_BUSY;
goto out_pm_qos;
}
} else if (err == 1) {
has_read_lock = true;
@@ -3904,16 +3950,22 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (has_read_lock)
ufshcd_put_read_lock(hba);
cmd->scsi_done(cmd);
return 0;
err = 0;
goto out_pm_qos;
}
goto out;
}
cmd_sent = true;
out_unlock:
spin_unlock_irqrestore(hba->host->host_lock, flags);
out:
if (has_read_lock)
ufshcd_put_read_lock(hba);
out_pm_qos:
if (!cmd_sent)
ufshcd_pm_qos_put(hba);
return err;
}
@@ -6571,6 +6623,8 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
if (cmd->request)
ufshcd_pm_qos_put(hba);
clear_bit_unlock(index, &hba->lrb_in_use);
/*
@@ -6639,7 +6693,8 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
update_req_stats(hba, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
if (cmd->request)
ufshcd_pm_qos_put(hba);
clear_bit_unlock(index, &hba->lrb_in_use);
/*
@@ -10975,6 +11030,9 @@ void ufshcd_remove(struct ufs_hba *hba)
/* disable interrupts */
ufshcd_disable_intr(hba, hba->intr_mask);
ufshcd_hba_stop(hba, true);
cancel_work_sync(&hba->pm_qos.put_work);
cancel_work_sync(&hba->pm_qos.get_work);
pm_qos_remove_request(&hba->pm_qos.req);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
@@ -11198,6 +11256,14 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
*/
mb();
mutex_init(&hba->pm_qos.lock);
INIT_WORK(&hba->pm_qos.get_work, ufshcd_pm_qos_get_worker);
INIT_WORK(&hba->pm_qos.put_work, ufshcd_pm_qos_put_worker);
hba->pm_qos.req.type = PM_QOS_REQ_AFFINE_IRQ;
hba->pm_qos.req.irq = irq;
pm_qos_add_request(&hba->pm_qos.req, PM_QOS_CPU_DMA_LATENCY,
PM_QOS_DEFAULT_VALUE);
/* IRQ registration */
err = devm_request_irq(dev, irq, ufshcd_intr, IRQF_SHARED,
dev_name(dev), hba);
@@ -11283,6 +11349,7 @@ int ufshcd_init(struct ufs_hba *hba, void __iomem *mmio_base, unsigned int irq)
out_remove_scsi_host:
scsi_remove_host(hba->host);
exit_gating:
pm_qos_remove_request(&hba->pm_qos.req);
ufshcd_exit_clk_scaling(hba);
ufshcd_exit_clk_gating(hba);
out_disable:

drivers/scsi/ufs/ufshcd.h

@@ -60,6 +60,7 @@
#include <linux/reset.h>
#include <linux/extcon-provider.h>
#include <linux/devfreq.h>
#include <linux/pm_qos.h>
#include "unipro.h"
#include <asm/irq.h>
@@ -1125,6 +1126,15 @@ struct ufs_hba {
ANDROID_KABI_RESERVE(2);
ANDROID_KABI_RESERVE(3);
ANDROID_KABI_RESERVE(4);

	struct {
		struct pm_qos_request req;
		struct work_struct get_work;
		struct work_struct put_work;
		struct mutex lock;
		atomic_t count;
		bool active;
	} pm_qos;
};
static inline void ufshcd_mark_shutdown_ongoing(struct ufs_hba *hba)