scsi: ufs: Scrap Qualcomm's PM QoS implementation

This implementation is completely over the top and wastes lots of CPU
cycles. It's too convoluted to fix, so just scrap it to make way for a
simpler solution. This purges every PM QoS reference in the UFS drivers.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
Signed-off-by: alk3pInjection <webmaster@raspii.tech>
Sultan Alsawaf 2021-02-21 16:57:19 -08:00 committed by spakkkk
parent c0b8dc4d0a
commit d89f76b07e
5 changed files with 1 addition and 559 deletions
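
For context on the "simpler solution" the message alludes to (it is not part of this commit), a host-wide PM QoS vote can be kept to a single request: vote when the first command is issued, drop the vote when the last one completes. The sketch below is purely illustrative and is not the follow-up patch; the names ufs_simple_qos and UFS_QOS_LATENCY_US are invented for the example.

#include <linux/atomic.h>
#include <linux/pm_qos.h>

#define UFS_QOS_LATENCY_US 100	/* illustrative latency bound, in microseconds */

struct ufs_simple_qos {
	struct pm_qos_request req;	/* one request for the whole host */
	atomic_t active_reqs;		/* in-flight commands */
};

static void ufs_simple_qos_init(struct ufs_simple_qos *q)
{
	atomic_set(&q->active_reqs, 0);
	pm_qos_add_request(&q->req, PM_QOS_CPU_DMA_LATENCY,
			   PM_QOS_DEFAULT_VALUE);
}

/*
 * pm_qos_update_request() may block (it runs blocking notifiers), so these
 * helpers must be called from sleepable context or deferred to a work item;
 * that deferral is what the removed implementation used its workqueue for.
 */
static void ufs_simple_qos_get(struct ufs_simple_qos *q)
{
	if (atomic_inc_return(&q->active_reqs) == 1)
		pm_qos_update_request(&q->req, UFS_QOS_LATENCY_US);
}

static void ufs_simple_qos_put(struct ufs_simple_qos *q)
{
	if (atomic_dec_return(&q->active_reqs) == 0)
		pm_qos_update_request(&q->req, PM_QOS_DEFAULT_VALUE);
}

static void ufs_simple_qos_exit(struct ufs_simple_qos *q)
{
	pm_qos_remove_request(&q->req);
}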

drivers/scsi/ufs/ufs-qcom-debugfs.c

@@ -246,40 +246,6 @@ static const struct file_operations ufs_qcom_dbg_dbg_regs_desc = {
.release = single_release,
};
static int ufs_qcom_dbg_pm_qos_show(struct seq_file *file, void *data)
{
struct ufs_qcom_host *host = (struct ufs_qcom_host *)file->private;
unsigned long flags;
int i;
spin_lock_irqsave(host->hba->host->host_lock, flags);
seq_printf(file, "enabled: %d\n", host->pm_qos.is_enabled);
for (i = 0; i < host->pm_qos.num_groups && host->pm_qos.groups; i++)
seq_printf(file,
"CPU Group #%d(mask=0x%lx): active_reqs=%d, state=%d, latency=%d\n",
i, host->pm_qos.groups[i].mask.bits[0],
host->pm_qos.groups[i].active_reqs,
host->pm_qos.groups[i].state,
host->pm_qos.groups[i].latency_us);
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
return 0;
}
static int ufs_qcom_dbg_pm_qos_open(struct inode *inode,
struct file *file)
{
return single_open(file, ufs_qcom_dbg_pm_qos_show, inode->i_private);
}
static const struct file_operations ufs_qcom_dbg_pm_qos_desc = {
.open = ufs_qcom_dbg_pm_qos_open,
.read = seq_read,
.release = single_release,
};
void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
{
struct ufs_qcom_host *host;
@@ -368,17 +334,6 @@ void ufs_qcom_dbg_add_debugfs(struct ufs_hba *hba, struct dentry *root)
goto err;
}
host->debugfs_files.pm_qos =
debugfs_create_file("pm_qos", 0400,
host->debugfs_files.debugfs_root, host,
&ufs_qcom_dbg_pm_qos_desc);
if (!host->debugfs_files.dbg_regs) {
dev_err(host->hba->dev,
"%s: failed create dbg_regs debugfs entry\n",
__func__);
goto err;
}
return;
err:

drivers/scsi/ufs/ufs-qcom.c

@@ -35,8 +35,6 @@
#define MAX_PROP_SIZE 32
#define VDDP_REF_CLK_MIN_UV 1200000
#define VDDP_REF_CLK_MAX_UV 1200000
/* TODO: further tuning for this parameter may be required */
#define UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US (10000) /* microseconds */
#define UFS_QCOM_DEFAULT_DBG_PRINT_EN \
(UFS_QCOM_DBG_PRINT_REGS_EN | UFS_QCOM_DBG_PRINT_TEST_BUS_EN)
@@ -64,7 +62,6 @@ static void ufs_qcom_get_default_testbus_cfg(struct ufs_qcom_host *host);
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
u32 clk_1us_cycles,
u32 clk_40ns_cycles);
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host);
static int ufs_qcom_init_sysfs(struct ufs_hba *hba);
static void ufs_qcom_dump_regs(struct ufs_hba *hba, int offset, int len,
@@ -864,8 +861,6 @@ static int ufs_qcom_suspend(struct ufs_hba *hba, enum ufs_pm_op pm_op)
goto out;
}
}
/* Unvote PM QoS */
ufs_qcom_pm_qos_suspend(host);
out:
return ret;
@@ -1607,398 +1602,6 @@ out:
return err;
}
#ifdef CONFIG_SMP /* CONFIG_SMP */
static int ufs_qcom_cpu_to_group(struct ufs_qcom_host *host, int cpu)
{
int i;
if (cpu >= 0 && cpu < num_possible_cpus())
for (i = 0; i < host->pm_qos.num_groups; i++)
if (cpumask_test_cpu(cpu, &host->pm_qos.groups[i].mask))
return i;
return host->pm_qos.default_cpu;
}
static void ufs_qcom_pm_qos_req_start(struct ufs_hba *hba, struct request *req)
{
unsigned long flags;
struct ufs_qcom_host *host;
struct ufs_qcom_pm_qos_cpu_group *group;
if (!hba || !req)
return;
host = ufshcd_get_variant(hba);
if (!host->pm_qos.groups)
return;
group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req->cpu)];
spin_lock_irqsave(hba->host->host_lock, flags);
if (!host->pm_qos.is_enabled)
goto out;
group->active_reqs++;
if (group->state != PM_QOS_REQ_VOTE &&
group->state != PM_QOS_VOTED) {
group->state = PM_QOS_REQ_VOTE;
queue_work(host->pm_qos.workq, &group->vote_work);
}
out:
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
/* hba->host->host_lock is assumed to be held by caller */
static void __ufs_qcom_pm_qos_req_end(struct ufs_qcom_host *host, int req_cpu)
{
struct ufs_qcom_pm_qos_cpu_group *group;
if (!host->pm_qos.groups || !host->pm_qos.is_enabled)
return;
group = &host->pm_qos.groups[ufs_qcom_cpu_to_group(host, req_cpu)];
if (group->active_reqs <= 0)
pr_err_ratelimited("ufshcd-qcom: active req count is negative: %d\n",
group->active_reqs);
if (--group->active_reqs)
return;
group->state = PM_QOS_REQ_UNVOTE;
queue_work(host->pm_qos.workq, &group->unvote_work);
}
static void ufs_qcom_pm_qos_req_end(struct ufs_hba *hba, struct request *req,
bool should_lock)
{
unsigned long flags = 0;
if (!hba || !req)
return;
if (should_lock)
spin_lock_irqsave(hba->host->host_lock, flags);
__ufs_qcom_pm_qos_req_end(ufshcd_get_variant(hba), req->cpu);
if (should_lock)
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
static void ufs_qcom_pm_qos_vote_work(struct work_struct *work)
{
struct ufs_qcom_pm_qos_cpu_group *group =
container_of(work, struct ufs_qcom_pm_qos_cpu_group, vote_work);
struct ufs_qcom_host *host = group->host;
unsigned long flags;
spin_lock_irqsave(host->hba->host->host_lock, flags);
if (!host->pm_qos.is_enabled || !group->active_reqs) {
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
return;
}
group->state = PM_QOS_VOTED;
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
pm_qos_update_request(&group->req, group->latency_us);
}
static void ufs_qcom_pm_qos_unvote_work(struct work_struct *work)
{
struct ufs_qcom_pm_qos_cpu_group *group = container_of(work,
struct ufs_qcom_pm_qos_cpu_group, unvote_work);
struct ufs_qcom_host *host = group->host;
unsigned long flags;
/*
* Check if new requests were submitted in the meantime and do not
* unvote if so.
*/
spin_lock_irqsave(host->hba->host->host_lock, flags);
if (!host->pm_qos.is_enabled || group->active_reqs) {
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
return;
}
group->state = PM_QOS_UNVOTED;
spin_unlock_irqrestore(host->hba->host->host_lock, flags);
pm_qos_update_request_timeout(&group->req,
group->latency_us, UFS_QCOM_PM_QOS_UNVOTE_TIMEOUT_US);
}
static ssize_t ufs_qcom_pm_qos_enable_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
return snprintf(buf, PAGE_SIZE, "%d\n", host->pm_qos.is_enabled);
}
static ssize_t ufs_qcom_pm_qos_enable_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
unsigned long value;
unsigned long flags;
bool enable;
int i;
if (kstrtoul(buf, 0, &value))
return -EINVAL;
enable = !!value;
/*
* Must take the spinlock and save irqs before changing the enabled
* flag in order to keep correctness of PM QoS release.
*/
spin_lock_irqsave(hba->host->host_lock, flags);
if (enable == host->pm_qos.is_enabled) {
spin_unlock_irqrestore(hba->host->host_lock, flags);
return count;
}
host->pm_qos.is_enabled = enable;
spin_unlock_irqrestore(hba->host->host_lock, flags);
if (!enable)
for (i = 0; i < host->pm_qos.num_groups; i++) {
cancel_work_sync(&host->pm_qos.groups[i].vote_work);
cancel_work_sync(&host->pm_qos.groups[i].unvote_work);
spin_lock_irqsave(hba->host->host_lock, flags);
host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
host->pm_qos.groups[i].active_reqs = 0;
spin_unlock_irqrestore(hba->host->host_lock, flags);
pm_qos_update_request(&host->pm_qos.groups[i].req,
PM_QOS_DEFAULT_VALUE);
}
return count;
}
static ssize_t ufs_qcom_pm_qos_latency_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
int ret;
int i;
int offset = 0;
for (i = 0; i < host->pm_qos.num_groups; i++) {
ret = snprintf(&buf[offset], PAGE_SIZE,
"cpu group #%d(mask=0x%lx): %d\n", i,
host->pm_qos.groups[i].mask.bits[0],
host->pm_qos.groups[i].latency_us);
if (ret > 0)
offset += ret;
else
break;
}
return offset;
}
static ssize_t ufs_qcom_pm_qos_latency_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t count)
{
struct ufs_hba *hba = dev_get_drvdata(dev->parent);
struct ufs_qcom_host *host = ufshcd_get_variant(hba);
unsigned long value;
unsigned long flags;
char *strbuf;
char *strbuf_copy;
char *token;
int i;
int ret;
/* reserve one byte for null termination */
strbuf = kmalloc(count + 1, GFP_KERNEL);
if (!strbuf)
return -ENOMEM;
strbuf_copy = strbuf;
strlcpy(strbuf, buf, count + 1);
for (i = 0; i < host->pm_qos.num_groups; i++) {
token = strsep(&strbuf, ",");
if (!token)
break;
ret = kstrtoul(token, 0, &value);
if (ret)
break;
spin_lock_irqsave(hba->host->host_lock, flags);
host->pm_qos.groups[i].latency_us = value;
spin_unlock_irqrestore(hba->host->host_lock, flags);
}
kfree(strbuf_copy);
return count;
}
static int ufs_qcom_pm_qos_init(struct ufs_qcom_host *host)
{
struct device_node *node = host->hba->dev->of_node;
struct device_attribute *attr;
int ret = 0;
int num_groups;
int num_values;
char wq_name[sizeof("ufs_pm_qos_00")];
int i;
num_groups = of_property_count_u32_elems(node,
"qcom,pm-qos-cpu-groups");
if (num_groups <= 0)
goto no_pm_qos;
num_values = of_property_count_u32_elems(node,
"qcom,pm-qos-cpu-group-latency-us");
if (num_values <= 0)
goto no_pm_qos;
if (num_values != num_groups || num_groups > num_possible_cpus()) {
dev_err(host->hba->dev, "%s: invalid count: num_groups=%d, num_values=%d, num_possible_cpus=%d\n",
__func__, num_groups, num_values, num_possible_cpus());
goto no_pm_qos;
}
host->pm_qos.num_groups = num_groups;
host->pm_qos.groups = kcalloc(host->pm_qos.num_groups,
sizeof(struct ufs_qcom_pm_qos_cpu_group), GFP_KERNEL);
if (!host->pm_qos.groups)
return -ENOMEM;
for (i = 0; i < host->pm_qos.num_groups; i++) {
u32 mask;
ret = of_property_read_u32_index(node, "qcom,pm-qos-cpu-groups",
i, &mask);
if (ret)
goto free_groups;
host->pm_qos.groups[i].mask.bits[0] = mask;
if (!cpumask_subset(&host->pm_qos.groups[i].mask,
cpu_possible_mask)) {
dev_err(host->hba->dev, "%s: invalid mask 0x%x for cpu group\n",
__func__, mask);
goto free_groups;
}
ret = of_property_read_u32_index(node,
"qcom,pm-qos-cpu-group-latency-us", i,
&host->pm_qos.groups[i].latency_us);
if (ret)
goto free_groups;
host->pm_qos.groups[i].req.type = PM_QOS_REQ_AFFINE_CORES;
host->pm_qos.groups[i].req.cpus_affine =
host->pm_qos.groups[i].mask;
host->pm_qos.groups[i].state = PM_QOS_UNVOTED;
host->pm_qos.groups[i].active_reqs = 0;
host->pm_qos.groups[i].host = host;
INIT_WORK(&host->pm_qos.groups[i].vote_work,
ufs_qcom_pm_qos_vote_work);
INIT_WORK(&host->pm_qos.groups[i].unvote_work,
ufs_qcom_pm_qos_unvote_work);
}
ret = of_property_read_u32(node, "qcom,pm-qos-default-cpu",
&host->pm_qos.default_cpu);
if (ret || host->pm_qos.default_cpu > num_possible_cpus())
host->pm_qos.default_cpu = 0;
/*
* Use a single-threaded workqueue to assure work submitted to the queue
* is performed in order. Consider the following 2 possible cases:
*
* 1. A new request arrives and voting work is scheduled for it. Before
* the voting work is performed the request is finished and unvote
* work is also scheduled.
* 2. A request is finished and unvote work is scheduled. Before the
* work is performed a new request arrives and voting work is also
* scheduled.
*
* In both cases a vote work and unvote work wait to be performed.
* If ordering is not guaranteed, then the end state might be the
* opposite of the desired state.
*/
snprintf(wq_name, ARRAY_SIZE(wq_name), "%s_%d", "ufs_pm_qos",
host->hba->host->host_no);
host->pm_qos.workq = create_singlethread_workqueue(wq_name);
if (!host->pm_qos.workq) {
dev_err(host->hba->dev, "%s: failed to create the workqueue\n",
__func__);
ret = -ENOMEM;
goto free_groups;
}
/* Initialization was ok, add all PM QoS requests */
for (i = 0; i < host->pm_qos.num_groups; i++)
pm_qos_add_request(&host->pm_qos.groups[i].req,
PM_QOS_CPU_DMA_LATENCY, PM_QOS_DEFAULT_VALUE);
/* PM QoS latency sys-fs attribute */
attr = &host->pm_qos.latency_attr;
attr->show = ufs_qcom_pm_qos_latency_show;
attr->store = ufs_qcom_pm_qos_latency_store;
sysfs_attr_init(&attr->attr);
attr->attr.name = "pm_qos_latency_us";
attr->attr.mode = 0644;
if (device_create_file(host->hba->var->dev, attr))
dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos_latency_us\n");
/* PM QoS enable sys-fs attribute */
attr = &host->pm_qos.enable_attr;
attr->show = ufs_qcom_pm_qos_enable_show;
attr->store = ufs_qcom_pm_qos_enable_store;
sysfs_attr_init(&attr->attr);
attr->attr.name = "pm_qos_enable";
attr->attr.mode = 0644;
if (device_create_file(host->hba->var->dev, attr))
dev_dbg(host->hba->dev, "Failed to create sysfs for pm_qos enable\n");
host->pm_qos.is_enabled = true;
return 0;
free_groups:
kfree(host->pm_qos.groups);
no_pm_qos:
host->pm_qos.groups = NULL;
return ret ? ret : -ENOTSUPP;
}
static void ufs_qcom_pm_qos_suspend(struct ufs_qcom_host *host)
{
int i;
if (!host->pm_qos.groups)
return;
for (i = 0; i < host->pm_qos.num_groups; i++)
flush_work(&host->pm_qos.groups[i].unvote_work);
}
static void ufs_qcom_pm_qos_remove(struct ufs_qcom_host *host)
{
int i;
if (!host->pm_qos.groups)
return;
for (i = 0; i < host->pm_qos.num_groups; i++)
pm_qos_remove_request(&host->pm_qos.groups[i].req);
destroy_workqueue(host->pm_qos.workq);
kfree(host->pm_qos.groups);
host->pm_qos.groups = NULL;
}
#endif /* CONFIG_SMP */
#define ANDROID_BOOT_DEV_MAX 30
static char android_boot_dev[ANDROID_BOOT_DEV_MAX];
@@ -2165,10 +1768,6 @@ static int ufs_qcom_init(struct ufs_hba *hba)
goto out_variant_clear;
}
err = ufs_qcom_pm_qos_init(host);
if (err)
dev_info(dev, "%s: PM QoS will be disabled\n", __func__);
/* restore the secure configuration */
ufs_qcom_update_sec_cfg(hba, true);
@@ -2294,7 +1893,6 @@ static void ufs_qcom_exit(struct ufs_hba *hba)
host->is_phy_pwr_on = false;
}
phy_exit(host->generic_phy);
ufs_qcom_pm_qos_remove(host);
}
static int ufs_qcom_set_dme_vs_core_clk_ctrl_clear_div(struct ufs_hba *hba,
@@ -2771,15 +2369,9 @@ static struct ufs_hba_variant_ops ufs_hba_qcom_vops = {
.get_user_cap_mode = ufs_qcom_get_user_cap_mode,
};
static struct ufs_hba_pm_qos_variant_ops ufs_hba_pm_qos_variant_ops = {
.req_start = ufs_qcom_pm_qos_req_start,
.req_end = ufs_qcom_pm_qos_req_end,
};
static struct ufs_hba_variant ufs_hba_qcom_variant = {
.name = "qcom",
.vops = &ufs_hba_qcom_vops,
.pm_qos_vops = &ufs_hba_pm_qos_variant_ops,
};
/**

drivers/scsi/ufs/ufs-qcom.h

@@ -16,7 +16,6 @@
#define UFS_QCOM_H_
#include <linux/phy/phy.h>
#include <linux/pm_qos.h>
#include "ufshcd.h"
#define MAX_UFS_QCOM_HOSTS 2
@@ -247,62 +246,9 @@ struct qcom_debugfs_files {
struct dentry *testbus_cfg;
struct dentry *testbus_bus;
struct dentry *dbg_regs;
struct dentry *pm_qos;
};
#endif
/* PM QoS voting state */
enum ufs_qcom_pm_qos_state {
PM_QOS_UNVOTED,
PM_QOS_VOTED,
PM_QOS_REQ_VOTE,
PM_QOS_REQ_UNVOTE,
};
/**
* struct ufs_qcom_pm_qos_cpu_group - data related to cluster PM QoS voting
* logic
* @req: request object for PM QoS
* @vote_work: work object for voting procedure
* @unvote_work: work object for un-voting procedure
* @host: back pointer to the main structure
* @state: voting state machine current state
* @latency_us: requested latency value used for cluster voting, in
* microseconds
* @mask: cpu mask defined for this cluster
* @active_reqs: number of active requests on this cluster
*/
struct ufs_qcom_pm_qos_cpu_group {
struct pm_qos_request req;
struct work_struct vote_work;
struct work_struct unvote_work;
struct ufs_qcom_host *host;
enum ufs_qcom_pm_qos_state state;
s32 latency_us;
cpumask_t mask;
int active_reqs;
};
/**
* struct ufs_qcom_pm_qos - data related to PM QoS voting logic
* @groups: PM QoS cpu group state array
* @enable_attr: sysfs attribute to enable/disable PM QoS voting logic
* @latency_attr: sysfs attribute to set latency value
* @workq: single threaded workqueue to run PM QoS voting/unvoting
* @num_clusters: number of clusters defined
* @default_cpu: cpu to use for voting for request not specifying a cpu
* @is_enabled: flag specifying whether voting logic is enabled
*/
struct ufs_qcom_pm_qos {
struct ufs_qcom_pm_qos_cpu_group *groups;
struct device_attribute enable_attr;
struct device_attribute latency_attr;
struct workqueue_struct *workq;
int num_groups;
int default_cpu;
bool is_enabled;
};
struct ufs_qcom_host {
/*
* Set this capability if host controller supports the QUniPro mode
@@ -340,9 +286,6 @@ struct ufs_qcom_host {
struct clk *rx_l1_sync_clk;
struct clk *tx_l1_sync_clk;
/* PM Quality-of-Service (QoS) data */
struct ufs_qcom_pm_qos pm_qos;
bool disable_lpm;
bool is_lane_clks_enabled;
bool sec_cfg_updated;

drivers/scsi/ufs/ufshcd.c

@@ -3840,9 +3840,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
if (ufshcd_is_hibern8_on_idle_allowed(hba))
WARN_ON(hba->hibern8_on_idle.state != HIBERN8_EXITED);
/* Vote PM QoS for the request */
ufshcd_vops_pm_qos_req_start(hba, cmd->request);
WARN_ON(hba->clk_gating.state != CLKS_ON);
lrbp = &hba->lrb[tag];
@@ -3874,7 +3871,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
@@ -3883,7 +3879,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
goto out;
}
@@ -3902,7 +3897,6 @@ static int ufshcd_queuecommand(struct Scsi_Host *host, struct scsi_cmnd *cmd)
lrbp->cmd = NULL;
clear_bit_unlock(tag, &hba->lrb_in_use);
ufshcd_release_all(hba);
ufshcd_vops_pm_qos_req_end(hba, cmd->request, true);
dev_err(hba->dev, "%s: failed sending command, %d\n",
__func__, err);
if (err == -EINVAL) {
@@ -6577,15 +6571,6 @@ static void __ufshcd_transfer_req_compl(struct ufs_hba *hba,
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
hba->ufs_stats.clk_rel.ctx = XFR_REQ_COMPL;
if (cmd->request) {
/*
* As we are accessing the "request" structure,
* this must be called before calling
* ->scsi_done() callback.
*/
ufshcd_vops_pm_qos_req_end(hba, cmd->request,
false);
}
clear_bit_unlock(index, &hba->lrb_in_use);
/*
@@ -6654,15 +6639,7 @@ void ufshcd_abort_outstanding_transfer_requests(struct ufs_hba *hba, int result)
update_req_stats(hba, lrbp);
/* Mark completed command as NULL in LRB */
lrbp->cmd = NULL;
if (cmd->request) {
/*
* As we are accessing the "request" structure,
* this must be called before calling
* ->scsi_done() callback.
*/
ufshcd_vops_pm_qos_req_end(hba, cmd->request,
true);
}
clear_bit_unlock(index, &hba->lrb_in_use);
/*

drivers/scsi/ufs/ufshcd.h

@@ -389,15 +389,6 @@ struct ufs_hba_variant_ops {
ANDROID_KABI_RESERVE(4);
};
/**
* struct ufs_hba_pm_qos_variant_ops - variant specific PM QoS callbacks
*/
struct ufs_hba_pm_qos_variant_ops {
void (*req_start)(struct ufs_hba *hba, struct request *req);
void (*req_end)(struct ufs_hba *hba, struct request *req,
bool should_lock);
};
/**
* struct ufs_hba_variant - variant specific parameters
* @name: variant name
@@ -406,7 +397,6 @@ struct ufs_hba_variant {
struct device *dev;
const char *name;
struct ufs_hba_variant_ops *vops;
struct ufs_hba_pm_qos_variant_ops *pm_qos_vops;
};
struct keyslot_mgmt_ll_ops;
@@ -1599,21 +1589,6 @@ static inline void ufshcd_vops_remove_debugfs(struct ufs_hba *hba)
}
#endif
static inline void ufshcd_vops_pm_qos_req_start(struct ufs_hba *hba,
struct request *req)
{
if (hba->var && hba->var->pm_qos_vops &&
hba->var->pm_qos_vops->req_start)
hba->var->pm_qos_vops->req_start(hba, req);
}
static inline void ufshcd_vops_pm_qos_req_end(struct ufs_hba *hba,
struct request *req, bool lock)
{
if (hba->var && hba->var->pm_qos_vops && hba->var->pm_qos_vops->req_end)
hba->var->pm_qos_vops->req_end(hba, req, lock);
}
extern struct ufs_pm_lvl_states ufs_pm_lvl_states[];
/*