drm/msm: Offload commit cleanup onto little CPUs

The cleanup portion of non-blocking commits takes up a non-trivial amount
of CPU time, so offload it onto the little CPUs. This reduces latency in
the display commit path.

Signed-off-by: Sultan Alsawaf <sultan@kerneltoast.com>
[lazerl0rd: Adjust for Linux 4.19, with different commit cleanup.]
Signed-off-by: Diab Neiroukh <lazerl0rd@thezest.dev>
This commit is contained in:
Sultan Alsawaf 2021-01-25 23:32:41 -08:00 committed by spakkkk
parent 5e45392196
commit 2fb480b5de
3 changed files with 45 additions and 24 deletions

View File

@@ -116,18 +116,6 @@ static inline bool _msm_seamless_for_conn(struct drm_connector *connector,
/* clear specified crtcs (no longer pending update) */
/* clear specified crtcs (no longer pending update) */
/*
 * NOTE(review): this is a diff-page rendering with +/- markers stripped.
 * The pending-crtcs bookkeeping below is the OLD body being removed by
 * this patch (it moves into complete_commit()); the new body is only the
 * trailing nonblock kfree(). Confirm against the applied tree.
 */
static void commit_destroy(struct msm_commit *c)
{
struct msm_drm_private *priv = c->dev->dev_private;
uint32_t crtc_mask = c->crtc_mask;
uint32_t plane_mask = c->plane_mask;
/* End_atomic */
spin_lock(&priv->pending_crtcs_event.lock);
DBG("end: %08x", crtc_mask);
priv->pending_crtcs &= ~crtc_mask;
priv->pending_planes &= ~plane_mask;
wake_up_all_locked(&priv->pending_crtcs_event);
spin_unlock(&priv->pending_crtcs_event.lock);
/* blocking commits embed/stack the msm_commit; only nonblock ones own it */
if (c->nonblock)
kfree(c);
}
@ -469,6 +457,16 @@ static void msm_atomic_helper_commit_modeset_enables(struct drm_device *dev,
SDE_ATRACE_END("msm_enable");
}
/*
 * Final teardown for a commit: drop the atomic state reference and free
 * the msm_commit. Runs either inline (blocking commits) or as kthread
 * work queued on the little-CPU cleanup worker (non-blocking commits).
 */
static void complete_commit_cleanup(struct kthread_work *work)
{
	struct msm_commit *commit = container_of(work, typeof(*commit),
						 commit_work);

	drm_atomic_state_put(commit->state);
	commit_destroy(commit);
}
int msm_atomic_prepare_fb(struct drm_plane *plane,
struct drm_plane_state *new_state)
{
@ -505,6 +503,8 @@ static void complete_commit(struct msm_commit *c)
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
struct msm_kms *kms = priv->kms;
uint32_t crtc_mask = c->crtc_mask;
uint32_t plane_mask = c->plane_mask;
drm_atomic_helper_wait_for_fences(dev, state, false);
@ -536,35 +536,43 @@ static void complete_commit(struct msm_commit *c)
kms->funcs->complete_commit(kms, state);
drm_atomic_state_put(state);
priv->complete_commit_time = ktime_get()/1000;
complete_time_generate_event(dev);
commit_destroy(c);
/* End_atomic */
spin_lock(&priv->pending_crtcs_event.lock);
DBG("end: %08x", crtc_mask);
priv->pending_crtcs &= ~crtc_mask;
priv->pending_planes &= ~plane_mask;
wake_up_all_locked(&priv->pending_crtcs_event);
spin_unlock(&priv->pending_crtcs_event.lock);
}
static void _msm_drm_commit_work_cb(struct kthread_work *work)
{
struct msm_commit *commit = NULL;
struct msm_commit *c = container_of(work, typeof(*c), commit_work);
struct drm_atomic_state *state = c->state;
struct drm_device *dev = state->dev;
struct msm_drm_private *priv = dev->dev_private;
ktime_t start, end;
s64 duration;
if (!work) {
DRM_ERROR("%s: Invalid commit work data!\n", __func__);
return;
}
commit = container_of(work, struct msm_commit, commit_work);
start = ktime_get();
frame_stat_collector(0, COMMIT_START_TS);
SDE_ATRACE_BEGIN("complete_commit");
complete_commit(commit);
complete_commit(c);
SDE_ATRACE_END("complete_commit");
if (c->nonblock) {
/* Offload the cleanup onto little CPUs */
kthread_init_work(&c->commit_work, complete_commit_cleanup);
kthread_queue_work(&priv->clean_thread.worker, &c->commit_work);
} else {
complete_commit_cleanup(&c->commit_work);
}
end = ktime_get();
duration = ktime_to_ns(ktime_sub(end, start));
frame_stat_collector(duration, COMMIT_END_TS);
@ -643,6 +651,7 @@ static void msm_atomic_commit_dispatch(struct drm_device *dev,
* ensure that SW and HW state don't get out of sync.
*/
complete_commit(commit);
complete_commit_cleanup(&commit->commit_work);
} else if (!nonblock) {
kthread_flush_work(&commit->commit_work);
}

View File

@ -377,6 +377,8 @@ static int msm_drm_uninit(struct device *dev)
}
}
kthread_stop(priv->clean_thread.thread);
drm_kms_helper_poll_fini(ddev);
drm_mode_config_cleanup(ddev);
@ -553,6 +555,11 @@ static int msm_drm_display_thread_create(struct sched_param param,
{
int i, ret = 0;
kthread_init_worker(&priv->clean_thread.worker);
priv->clean_thread.thread = kthread_run_perf_critical(cpu_lp_mask,
kthread_worker_fn, &priv->clean_thread.worker, "drm_cleanup");
BUG_ON(IS_ERR(priv->clean_thread.thread));
/**
* this priority was found during empiric testing to have appropriate
* realtime scheduling to process display updates and interact with

View File

@ -667,6 +667,11 @@ struct msm_drm_private {
struct msm_drm_thread disp_thread[MAX_CRTCS];
struct msm_drm_thread event_thread[MAX_CRTCS];
struct {
struct task_struct *thread;
struct kthread_worker worker;
} clean_thread;
struct task_struct *pp_event_thread;
struct kthread_worker pp_event_worker;