From 7dba3ca921969a65de3e2a9364dd57eff5d64e51 Mon Sep 17 00:00:00 2001
From: Junak
Date: Mon, 20 Apr 2020 02:32:40 +0300
Subject: [PATCH] drm: msm: mdp5: improve command-mode panel support

---
 drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 18 ++++++++++++++++++
 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c  | 12 ++++++++++++
 drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h  |  1 +
 3 files changed, 31 insertions(+)

diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index bb7d066618e6..c02afb8939ba 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -49,6 +49,8 @@ struct mdp5_crtc {
 
 	struct completion pp_completion;
 
+	atomic_t pp_complete;
+
 	bool lm_cursor_enabled;
 
 	struct {
@@ -83,6 +85,9 @@ static void request_pending(struct drm_crtc *crtc, uint32_t pending)
 static void request_pp_done_pending(struct drm_crtc *crtc)
 {
 	struct mdp5_crtc *mdp5_crtc = to_mdp5_crtc(crtc);
+
+	atomic_set(&mdp5_crtc->pp_complete, 0);
+
 	reinit_completion(&mdp5_crtc->pp_completion);
 }
 
@@ -1195,6 +1200,15 @@
 static void mdp5_crtc_pp_done_irq(struct mdp_irq *irq, uint32_t irqstatus)
 {
 	struct mdp5_crtc *mdp5_crtc = container_of(irq, struct mdp5_crtc, pp_done);
+	struct drm_crtc *crtc = &mdp5_crtc->base;
+	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
+
+	atomic_set(&mdp5_crtc->pp_complete, 1);
+
+	if (mdp5_cstate->cmd_mode) {
+		struct mdp5_ctl *ctl = mdp5_cstate->ctl;
+		mdp5_ctl_commit_finished(ctl);
+	}
 
 	complete_all(&mdp5_crtc->pp_completion);
 }
@@ -1206,6 +1220,9 @@ static void mdp5_crtc_wait_for_pp_done(struct drm_crtc *crtc)
 	struct mdp5_crtc_state *mdp5_cstate = to_mdp5_crtc_state(crtc->state);
 	int ret;
 
+	if (atomic_read(&mdp5_crtc->pp_complete))
+		return;
+
 	ret = wait_for_completion_timeout(&mdp5_crtc->pp_completion,
 						msecs_to_jiffies(50));
 	if (ret == 0)
@@ -1319,6 +1336,7 @@ struct drm_crtc *mdp5_crtc_init(struct drm_device *dev,
 	spin_lock_init(&mdp5_crtc->lm_lock);
 	spin_lock_init(&mdp5_crtc->cursor.lock);
 	init_completion(&mdp5_crtc->pp_completion);
+	atomic_set(&mdp5_crtc->pp_complete, 0);
 
 	mdp5_crtc->vblank.irq = mdp5_crtc_vblank_irq;
 	mdp5_crtc->err.irq = mdp5_crtc_err_irq;
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
index 1220f2b20e05..490309c36f56 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.c
@@ -49,6 +49,8 @@ struct mdp5_ctl {
 	/* True if the current CTL has FLUSH bits pending for single FLUSH. */
 	bool flush_pending;
 
+	bool busy;
+
 	struct mdp5_ctl *pair; /* Paired CTL to be flushed together */
 };
 
@@ -209,6 +211,11 @@ static void send_start_signal(struct mdp5_ctl *ctl)
 	unsigned long flags;
 
 	spin_lock_irqsave(&ctl->hw_lock, flags);
+	if (ctl->busy) {
+		spin_unlock_irqrestore(&ctl->hw_lock, flags);
+		return;
+	}
+	ctl->busy = true;
 	ctl_write(ctl, REG_MDP5_CTL_START(ctl->id), 1);
 	spin_unlock_irqrestore(&ctl->hw_lock, flags);
 }
@@ -242,6 +249,11 @@ int mdp5_ctl_set_encoder_state(struct mdp5_ctl *ctl,
 	return 0;
 }
 
+void mdp5_ctl_commit_finished(struct mdp5_ctl *ctl)
+{
+	ctl->busy = false;
+}
+
 /*
  * Note:
  * CTL registers need to be flushed after calling this function
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
index c2af68aa77ae..ef7409081bf8 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_ctl.h
@@ -69,6 +69,7 @@ u32 mdp_ctl_flush_mask_cursor(int cursor_id);
 u32 mdp_ctl_flush_mask_encoder(struct mdp5_interface *intf);
 
 /* @flush_mask: see CTL flush masks definitions below */
+void mdp5_ctl_commit_finished(struct mdp5_ctl *ctl);
 u32 mdp5_ctl_commit(struct mdp5_ctl *ctl, struct mdp5_pipeline *pipeline,
 		u32 flush_mask, bool start);
 u32 mdp5_ctl_get_commit_status(struct mdp5_ctl *ctl);