/*
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include "msm_drv.h"
#include "mdp4_kms.h"

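/*
 * Tracks one mdp4_irq_wait() caller: an embedded handler whose callback
 * decrements 'count' and wakes any sleeper on the shared wait queue.
 */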
struct mdp4_irq_wait {
	struct mdp4_irq irq;
	int count;
};

static DECLARE_WAIT_QUEUE_HEAD(wait_event);

static DEFINE_SPINLOCK(list_lock);

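/*
 * Recompute the hw interrupt-enable mask from the vblank mask plus the
 * irqmask of every registered handler. Caller must hold list_lock.
 */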
static void update_irq(struct mdp4_kms *mdp4_kms)
{
	struct mdp4_irq *irq;
	uint32_t irqmask = mdp4_kms->vblank_mask;

	/* spin_is_locked() is always false on UP builds, which would make
	 * BUG_ON(!spin_is_locked(...)) fire spuriously, so assert via the
	 * config-aware helper instead:
	 */
	assert_spin_locked(&list_lock);

	list_for_each_entry(irq, &mdp4_kms->irq_list, node)
		irqmask |= irq->irqmask;

	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, irqmask);
}

static void update_irq_unlocked(struct mdp4_kms *mdp4_kms)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

static void mdp4_irq_error_handler(struct mdp4_irq *irq, uint32_t irqstatus)
{
	DRM_ERROR("errors: %08x\n", irqstatus);
}

void mdp4_irq_preinstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);

	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, 0xffffffff);
}

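/*
 * Install a permanent handler so interface underrun errors (the UDERRUN
 * spelling follows the generated register headers) at least get logged
 * via mdp4_irq_error_handler().
 */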
int mdp4_irq_postinstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct mdp4_irq *error_handler = &mdp4_kms->error_handler;

	INIT_LIST_HEAD(&mdp4_kms->irq_list);

	error_handler->irq = mdp4_irq_error_handler;
	error_handler->irqmask = MDP4_IRQ_PRIMARY_INTF_UDERRUN |
			MDP4_IRQ_EXTERNAL_INTF_UDERRUN;

	mdp4_irq_register(mdp4_kms, error_handler);

	return 0;
}

void mdp4_irq_uninstall(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);

	mdp4_write(mdp4_kms, REG_MDP4_INTR_ENABLE, 0x00000000);
}

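/*
 * Top-level IRQ dispatch: ack everything we saw, forward vblank bits to
 * the DRM core, then walk the handler list. The lock is dropped around
 * each callback, so the in_irq flag tells register/unregister to leave
 * the enable-mask reprogramming to us when the walk finishes.
 */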
irqreturn_t mdp4_irq(struct msm_kms *kms)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	struct drm_device *dev = mdp4_kms->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct mdp4_irq *handler, *n;
	unsigned long flags;
	unsigned int id;
	uint32_t status;

	status = mdp4_read(mdp4_kms, REG_MDP4_INTR_STATUS);
	mdp4_write(mdp4_kms, REG_MDP4_INTR_CLEAR, status);

	VERB("status=%08x", status);

	for (id = 0; id < priv->num_crtcs; id++)
		if (status & mdp4_crtc_vblank(priv->crtcs[id]))
			drm_handle_vblank(dev, id);

	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->in_irq = true;
	list_for_each_entry_safe(handler, n, &mdp4_kms->irq_list, node) {
		if (handler->irqmask & status) {
			/* drop the lock around the callback so it can
			 * safely take list_lock itself, eg. to register
			 * or unregister handlers:
			 */
			spin_unlock_irqrestore(&list_lock, flags);
			handler->irq(handler, handler->irqmask & status);
			spin_lock_irqsave(&list_lock, flags);
		}
	}
	mdp4_kms->in_irq = false;
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);

	return IRQ_HANDLED;
}

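/*
 * Vblank on/off requests from the DRM core just update the cached mask
 * and reprogram the enable register under list_lock.
 */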
int mdp4_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->vblank_mask |= mdp4_crtc_vblank(crtc);
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);

	return 0;
}

void mdp4_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
{
	struct mdp4_kms *mdp4_kms = to_mdp4_kms(kms);
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	mdp4_kms->vblank_mask &= ~mdp4_crtc_vblank(crtc);
	update_irq(mdp4_kms);
	spin_unlock_irqrestore(&list_lock, flags);
}

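/*
 * Callback used by mdp4_irq_wait(): signal completion and wake sleepers.
 */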
static void wait_irq(struct mdp4_irq *irq, uint32_t irqstatus)
{
	struct mdp4_irq_wait *wait =
			container_of(irq, struct mdp4_irq_wait, irq);

	wait->count--;
	wake_up_all(&wait_event);
}

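/*
 * Synchronously wait for one occurrence of the given irqs: register a
 * temporary on-stack handler and sleep until its callback has fired.
 * (Note the wait_event() macro here takes the wait-queue head that
 * happens to share its name.)
 */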
void mdp4_irq_wait(struct mdp4_kms *mdp4_kms, uint32_t irqmask)
{
	struct mdp4_irq_wait wait = {
		.irq = {
			.irq = wait_irq,
			.irqmask = irqmask,
		},
		.count = 1,
	};

	mdp4_irq_register(mdp4_kms, &wait.irq);
	wait_event(wait_event, (wait.count <= 0));
	mdp4_irq_unregister(mdp4_kms, &wait.irq);
}

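/*
 * Add a handler to the list and reprogram the enable mask, unless we
 * are inside mdp4_irq(), which will reprogram it on exit anyway.
 */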
void mdp4_irq_register(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (!irq->registered) {
		irq->registered = true;
		list_add(&irq->node, &mdp4_kms->irq_list);
		needs_update = !mdp4_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		update_irq_unlocked(mdp4_kms);
}

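/*
 * Symmetric removal; same deferral rule as mdp4_irq_register().
 */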
void mdp4_irq_unregister(struct mdp4_kms *mdp4_kms, struct mdp4_irq *irq)
{
	unsigned long flags;
	bool needs_update = false;

	spin_lock_irqsave(&list_lock, flags);

	if (irq->registered) {
		irq->registered = false;
		list_del(&irq->node);
		needs_update = !mdp4_kms->in_irq;
	}

	spin_unlock_irqrestore(&list_lock, flags);

	if (needs_update)
		update_irq_unlocked(mdp4_kms);
}