/*
 * Copyright (c) 2014 The Linux Foundation. All rights reserved.
 * Copyright (C) 2013 Red Hat
 * Author: Rob Clark <robdclark@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published by
 * the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/clk.h>
#include <linux/component.h>
#include <linux/pm_runtime.h>
#include "vc4_drv.h"
#include "vc4_regs.h"

#ifdef CONFIG_DEBUG_FS
#define REGDEF(reg) { reg, #reg }
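/* Register offsets paired with their names, used by the debugfs register
 * dump below.
 */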
static const struct {
	uint32_t reg;
	const char *name;
} vc4_reg_defs[] = {
	REGDEF(V3D_IDENT0),
	REGDEF(V3D_IDENT1),
	REGDEF(V3D_IDENT2),
	REGDEF(V3D_SCRATCH),
	REGDEF(V3D_L2CACTL),
	REGDEF(V3D_SLCACTL),
	REGDEF(V3D_INTCTL),
	REGDEF(V3D_INTENA),
	REGDEF(V3D_INTDIS),
	REGDEF(V3D_CT0CS),
	REGDEF(V3D_CT1CS),
	REGDEF(V3D_CT0EA),
	REGDEF(V3D_CT1EA),
	REGDEF(V3D_CT0CA),
	REGDEF(V3D_CT1CA),
	REGDEF(V3D_CT00RA0),
	REGDEF(V3D_CT01RA0),
	REGDEF(V3D_CT0LC),
	REGDEF(V3D_CT1LC),
	REGDEF(V3D_CT0PC),
	REGDEF(V3D_CT1PC),
	REGDEF(V3D_PCS),
	REGDEF(V3D_BFC),
	REGDEF(V3D_RFC),
	REGDEF(V3D_BPCA),
	REGDEF(V3D_BPCS),
	REGDEF(V3D_BPOA),
	REGDEF(V3D_BPOS),
	REGDEF(V3D_BXCF),
	REGDEF(V3D_SQRSV0),
	REGDEF(V3D_SQRSV1),
	REGDEF(V3D_SQCNTL),
	REGDEF(V3D_SRQPC),
	REGDEF(V3D_SRQUA),
	REGDEF(V3D_SRQUL),
	REGDEF(V3D_SRQCS),
	REGDEF(V3D_VPACNTL),
	REGDEF(V3D_VPMBASE),
	REGDEF(V3D_PCTRC),
	REGDEF(V3D_PCTRE),
	REGDEF(V3D_PCTR0),
	REGDEF(V3D_PCTRS0),
	REGDEF(V3D_PCTR1),
	REGDEF(V3D_PCTRS1),
	REGDEF(V3D_PCTR2),
	REGDEF(V3D_PCTRS2),
	REGDEF(V3D_PCTR3),
	REGDEF(V3D_PCTRS3),
	REGDEF(V3D_PCTR4),
	REGDEF(V3D_PCTRS4),
	REGDEF(V3D_PCTR5),
	REGDEF(V3D_PCTRS5),
	REGDEF(V3D_PCTR6),
	REGDEF(V3D_PCTRS6),
	REGDEF(V3D_PCTR7),
	REGDEF(V3D_PCTRS7),
	REGDEF(V3D_PCTR8),
	REGDEF(V3D_PCTRS8),
	REGDEF(V3D_PCTR9),
	REGDEF(V3D_PCTRS9),
	REGDEF(V3D_PCTR10),
	REGDEF(V3D_PCTRS10),
	REGDEF(V3D_PCTR11),
	REGDEF(V3D_PCTRS11),
	REGDEF(V3D_PCTR12),
	REGDEF(V3D_PCTRS12),
	REGDEF(V3D_PCTR13),
	REGDEF(V3D_PCTRS13),
	REGDEF(V3D_PCTR14),
	REGDEF(V3D_PCTRS14),
	REGDEF(V3D_PCTR15),
	REGDEF(V3D_PCTRS15),
	REGDEF(V3D_DBGE),
	REGDEF(V3D_FDBGO),
	REGDEF(V3D_FDBGB),
	REGDEF(V3D_FDBGR),
	REGDEF(V3D_FDBGS),
	REGDEF(V3D_ERRSTAT),
};

int vc4_v3d_debugfs_regs(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	int i;

	for (i = 0; i < ARRAY_SIZE(vc4_reg_defs); i++) {
		seq_printf(m, "%s (0x%04x): 0x%08x\n",
			   vc4_reg_defs[i].name, vc4_reg_defs[i].reg,
			   V3D_READ(vc4_reg_defs[i].reg));
	}

	return 0;
}

int vc4_v3d_debugfs_ident(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = (struct drm_info_node *)m->private;
	struct drm_device *dev = node->minor->dev;
	struct vc4_dev *vc4 = to_vc4_dev(dev);
	uint32_t ident1 = V3D_READ(V3D_IDENT1);
	uint32_t nslc = VC4_GET_FIELD(ident1, V3D_IDENT1_NSLC);
	uint32_t tups = VC4_GET_FIELD(ident1, V3D_IDENT1_TUPS);
	uint32_t qups = VC4_GET_FIELD(ident1, V3D_IDENT1_QUPS);

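	/* IDENT1 reports per-slice TMU and QPU counts, so the totals below
	 * are the per-slice values scaled by the number of slices.
	 */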
	seq_printf(m, "Revision: %d\n",
		   VC4_GET_FIELD(ident1, V3D_IDENT1_REV));
	seq_printf(m, "Slices: %d\n", nslc);
	seq_printf(m, "TMUs: %d\n", nslc * tups);
	seq_printf(m, "QPUs: %d\n", nslc * qups);
	seq_printf(m, "Semaphores: %d\n",
		   VC4_GET_FIELD(ident1, V3D_IDENT1_NSEM));

	return 0;
}
#endif /* CONFIG_DEBUG_FS */

static void vc4_v3d_init_hw(struct drm_device *dev)
{
	struct vc4_dev *vc4 = to_vc4_dev(dev);

	/* Take all the memory that would have been reserved for user
	 * QPU programs, since we don't have an interface for running
	 * them, anyway.
	 */
	V3D_WRITE(V3D_VPMBASE, 0);
}

int vc4_v3d_get_bin_slot(struct vc4_dev *vc4)
{
	struct drm_device *dev = vc4->dev;
	unsigned long irqflags;
	int slot;
	uint64_t seqno = 0;
	struct vc4_exec_info *exec;

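	/* bin_alloc_used is a bitmask of which 512KB slots of the bin BO
	 * are currently in use, so a free slot is simply a clear bit.
	 */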
try_again:
	spin_lock_irqsave(&vc4->job_lock, irqflags);
	slot = ffs(~vc4->bin_alloc_used);
	if (slot != 0) {
		/* Switch from ffs() bit index to a 0-based index. */
		slot--;
		vc4->bin_alloc_used |= BIT(slot);
		spin_unlock_irqrestore(&vc4->job_lock, irqflags);
		return slot;
	}

	/* Couldn't find an open slot.  Wait for render to complete
	 * and try again.
	 */
	exec = vc4_last_render_job(vc4);
	if (exec)
		seqno = exec->seqno;
	spin_unlock_irqrestore(&vc4->job_lock, irqflags);

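	/* If a render job was still outstanding, wait for it to retire and
	 * retry the slot search; with nothing in flight no slot will free
	 * up, so fail the allocation instead.
	 */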
	if (seqno) {
		int ret = vc4_wait_for_seqno(dev, seqno, ~0ull, true);

		if (ret == 0)
			goto try_again;

		return ret;
	}

	return -ENOMEM;
}

/**
 * vc4_allocate_bin_bo() - allocates the memory that will be used for
 * tile binning.
 *
 * The binner has a limitation that the addresses in the tile state
 * buffer that point into the tile alloc buffer or binner overflow
 * memory only have 28 bits (256MB), and the top 4 on the bus for
 * tile alloc references end up coming from the tile state buffer's
 * address.
 *
 * To work around this, we allocate a single large buffer while V3D is
 * in use, make sure that it has the top 4 bits constant across its
 * entire extent, and then put the tile state, tile alloc, and binner
 * overflow memory inside that buffer.
 *
 * This creates a limitation where we may not be able to execute a job
 * if it doesn't fit within the buffer that we allocated up front.
 * However, it turns out that 16MB is "enough for anybody", and
 * real-world applications run into allocation failures from the
 * overall CMA pool before they make scenes complicated enough to run
 * out of bin space.
 */
int
vc4_allocate_bin_bo(struct drm_device *drm)
{
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_v3d *v3d = vc4->v3d;
	uint32_t size = 16 * 1024 * 1024;
	int ret = 0;
	struct list_head list;

	/* We may need to try allocating more than once to get a BO
	 * that doesn't cross 256MB.  Track the ones we've allocated
	 * that failed so far, so that we can free them when we've got
	 * one that succeeded (if we freed them right away, our next
	 * allocation would probably be the same chunk of memory).
	 */
	INIT_LIST_HEAD(&list);

	while (true) {
		struct vc4_bo *bo = vc4_bo_create(drm, size, true);

		if (IS_ERR(bo)) {
			ret = PTR_ERR(bo);

			dev_err(&v3d->pdev->dev,
				"Failed to allocate memory for tile binning: "
				"%d. You may need to enable CMA or give it "
				"more memory.",
				ret);
			break;
		}

		/* Check if this BO won't trigger the addressing bug. */
		if ((bo->base.paddr & 0xf0000000) ==
		    ((bo->base.paddr + bo->base.base.size - 1) & 0xf0000000)) {
			vc4->bin_bo = bo;

			/* Set up for allocating 512KB chunks of
			 * binner memory.  The biggest allocation we
			 * need to do is for the initial tile alloc +
			 * tile state buffer.  We can render to a
			 * maximum of ((2048*2048) / (32*32) = 4096
			 * tiles in a frame (until we do floating
			 * point rendering, at which point it would be
			 * 8192).  Tile state is 48b/tile (rounded to
			 * a page), and tile alloc is 32b/tile
			 * (rounded to a page), plus a page of extra,
			 * for a total of 320kb for our worst-case.
			 * We choose 512kb so that it divides evenly
			 * into our 16MB, and the rest of the 512kb
			 * will be used as storage for the overflow
			 * from the initial 32b CL per bin.
			 */
			vc4->bin_alloc_size = 512 * 1024;
			vc4->bin_alloc_used = 0;
			vc4->bin_alloc_overflow = 0;
			WARN_ON_ONCE(sizeof(vc4->bin_alloc_used) * 8 !=
				     bo->base.base.size / vc4->bin_alloc_size);

			break;
		}

		/* Put it on the list to free later, and try again. */
		list_add(&bo->unref_head, &list);
	}

	/* Free all the BOs we allocated but didn't choose. */
	while (!list_empty(&list)) {
		struct vc4_bo *bo = list_last_entry(&list,
						    struct vc4_bo, unref_head);

		list_del(&bo->unref_head);
		drm_gem_object_put_unlocked(&bo->base.base);
	}

	return ret;
}

#ifdef CONFIG_PM
static int vc4_v3d_runtime_suspend(struct device *dev)
{
	struct vc4_v3d *v3d = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = v3d->vc4;

	vc4_irq_uninstall(vc4->dev);

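	/* Drop the binner BO while powered off; runtime resume allocates a
	 * fresh one.
	 */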
	drm_gem_object_put_unlocked(&vc4->bin_bo->base.base);
	vc4->bin_bo = NULL;

	clk_disable_unprepare(v3d->clk);

	return 0;
}

static int vc4_v3d_runtime_resume(struct device *dev)
{
	struct vc4_v3d *v3d = dev_get_drvdata(dev);
	struct vc4_dev *vc4 = v3d->vc4;
	int ret;

	ret = vc4_allocate_bin_bo(vc4->dev);
	if (ret)
		return ret;

	ret = clk_prepare_enable(v3d->clk);
	if (ret != 0)
		return ret;

	vc4_v3d_init_hw(vc4->dev);
	vc4_irq_postinstall(vc4->dev);

	return 0;
}
#endif

static int vc4_v3d_bind(struct device *dev, struct device *master, void *data)
{
	struct platform_device *pdev = to_platform_device(dev);
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);
	struct vc4_v3d *v3d = NULL;
	int ret;

	v3d = devm_kzalloc(&pdev->dev, sizeof(*v3d), GFP_KERNEL);
	if (!v3d)
		return -ENOMEM;

	dev_set_drvdata(dev, v3d);

	v3d->pdev = pdev;

	v3d->regs = vc4_ioremap_regs(pdev, 0);
	if (IS_ERR(v3d->regs))
		return PTR_ERR(v3d->regs);

	vc4->v3d = v3d;
	v3d->vc4 = vc4;

	v3d->clk = devm_clk_get(dev, NULL);
	if (IS_ERR(v3d->clk)) {
		int ret = PTR_ERR(v3d->clk);

		if (ret == -ENOENT) {
			/* bcm2835 didn't have a clock reference in the DT. */
			ret = 0;
			v3d->clk = NULL;
		} else {
			if (ret != -EPROBE_DEFER)
				dev_err(dev, "Failed to get V3D clock: %d\n",
					ret);
			return ret;
		}
	}

	if (V3D_READ(V3D_IDENT0) != V3D_EXPECTED_IDENT0) {
		DRM_ERROR("V3D_IDENT0 read 0x%08x instead of 0x%08x\n",
			  V3D_READ(V3D_IDENT0), V3D_EXPECTED_IDENT0);
		return -EINVAL;
	}

	ret = clk_prepare_enable(v3d->clk);
	if (ret != 0)
		return ret;

	ret = vc4_allocate_bin_bo(drm);
	if (ret) {
		clk_disable_unprepare(v3d->clk);
		return ret;
	}

	/* Reset the binner overflow address/size at setup, to be sure
	 * we don't reuse an old one.
	 */
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

	vc4_v3d_init_hw(drm);

	ret = drm_irq_install(drm, platform_get_irq(pdev, 0));
	if (ret) {
		DRM_ERROR("Failed to install IRQ handler\n");
		return ret;
	}

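	/* Enable runtime PM with autosuspend so V3D can power down once it
	 * has been idle for a short while.
	 */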
	pm_runtime_set_active(dev);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_autosuspend_delay(dev, 40); /* a little over 2 frames. */
	pm_runtime_enable(dev);

	return 0;
}

static void vc4_v3d_unbind(struct device *dev, struct device *master,
			   void *data)
{
	struct drm_device *drm = dev_get_drvdata(master);
	struct vc4_dev *vc4 = to_vc4_dev(drm);

	pm_runtime_disable(dev);

	drm_irq_uninstall(drm);

	/* Disable the binner's overflow memory address, so the next
	 * driver probe (if any) doesn't try to reuse our old
	 * allocation.
	 */
	V3D_WRITE(V3D_BPOA, 0);
	V3D_WRITE(V3D_BPOS, 0);

	vc4->v3d = NULL;
}

static const struct dev_pm_ops vc4_v3d_pm_ops = {
	SET_RUNTIME_PM_OPS(vc4_v3d_runtime_suspend, vc4_v3d_runtime_resume, NULL)
};

static const struct component_ops vc4_v3d_ops = {
	.bind = vc4_v3d_bind,
	.unbind = vc4_v3d_unbind,
};

static int vc4_v3d_dev_probe(struct platform_device *pdev)
{
	return component_add(&pdev->dev, &vc4_v3d_ops);
}

static int vc4_v3d_dev_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &vc4_v3d_ops);
	return 0;
}

static const struct of_device_id vc4_v3d_dt_match[] = {
	{ .compatible = "brcm,bcm2835-v3d" },
	{ .compatible = "brcm,cygnus-v3d" },
	{ .compatible = "brcm,vc4-v3d" },
	{}
};

struct platform_driver vc4_v3d_driver = {
	.probe = vc4_v3d_dev_probe,
	.remove = vc4_v3d_dev_remove,
	.driver = {
		.name = "vc4_v3d",
		.of_match_table = vc4_v3d_dt_match,
		.pm = &vc4_v3d_pm_ops,
	},
};