/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp    = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
	uint32_t cpp;		/* bytes/chars per pixel */
	uint32_t slot_w;	/* width of each slot (in pixels) */
	uint32_t slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT]  = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE]  = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
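
/*
 * A worked example of the table above, assuming the SLOT_WIDTH_BITS /
 * SLOT_HEIGHT_BITS values of 6/6 from omap_dmm_priv.h: TILFMT_16BIT
 * (x_shft = 0, y_shft = 1, cpp = 2) gives
 *
 *	slot_w = 1 << (6 - 0) = 64 pixels
 *	slot_h = 1 << (6 - 1) = 32 pixels
 *
 * i.e. every bit spent on bytes-per-pixel is taken from the X or Y
 * pixel range, halving the slot dimension on that axis.
 */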

/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR]  = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}

/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
	struct dmm *dmm = engine->dmm;
	uint32_t r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err)
			return -EFAULT;

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0)
			return -ETIMEDOUT;

		udelay(1);
	}

	return 0;
}

static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}

/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;

	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}

/**
 * Add region to DMM transaction. If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (ie. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, uint32_t npages, uint32_t roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	uint32_t *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns * rows;

	pat = alloc_dma(txn, sizeof(struct pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (uint32_t)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
			.start = 1,
			.lut_id = engine->tcm->lut_id,
		};

	data = alloc_dma(txn, 4 * i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
}

/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
		}
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}
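
/*
 * A refill transaction is therefore a three-step sequence, exactly as
 * fill() below uses it:
 *
 *	txn = dmm_txn_init(omap_dmm, area->tcm);    (claims an engine)
 *	dmm_txn_append(txn, &p_area, pages, npages, roll);
 *	ret = dmm_txn_commit(txn, wait);
 *
 * There is no separate teardown: the engine is always handed back via
 * release_engine(), either in commit (on error or after a synchronous
 * wait) or from the IRQ handler for an asynchronous commit.
 */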

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	/*
	 * FIXME
	 *
	 * Asynchronous fill does not work reliably, as the driver does not
	 * handle errors in the async code paths. The fill operation may
	 * silently fail, leading to leaking DMM engines, which may eventually
	 * lead to deadlock if we run out of DMM engines.
	 *
	 * For now, always set 'wait' so that we only use sync fills. Async
	 * fills should be fixed, or alternatively we could decide to only
	 * support sync fills and so the whole async code path could be removed.
	 */

	wait = true;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
			.x0 = slice.p0.x, .y0 = slice.p0.y,
			.x1 = slice.p1.x, .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
		uint16_t h, uint16_t align)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	size_t slot_bytes;

	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			&block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
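
/*
 * A worked example of the alignment conversion above, assuming
 * SLOT_WIDTH_BITS = 6 from omap_dmm_priv.h: TILFMT_32BIT has
 * slot_w = 32 and cpp = 4, so slot_bytes = 128 and min_align stays 128.
 * A caller passing align = 256 ends up with ALIGN(256, 128) / 128 = 2
 * slots; a caller passing align = 0 falls back to min_align, i.e. one
 * slot.
 */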

struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

/* note: if you have pinned pages, you should have already unpinned them
 * first!
 */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}
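
/*
 * Typical lifecycle of a tiler block, as a sketch (error handling
 * omitted; all of the calls below are defined in this file):
 *
 *	block = tiler_reserve_2d(TILFMT_16BIT, w, h, 0);
 *	tiler_pin(block, pages, npages, 0, true);    (program the PAT)
 *	... access through tiler_ssptr(block) ...
 *	tiler_unpin(block);                          (back to dummy page)
 *	tiler_release(block);                        (free the tcm area)
 */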

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	if (x > x_mask || y > y_mask) {
		DBG("invalid coords: %u > %u || %u > %u",
				x, x_mask, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}
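
/*
 * Worked example, assuming the CONT_WIDTH_BITS/CONT_HEIGHT_BITS values
 * of 14/13 from omap_dmm_priv.h: for TILFMT_16BIT (x_shft = 0,
 * y_shft = 1) in the default view (orient = 0), x_bits = 14,
 * y_bits = 12 and alignment = 1, so
 *
 *	tmp  = (y << 14) + x
 *	addr = TIL_ADDR(tmp << 1, 0, TILFMT_16BIT)
 *
 * i.e. consecutive rows are 1 << 15 bytes apart, which matches
 * tiler_stride() below.
 */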

dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
		uint32_t x, uint32_t y)
{
	struct tcm_pt *p = &block->area.p0;

	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}

size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

uint32_t tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}

static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_wc(omap_dmm->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				omap_dmm->refill_va, omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}

static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->base = ioremap(mem->start, SZ_2K);

	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}

	omap_dmm->dev = &dev->dev;

	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);

	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				sizeof(struct refill_engine), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	/* Each LUT is associated with a TCM (container manager). The
	 * lut_id identifies the correct LUT for programming during
	 * refill operations.
	 */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode. Programming must use
		 * a y offset that is added to all y coordinates. LUT id
		 * is still 0, because it is the same LUT, just the upper
		 * 128 lines.
		 */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
	return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
		char c, bool ovw)
{
	int x, y;

	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
		char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;

	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
		struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
				256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
					a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
					0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
		struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
				a->p0.x, a->p1.x);
}

int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;

	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
							&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");