/*
 * DMM IOMMU driver support functions for TI OMAP processors.
 *
 * Copyright (C) 2011 Texas Instruments Incorporated - http://www.ti.com/
 * Author: Rob Clark <rob@ti.com>
 *         Andy Gross <andy.gross@ti.com>
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation version 2.
 *
 * This program is distributed "as is" WITHOUT ANY WARRANTY of any
 * kind, whether express or implied; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 */

#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/platform_device.h> /* platform_device() */
#include <linux/sched.h>
#include <linux/seq_file.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/vmalloc.h>
#include <linux/wait.h>

#include "omap_dmm_tiler.h"
#include "omap_dmm_priv.h"

#define DMM_DRIVER_NAME "dmm"

/* mappings for associating views to luts */
static struct tcm *containers[TILFMT_NFORMATS];
static struct dmm *omap_dmm;

#if defined(CONFIG_OF)
static const struct of_device_id dmm_of_match[];
#endif

/* global spinlock for protecting lists */
static DEFINE_SPINLOCK(list_lock);

/* Geometry table */
#define GEOM(xshift, yshift, bytes_per_pixel) { \
		.x_shft = (xshift), \
		.y_shft = (yshift), \
		.cpp = (bytes_per_pixel), \
		.slot_w = 1 << (SLOT_WIDTH_BITS - (xshift)), \
		.slot_h = 1 << (SLOT_HEIGHT_BITS - (yshift)), \
	}

static const struct {
	uint32_t x_shft;	/* unused X-bits (as part of bpp) */
	uint32_t y_shft;	/* unused Y-bits (as part of bpp) */
	uint32_t cpp;		/* bytes/chars per pixel */
	uint32_t slot_w;	/* width of each slot (in pixels) */
	uint32_t slot_h;	/* height of each slot (in pixels) */
} geom[TILFMT_NFORMATS] = {
	[TILFMT_8BIT] = GEOM(0, 0, 1),
	[TILFMT_16BIT] = GEOM(0, 1, 2),
	[TILFMT_32BIT] = GEOM(1, 1, 4),
	[TILFMT_PAGE] = GEOM(SLOT_WIDTH_BITS, SLOT_HEIGHT_BITS, 1),
};
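
/*
 * Note on the table above: each slot covers exactly one 4KiB page of
 * physical memory regardless of format -- 8-bit slots span 64x64 px at
 * 1 byte/px, 16-bit slots 64x32 px at 2 bytes/px, 32-bit slots 32x32 px
 * at 4 bytes/px (assuming the SLOT_WIDTH_BITS == SLOT_HEIGHT_BITS == 6
 * definitions in omap_dmm_priv.h). The PAT refill code below therefore
 * programs exactly one page address per slot.
 */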

/* lookup table for registers w/ per-engine instances */
static const uint32_t reg[][4] = {
	[PAT_STATUS] = {DMM_PAT_STATUS__0, DMM_PAT_STATUS__1,
			DMM_PAT_STATUS__2, DMM_PAT_STATUS__3},
	[PAT_DESCR] = {DMM_PAT_DESCR__0, DMM_PAT_DESCR__1,
			DMM_PAT_DESCR__2, DMM_PAT_DESCR__3},
};

static u32 dmm_read(struct dmm *dmm, u32 reg)
{
	return readl(dmm->base + reg);
}

static void dmm_write(struct dmm *dmm, u32 val, u32 reg)
{
	writel(val, dmm->base + reg);
}

/* simple allocator to grab next 16 byte aligned memory from txn */
static void *alloc_dma(struct dmm_txn *txn, size_t sz, dma_addr_t *pa)
{
	void *ptr;
	struct refill_engine *engine = txn->engine_handle;

	/* dmm programming requires 16 byte aligned addresses */
	txn->current_pa = round_up(txn->current_pa, 16);
	txn->current_va = (void *)round_up((long)txn->current_va, 16);

	ptr = txn->current_va;
	*pa = txn->current_pa;

	txn->current_pa += sz;
	txn->current_va += sz;

	BUG_ON((txn->current_va - engine->refill_va) > REFILL_BUFFER_SIZE);

	return ptr;
}
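
/*
 * alloc_dma() is a simple bump allocator over the engine's refill
 * buffer: chunks are never freed individually, and the whole buffer is
 * implicitly recycled when dmm_txn_init() resets current_va/current_pa
 * for the next transaction.
 */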

/* check status and spin until wait_mask comes true */
static int wait_status(struct refill_engine *engine, uint32_t wait_mask)
{
	struct dmm *dmm = engine->dmm;
	uint32_t r = 0, err, i;

	i = DMM_FIXED_RETRY_COUNT;
	while (true) {
		r = dmm_read(dmm, reg[PAT_STATUS][engine->id]);
		err = r & DMM_PATSTATUS_ERR;
		if (err) {
			dev_err(dmm->dev,
				"%s: error (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -EFAULT;
		}

		if ((r & wait_mask) == wait_mask)
			break;

		if (--i == 0) {
			dev_err(dmm->dev,
				"%s: timeout (engine%d). PAT_STATUS: 0x%08x\n",
				__func__, engine->id, r);
			return -ETIMEDOUT;
		}

		udelay(1);
	}

	return 0;
}

static void release_engine(struct refill_engine *engine)
{
	unsigned long flags;

	spin_lock_irqsave(&list_lock, flags);
	list_add(&engine->idle_node, &omap_dmm->idle_head);
	spin_unlock_irqrestore(&list_lock, flags);

	atomic_inc(&omap_dmm->engine_counter);
	wake_up_interruptible(&omap_dmm->engine_queue);
}

static irqreturn_t omap_dmm_irq_handler(int irq, void *arg)
{
	struct dmm *dmm = arg;
	uint32_t status = dmm_read(dmm, DMM_PAT_IRQSTATUS);
	int i;

	/* ack IRQ */
	dmm_write(dmm, status, DMM_PAT_IRQSTATUS);

	for (i = 0; i < dmm->num_engines; i++) {
		if (status & DMM_IRQSTAT_ERR_MASK)
			dev_err(dmm->dev,
				"irq error(engine%d): IRQSTAT 0x%02x\n",
				i, status & 0xff);

		if (status & DMM_IRQSTAT_LST) {
			if (dmm->engines[i].async)
				release_engine(&dmm->engines[i]);

			complete(&dmm->engines[i].compl);
		}

		status >>= 8;
	}

	return IRQ_HANDLED;
}
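
/*
 * DMM_PAT_IRQSTATUS packs one status byte per refill engine, which is
 * why the loop above masks the low byte and shifts right by 8 for each
 * engine. DMM_IRQSTAT_LST (end of descriptor list) marks transaction
 * completion; engines running async are recycled directly from here.
 */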

/**
 * Get a handle for a DMM transaction
 */
static struct dmm_txn *dmm_txn_init(struct dmm *dmm, struct tcm *tcm)
{
	struct dmm_txn *txn = NULL;
	struct refill_engine *engine = NULL;
	int ret;
	unsigned long flags;


	/* wait until an engine is available */
	ret = wait_event_interruptible(omap_dmm->engine_queue,
		atomic_add_unless(&omap_dmm->engine_counter, -1, 0));
	if (ret)
		return ERR_PTR(ret);

	/* grab an idle engine */
	spin_lock_irqsave(&list_lock, flags);
	if (!list_empty(&dmm->idle_head)) {
		engine = list_entry(dmm->idle_head.next, struct refill_engine,
					idle_node);
		list_del(&engine->idle_node);
	}
	spin_unlock_irqrestore(&list_lock, flags);

	BUG_ON(!engine);

	txn = &engine->txn;
	engine->tcm = tcm;
	txn->engine_handle = engine;
	txn->last_pat = NULL;
	txn->current_va = engine->refill_va;
	txn->current_pa = engine->refill_pa;

	return txn;
}

/**
 * Add region to DMM transaction.  If pages or pages[i] is NULL, then the
 * corresponding slot is cleared (i.e. dummy_pa is programmed)
 */
static void dmm_txn_append(struct dmm_txn *txn, struct pat_area *area,
		struct page **pages, uint32_t npages, uint32_t roll)
{
	dma_addr_t pat_pa = 0, data_pa = 0;
	uint32_t *data;
	struct pat *pat;
	struct refill_engine *engine = txn->engine_handle;
	int columns = (1 + area->x1 - area->x0);
	int rows = (1 + area->y1 - area->y0);
	int i = columns*rows;

	pat = alloc_dma(txn, sizeof(*pat), &pat_pa);

	if (txn->last_pat)
		txn->last_pat->next_pa = (uint32_t)pat_pa;

	pat->area = *area;

	/* adjust Y coordinates based off of container parameters */
	pat->area.y0 += engine->tcm->y_offset;
	pat->area.y1 += engine->tcm->y_offset;

	pat->ctrl = (struct pat_ctrl){
			.start = 1,
			.lut_id = engine->tcm->lut_id,
		};

	data = alloc_dma(txn, 4*i, &data_pa);
	/* FIXME: what if data_pa is more than 32-bit ? */
	pat->data_pa = data_pa;

	while (i--) {
		int n = i + roll;
		if (n >= npages)
			n -= npages;
		data[i] = (pages && pages[n]) ?
			page_to_phys(pages[n]) : engine->dmm->dummy_pa;
	}

	txn->last_pat = pat;
}
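
/*
 * Each dmm_txn_append() call allocates one PAT descriptor (area,
 * control word and page-address array) from the refill buffer and
 * links it to the previous one via next_pa, building a DMA-visible
 * singly-linked list. dmm_txn_commit() terminates the list and hands
 * its head to the hardware.
 */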

/**
 * Commit the DMM transaction.
 */
static int dmm_txn_commit(struct dmm_txn *txn, bool wait)
{
	int ret = 0;
	struct refill_engine *engine = txn->engine_handle;
	struct dmm *dmm = engine->dmm;

	if (!txn->last_pat) {
		dev_err(engine->dmm->dev, "need at least one txn\n");
		ret = -EINVAL;
		goto cleanup;
	}

	txn->last_pat->next_pa = 0;

	/* write to PAT_DESCR to clear out any pending transaction */
	dmm_write(dmm, 0x0, reg[PAT_DESCR][engine->id]);

	/* wait for engine ready: */
	ret = wait_status(engine, DMM_PATSTATUS_READY);
	if (ret) {
		ret = -EFAULT;
		goto cleanup;
	}

	/* mark whether it is async to denote list management in IRQ handler */
	engine->async = wait ? false : true;
	reinit_completion(&engine->compl);
	/* verify that the irq handler sees the 'async' and completion value */
	smp_mb();

	/* kick reload */
	dmm_write(dmm, engine->refill_pa, reg[PAT_DESCR][engine->id]);

	if (wait) {
		if (!wait_for_completion_timeout(&engine->compl,
				msecs_to_jiffies(100))) {
			dev_err(dmm->dev, "timed out waiting for done\n");
			ret = -ETIMEDOUT;
			goto cleanup;
		}

		/* Check the engine status before continuing */
		ret = wait_status(engine, DMM_PATSTATUS_READY |
				  DMM_PATSTATUS_VALID | DMM_PATSTATUS_DONE);
	}

cleanup:
	/* only place engine back on list if we are done with it */
	if (ret || wait)
		release_engine(engine);

	return ret;
}
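
/*
 * Typical transaction lifecycle, as used by fill() below:
 *
 *	txn = dmm_txn_init(omap_dmm, area->tcm);
 *	dmm_txn_append(txn, &p_area, pages, npages, roll);   (per slice)
 *	ret = dmm_txn_commit(txn, wait);
 *
 * The engine is handed back to the idle list either in commit (sync
 * and error paths) or from the IRQ handler (async path).
 */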

/*
 * DMM programming
 */
static int fill(struct tcm_area *area, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret = 0;
	struct tcm_area slice, area_s;
	struct dmm_txn *txn;

	/*
	 * FIXME
	 *
	 * Asynchronous fill does not work reliably, as the driver does not
	 * handle errors in the async code paths. The fill operation may
	 * silently fail, leading to leaking DMM engines, which may eventually
	 * lead to deadlock if we run out of DMM engines.
	 *
	 * For now, always set 'wait' so that we only use sync fills. Async
	 * fills should be fixed, or alternatively we could decide to only
	 * support sync fills and so the whole async code path could be removed.
	 */

	wait = true;

	txn = dmm_txn_init(omap_dmm, area->tcm);
	if (IS_ERR_OR_NULL(txn))
		return -ENOMEM;

	tcm_for_each_slice(slice, *area, area_s) {
		struct pat_area p_area = {
				.x0 = slice.p0.x, .y0 = slice.p0.y,
				.x1 = slice.p1.x, .y1 = slice.p1.y,
		};

		dmm_txn_append(txn, &p_area, pages, npages, roll);

		roll += tcm_sizeof(slice);
	}

	ret = dmm_txn_commit(txn, wait);

	return ret;
}

/*
 * Pin/unpin
 */

/* note: slots for which pages[i] == NULL are filled w/ dummy page
 */
int tiler_pin(struct tiler_block *block, struct page **pages,
		uint32_t npages, uint32_t roll, bool wait)
{
	int ret;

	ret = fill(&block->area, pages, npages, roll, wait);

	if (ret)
		tiler_unpin(block);

	return ret;
}

int tiler_unpin(struct tiler_block *block)
{
	return fill(&block->area, NULL, 0, 0, false);
}

/*
 * Reserve/release
 */
struct tiler_block *tiler_reserve_2d(enum tiler_fmt fmt, uint16_t w,
		uint16_t h, uint16_t align)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	u32 min_align = 128;
	int ret;
	unsigned long flags;
	u32 slot_bytes;

	if (!block)
		return ERR_PTR(-ENOMEM);

	BUG_ON(!validfmt(fmt));

	/* convert width/height to slots */
	w = DIV_ROUND_UP(w, geom[fmt].slot_w);
	h = DIV_ROUND_UP(h, geom[fmt].slot_h);

	/* convert alignment to slots */
	slot_bytes = geom[fmt].slot_w * geom[fmt].cpp;
	min_align = max(min_align, slot_bytes);
	align = (align > min_align) ? ALIGN(align, min_align) : min_align;
	align /= slot_bytes;

	block->fmt = fmt;

	ret = tcm_reserve_2d(containers[fmt], w, h, align, -1, slot_bytes,
			&block->area);
	if (ret) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	/* add to allocation list */
	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}
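
/*
 * Example of the alignment math above for TILFMT_32BIT, assuming the
 * 64-byte-wide slot geometry from omap_dmm_priv.h: slot_w = 32 and
 * cpp = 4, so slot_bytes = 128 and min_align stays at 128. A requested
 * align of 256 bytes is kept and becomes 2 slot units; anything below
 * 128 is raised to a single slot (align = 1).
 */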

struct tiler_block *tiler_reserve_1d(size_t size)
{
	struct tiler_block *block = kzalloc(sizeof(*block), GFP_KERNEL);
	int num_pages = (size + PAGE_SIZE - 1) >> PAGE_SHIFT;
	unsigned long flags;

	if (!block)
		return ERR_PTR(-ENOMEM);

	block->fmt = TILFMT_PAGE;

	if (tcm_reserve_1d(containers[TILFMT_PAGE], num_pages,
				&block->area)) {
		kfree(block);
		return ERR_PTR(-ENOMEM);
	}

	spin_lock_irqsave(&list_lock, flags);
	list_add(&block->alloc_node, &omap_dmm->alloc_head);
	spin_unlock_irqrestore(&list_lock, flags);

	return block;
}

/* note: if you have pin'd pages, you should have already unpin'd first! */
int tiler_release(struct tiler_block *block)
{
	int ret = tcm_free(&block->area);
	unsigned long flags;

	if (block->area.tcm)
		dev_err(omap_dmm->dev, "failed to release block\n");

	spin_lock_irqsave(&list_lock, flags);
	list_del(&block->alloc_node);
	spin_unlock_irqrestore(&list_lock, flags);

	kfree(block);
	return ret;
}

/*
 * Utils
 */

/* calculate the tiler space address of a pixel in a view orientation...
 * below description copied from the display subsystem section of TRM:
 *
 * When the TILER is addressed, the bits:
 *   [28:27] = 0x0 for 8-bit tiled
 *             0x1 for 16-bit tiled
 *             0x2 for 32-bit tiled
 *             0x3 for page mode
 *   [31:29] = 0x0 for 0-degree view
 *             0x1 for 180-degree view + mirroring
 *             0x2 for 0-degree view + mirroring
 *             0x3 for 180-degree view
 *             0x4 for 270-degree view + mirroring
 *             0x5 for 270-degree view
 *             0x6 for 90-degree view
 *             0x7 for 90-degree view + mirroring
 * Otherwise the bits indicate the corresponding bit address to access
 * the SDRAM.
 */
static u32 tiler_get_address(enum tiler_fmt fmt, u32 orient, u32 x, u32 y)
{
	u32 x_bits, y_bits, tmp, x_mask, y_mask, alignment;

	x_bits = CONT_WIDTH_BITS - geom[fmt].x_shft;
	y_bits = CONT_HEIGHT_BITS - geom[fmt].y_shft;
	alignment = geom[fmt].x_shft + geom[fmt].y_shft;

	/* validate coordinate */
	x_mask = MASK(x_bits);
	y_mask = MASK(y_bits);

	/* x and y are unsigned, so only the upper bounds need checking */
	if (x > x_mask || y > y_mask) {
		DBG("invalid coords: %u > %u || %u > %u",
				x, x_mask, y, y_mask);
		return 0;
	}

	/* account for mirroring */
	if (orient & MASK_X_INVERT)
		x ^= x_mask;
	if (orient & MASK_Y_INVERT)
		y ^= y_mask;

	/* get coordinate address */
	if (orient & MASK_XY_FLIP)
		tmp = ((x << y_bits) + y);
	else
		tmp = ((y << x_bits) + x);

	return TIL_ADDR((tmp << alignment), orient, fmt);
}
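
/*
 * Worked example for the 0-degree (orient == 0) case, assuming the
 * container geometry from omap_dmm_priv.h (CONT_WIDTH_BITS == 14,
 * CONT_HEIGHT_BITS == 13): for a 32-bit tiled pixel at (x, y) we get
 * x_bits = 13 and alignment = 2, so the offset is ((y << 13) + x) << 2,
 * i.e. 4 bytes per pixel, with TIL_ADDR() or'ing the orientation into
 * bits [31:29] and the format into bits [28:27].
 */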

dma_addr_t tiler_ssptr(struct tiler_block *block)
{
	BUG_ON(!validfmt(block->fmt));

	return TILVIEW_8BIT + tiler_get_address(block->fmt, 0,
			block->area.p0.x * geom[block->fmt].slot_w,
			block->area.p0.y * geom[block->fmt].slot_h);
}

dma_addr_t tiler_tsptr(struct tiler_block *block, uint32_t orient,
		uint32_t x, uint32_t y)
{
	struct tcm_pt *p = &block->area.p0;
	BUG_ON(!validfmt(block->fmt));

	return tiler_get_address(block->fmt, orient,
			(p->x * geom[block->fmt].slot_w) + x,
			(p->y * geom[block->fmt].slot_h) + y);
}

void tiler_align(enum tiler_fmt fmt, uint16_t *w, uint16_t *h)
{
	BUG_ON(!validfmt(fmt));
	*w = round_up(*w, geom[fmt].slot_w);
	*h = round_up(*h, geom[fmt].slot_h);
}

uint32_t tiler_stride(enum tiler_fmt fmt, uint32_t orient)
{
	BUG_ON(!validfmt(fmt));

	if (orient & MASK_XY_FLIP)
		return 1 << (CONT_HEIGHT_BITS + geom[fmt].x_shft);
	else
		return 1 << (CONT_WIDTH_BITS + geom[fmt].y_shft);
}
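
/*
 * Note that the stride returned above is the pitch of a full container
 * row, 1 << (CONT_WIDTH_BITS + y_shft) bytes for unflipped views, not
 * the width of the reserved block: consecutive rows of a tiled buffer
 * are a whole container row apart in the tiler address space.
 */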

size_t tiler_size(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	tiler_align(fmt, &w, &h);
	return geom[fmt].cpp * w * h;
}

size_t tiler_vsize(enum tiler_fmt fmt, uint16_t w, uint16_t h)
{
	BUG_ON(!validfmt(fmt));
	return round_up(geom[fmt].cpp * w, PAGE_SIZE) * h;
}

uint32_t tiler_get_cpu_cache_flags(void)
{
	return omap_dmm->plat_data->cpu_cache_flags;
}

bool dmm_is_available(void)
{
	return omap_dmm ? true : false;
}

static int omap_dmm_remove(struct platform_device *dev)
{
	struct tiler_block *block, *_block;
	int i;
	unsigned long flags;

	if (omap_dmm) {
		/* free all area regions */
		spin_lock_irqsave(&list_lock, flags);
		list_for_each_entry_safe(block, _block, &omap_dmm->alloc_head,
					alloc_node) {
			list_del(&block->alloc_node);
			kfree(block);
		}
		spin_unlock_irqrestore(&list_lock, flags);

		for (i = 0; i < omap_dmm->num_lut; i++)
			if (omap_dmm->tcm && omap_dmm->tcm[i])
				omap_dmm->tcm[i]->deinit(omap_dmm->tcm[i]);
		kfree(omap_dmm->tcm);

		kfree(omap_dmm->engines);
		if (omap_dmm->refill_va)
			dma_free_wc(omap_dmm->dev,
				    REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				    omap_dmm->refill_va, omap_dmm->refill_pa);
		if (omap_dmm->dummy_page)
			__free_page(omap_dmm->dummy_page);

		if (omap_dmm->irq > 0)
			free_irq(omap_dmm->irq, omap_dmm);

		iounmap(omap_dmm->base);
		kfree(omap_dmm);
		omap_dmm = NULL;
	}

	return 0;
}

static int omap_dmm_probe(struct platform_device *dev)
{
	int ret = -EFAULT, i;
	struct tcm_area area = {0};
	u32 hwinfo, pat_geom;
	struct resource *mem;

	omap_dmm = kzalloc(sizeof(*omap_dmm), GFP_KERNEL);
	if (!omap_dmm)
		goto fail;

	/* initialize lists */
	INIT_LIST_HEAD(&omap_dmm->alloc_head);
	INIT_LIST_HEAD(&omap_dmm->idle_head);

	init_waitqueue_head(&omap_dmm->engine_queue);

	if (dev->dev.of_node) {
		const struct of_device_id *match;

		match = of_match_node(dmm_of_match, dev->dev.of_node);
		if (!match) {
			dev_err(&dev->dev, "failed to find matching device node\n");
			ret = -ENODEV;
			goto fail;
		}

		omap_dmm->plat_data = match->data;
	}

	/* lookup hwmod data - base address and irq */
	mem = platform_get_resource(dev, IORESOURCE_MEM, 0);
	if (!mem) {
		dev_err(&dev->dev, "failed to get base address resource\n");
		goto fail;
	}

	omap_dmm->base = ioremap(mem->start, SZ_2K);

	if (!omap_dmm->base) {
		dev_err(&dev->dev, "failed to get dmm base address\n");
		goto fail;
	}

	omap_dmm->irq = platform_get_irq(dev, 0);
	if (omap_dmm->irq < 0) {
		dev_err(&dev->dev, "failed to get IRQ resource\n");
		goto fail;
	}

	omap_dmm->dev = &dev->dev;

	hwinfo = dmm_read(omap_dmm, DMM_PAT_HWINFO);
	omap_dmm->num_engines = (hwinfo >> 24) & 0x1F;
	omap_dmm->num_lut = (hwinfo >> 16) & 0x1F;
	omap_dmm->container_width = 256;
	omap_dmm->container_height = 128;

	atomic_set(&omap_dmm->engine_counter, omap_dmm->num_engines);

	/* read out actual LUT width and height */
	pat_geom = dmm_read(omap_dmm, DMM_PAT_GEOMETRY);
	omap_dmm->lut_width = ((pat_geom >> 16) & 0xF) << 5;
	omap_dmm->lut_height = ((pat_geom >> 24) & 0xF) << 5;

	/* increment LUT by one if on OMAP5 */
	/* LUT has twice the height, and is split into a separate container */
	if (omap_dmm->lut_height != omap_dmm->container_height)
		omap_dmm->num_lut++;

	/* initialize DMM registers */
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__0);
	dmm_write(omap_dmm, 0x88888888, DMM_PAT_VIEW__1);
	dmm_write(omap_dmm, 0x80808080, DMM_PAT_VIEW_MAP__0);
	dmm_write(omap_dmm, 0x80000000, DMM_PAT_VIEW_MAP_BASE);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__0);
	dmm_write(omap_dmm, 0x88888888, DMM_TILER_OR__1);

	ret = request_irq(omap_dmm->irq, omap_dmm_irq_handler, IRQF_SHARED,
				"omap_dmm_irq_handler", omap_dmm);

	if (ret) {
		dev_err(&dev->dev, "couldn't register IRQ %d, error %d\n",
			omap_dmm->irq, ret);
		omap_dmm->irq = -1;
		goto fail;
	}

	/* Enable all interrupts for each refill engine except
	 * ERR_LUT_MISS<n> (which is just advisory, and we don't care
	 * about because we want to be able to refill live scanout
	 * buffers for accelerated pan/scroll) and FILL_DSC<n> which
	 * we just generally don't care about.
	 */
	dmm_write(omap_dmm, 0x7e7e7e7e, DMM_PAT_IRQENABLE_SET);

	omap_dmm->dummy_page = alloc_page(GFP_KERNEL | __GFP_DMA32);
	if (!omap_dmm->dummy_page) {
		dev_err(&dev->dev, "could not allocate dummy page\n");
		ret = -ENOMEM;
		goto fail;
	}

	/* set dma mask for device */
	ret = dma_set_coherent_mask(&dev->dev, DMA_BIT_MASK(32));
	if (ret)
		goto fail;

	omap_dmm->dummy_pa = page_to_phys(omap_dmm->dummy_page);

	/* alloc refill memory */
	omap_dmm->refill_va = dma_alloc_wc(&dev->dev,
				REFILL_BUFFER_SIZE * omap_dmm->num_engines,
				&omap_dmm->refill_pa, GFP_KERNEL);
	if (!omap_dmm->refill_va) {
		dev_err(&dev->dev, "could not allocate refill memory\n");
		goto fail;
	}

	/* alloc engines */
	omap_dmm->engines = kcalloc(omap_dmm->num_engines,
				    sizeof(*omap_dmm->engines), GFP_KERNEL);
	if (!omap_dmm->engines) {
		ret = -ENOMEM;
		goto fail;
	}

	for (i = 0; i < omap_dmm->num_engines; i++) {
		omap_dmm->engines[i].id = i;
		omap_dmm->engines[i].dmm = omap_dmm;
		omap_dmm->engines[i].refill_va = omap_dmm->refill_va +
						(REFILL_BUFFER_SIZE * i);
		omap_dmm->engines[i].refill_pa = omap_dmm->refill_pa +
						(REFILL_BUFFER_SIZE * i);
		init_completion(&omap_dmm->engines[i].compl);

		list_add(&omap_dmm->engines[i].idle_node, &omap_dmm->idle_head);
	}

	omap_dmm->tcm = kcalloc(omap_dmm->num_lut, sizeof(*omap_dmm->tcm),
				GFP_KERNEL);
	if (!omap_dmm->tcm) {
		ret = -ENOMEM;
		goto fail;
	}

	/* init containers */
	/* Each LUT is associated with a TCM (container manager).  We use the
	   lut_id to identify the correct LUT for programming during refill
	   operations. */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		omap_dmm->tcm[i] = sita_init(omap_dmm->container_width,
						omap_dmm->container_height);

		if (!omap_dmm->tcm[i]) {
			dev_err(&dev->dev, "failed to allocate container\n");
			ret = -ENOMEM;
			goto fail;
		}

		omap_dmm->tcm[i]->lut_id = i;
	}

	/* assign access mode containers to applicable tcm container */
	/* OMAP 4 has 1 container for all 4 views */
	/* OMAP 5 has 2 containers, 1 for 2D and 1 for 1D */
	containers[TILFMT_8BIT] = omap_dmm->tcm[0];
	containers[TILFMT_16BIT] = omap_dmm->tcm[0];
	containers[TILFMT_32BIT] = omap_dmm->tcm[0];

	if (omap_dmm->container_height != omap_dmm->lut_height) {
		/* second LUT is used for PAGE mode.  Programming must use
		   y offset that is added to all y coordinates.  LUT id is still
		   0, because it is the same LUT, just the upper 128 lines */
		containers[TILFMT_PAGE] = omap_dmm->tcm[1];
		omap_dmm->tcm[1]->y_offset = OMAP5_LUT_OFFSET;
		omap_dmm->tcm[1]->lut_id = 0;
	} else {
		containers[TILFMT_PAGE] = omap_dmm->tcm[0];
	}

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(omap_dmm->dev, "refill failed");
	}

	dev_info(omap_dmm->dev, "initialized all PAT entries\n");

	return 0;

fail:
	if (omap_dmm_remove(dev))
		dev_err(&dev->dev, "cleanup failed\n");
	return ret;
}

/*
 * debugfs support
 */

#ifdef CONFIG_DEBUG_FS

static const char *alphabet = "abcdefghijklmnopqrstuvwxyz"
				"ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789";
static const char *special = ".,:;'\"`~!^-+";

static void fill_map(char **map, int xdiv, int ydiv, struct tcm_area *a,
							char c, bool ovw)
{
	int x, y;
	for (y = a->p0.y / ydiv; y <= a->p1.y / ydiv; y++)
		for (x = a->p0.x / xdiv; x <= a->p1.x / xdiv; x++)
			if (map[y][x] == ' ' || ovw)
				map[y][x] = c;
}

static void fill_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p,
							char c)
{
	map[p->y / ydiv][p->x / xdiv] = c;
}

static char read_map_pt(char **map, int xdiv, int ydiv, struct tcm_pt *p)
{
	return map[p->y / ydiv][p->x / xdiv];
}

static int map_width(int xdiv, int x0, int x1)
{
	return (x1 / xdiv) - (x0 / xdiv) + 1;
}

static void text_map(char **map, int xdiv, char *nice, int yd, int x0, int x1)
{
	char *p = map[yd] + (x0 / xdiv);
	int w = (map_width(xdiv, x0, x1) - strlen(nice)) / 2;
	if (w >= 0) {
		p += w;
		while (*nice)
			*p++ = *nice++;
	}
}

static void map_1d_info(char **map, int xdiv, int ydiv, char *nice,
							struct tcm_area *a)
{
	sprintf(nice, "%dK", tcm_sizeof(*a) * 4);
	if (a->p0.y + 1 < a->p1.y) {
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv, 0,
							256 - 1);
	} else if (a->p0.y < a->p1.y) {
		if (strlen(nice) < map_width(xdiv, a->p0.x, 256 - 1))
			text_map(map, xdiv, nice, a->p0.y / ydiv,
					a->p0.x + xdiv, 256 - 1);
		else if (strlen(nice) < map_width(xdiv, 0, a->p1.x))
			text_map(map, xdiv, nice, a->p1.y / ydiv,
					0, a->p1.y - xdiv);
	} else if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x)) {
		text_map(map, xdiv, nice, a->p0.y / ydiv, a->p0.x, a->p1.x);
	}
}

static void map_2d_info(char **map, int xdiv, int ydiv, char *nice,
							struct tcm_area *a)
{
	sprintf(nice, "(%d*%d)", tcm_awidth(*a), tcm_aheight(*a));
	if (strlen(nice) + 1 < map_width(xdiv, a->p0.x, a->p1.x))
		text_map(map, xdiv, nice, (a->p0.y + a->p1.y) / 2 / ydiv,
						a->p0.x, a->p1.x);
}

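/*
 * Legend for the ASCII container dump below: 2D blocks are filled with
 * a rotating letter/digit and annotated with their WxH via
 * map_2d_info(); 1D blocks are drawn as runs of '=' whose endpoints
 * show as '<' and '>' ('X' when the endpoint cell is already occupied),
 * annotated with their size in KiB via map_1d_info().
 */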
int tiler_map_show(struct seq_file *s, void *arg)
{
	int xdiv = 2, ydiv = 1;
	char **map = NULL, *global_map;
	struct tiler_block *block;
	struct tcm_area a, p;
	int i;
	const char *m2d = alphabet;
	const char *a2d = special;
	const char *m2dp = m2d, *a2dp = a2d;
	char nice[128];
	int h_adj;
	int w_adj;
	unsigned long flags;
	int lut_idx;


	if (!omap_dmm) {
		/* early return if dmm/tiler device is not initialized */
		return 0;
	}

	h_adj = omap_dmm->container_height / ydiv;
	w_adj = omap_dmm->container_width / xdiv;

	map = kmalloc(h_adj * sizeof(*map), GFP_KERNEL);
	global_map = kmalloc((w_adj + 1) * h_adj, GFP_KERNEL);

	if (!map || !global_map)
		goto error;

	for (lut_idx = 0; lut_idx < omap_dmm->num_lut; lut_idx++) {
		memset(map, 0, h_adj * sizeof(*map));
		memset(global_map, ' ', (w_adj + 1) * h_adj);

		for (i = 0; i < omap_dmm->container_height; i++) {
			map[i] = global_map + i * (w_adj + 1);
			map[i][w_adj] = 0;
		}

		spin_lock_irqsave(&list_lock, flags);

		list_for_each_entry(block, &omap_dmm->alloc_head, alloc_node) {
			if (block->area.tcm == omap_dmm->tcm[lut_idx]) {
				if (block->fmt != TILFMT_PAGE) {
					fill_map(map, xdiv, ydiv, &block->area,
						*m2dp, true);
					if (!*++a2dp)
						a2dp = a2d;
					if (!*++m2dp)
						m2dp = m2d;
					map_2d_info(map, xdiv, ydiv, nice,
							&block->area);
				} else {
					bool start = read_map_pt(map, xdiv,
						ydiv, &block->area.p0) == ' ';
					bool end = read_map_pt(map, xdiv, ydiv,
							&block->area.p1) == ' ';

					tcm_for_each_slice(a, block->area, p)
						fill_map(map, xdiv, ydiv, &a,
							'=', true);
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p0,
							start ? '<' : 'X');
					fill_map_pt(map, xdiv, ydiv,
							&block->area.p1,
							end ? '>' : 'X');
					map_1d_info(map, xdiv, ydiv, nice,
							&block->area);
				}
			}
		}

		spin_unlock_irqrestore(&list_lock, flags);

		if (s) {
			seq_printf(s, "CONTAINER %d DUMP BEGIN\n", lut_idx);
			for (i = 0; i < 128; i++)
				seq_printf(s, "%03d:%s\n", i, map[i]);
			seq_printf(s, "CONTAINER %d DUMP END\n", lut_idx);
		} else {
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP BEGIN\n",
				lut_idx);
			for (i = 0; i < 128; i++)
				dev_dbg(omap_dmm->dev, "%03d:%s\n", i, map[i]);
			dev_dbg(omap_dmm->dev, "CONTAINER %d DUMP END\n",
				lut_idx);
		}
	}

error:
	kfree(map);
	kfree(global_map);

	return 0;
}
#endif

#ifdef CONFIG_PM_SLEEP
static int omap_dmm_resume(struct device *dev)
{
	struct tcm_area area;
	int i;

	if (!omap_dmm)
		return -ENODEV;

	area = (struct tcm_area) {
		.tcm = NULL,
		.p1.x = omap_dmm->container_width - 1,
		.p1.y = omap_dmm->container_height - 1,
	};

	/* initialize all LUTs to dummy page entries */
	for (i = 0; i < omap_dmm->num_lut; i++) {
		area.tcm = omap_dmm->tcm[i];
		if (fill(&area, NULL, 0, 0, true))
			dev_err(dev, "refill failed");
	}

	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(omap_dmm_pm_ops, NULL, omap_dmm_resume);

#if defined(CONFIG_OF)
static const struct dmm_platform_data dmm_omap4_platform_data = {
	.cpu_cache_flags = OMAP_BO_WC,
};

static const struct dmm_platform_data dmm_omap5_platform_data = {
	.cpu_cache_flags = OMAP_BO_UNCACHED,
};

static const struct of_device_id dmm_of_match[] = {
	{
		.compatible = "ti,omap4-dmm",
		.data = &dmm_omap4_platform_data,
	},
	{
		.compatible = "ti,omap5-dmm",
		.data = &dmm_omap5_platform_data,
	},
	{},
};
#endif

struct platform_driver omap_dmm_driver = {
	.probe = omap_dmm_probe,
	.remove = omap_dmm_remove,
	.driver = {
		.owner = THIS_MODULE,
		.name = DMM_DRIVER_NAME,
		.of_match_table = of_match_ptr(dmm_of_match),
		.pm = &omap_dmm_pm_ops,
	},
};

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Andy Gross <andy.gross@ti.com>");
MODULE_DESCRIPTION("OMAP DMM/Tiler Driver");