/*
 * Dmaengine driver base library for DMA controllers, found on SH-based SoCs
 *
 * extracted from shdma.c and headers
 *
 * Copyright (C) 2011-2012 Guennadi Liakhovetski <g.liakhovetski@gmx.de>
 * Copyright (C) 2009 Nobuhiro Iwamatsu <iwamatsu.nobuhiro@renesas.com>
 * Copyright (C) 2009 Renesas Solutions, Inc. All rights reserved.
 * Copyright (C) 2007 Freescale Semiconductor, Inc. All rights reserved.
 *
 * This is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 */

#ifndef SHDMA_BASE_H
#define SHDMA_BASE_H

#include <linux/dmaengine.h>
#include <linux/interrupt.h>
#include <linux/list.h>
#include <linux/types.h>

/**
 * shdma_pm_state - DMA channel PM state
 * SHDMA_PM_ESTABLISHED:	either idle or during data transfer
 * SHDMA_PM_BUSY:		during the transfer preparation, when we have to
 *				drop the lock temporarily
 * SHDMA_PM_PENDING:		transfers pending
 */
enum shdma_pm_state {
	SHDMA_PM_ESTABLISHED,
	SHDMA_PM_BUSY,
	SHDMA_PM_PENDING,
};

struct device;

/*
 * Drivers using this library are expected to embed struct shdma_dev,
 * struct shdma_chan, struct shdma_desc, and struct shdma_slave
 * in their respective device, channel, descriptor and slave objects.
 */

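/*
 * A minimal sketch of the embedding described above, assuming a
 * hypothetical driver; the names my_dmae_chan and to_my_chan() are
 * illustrative and not part of this library:
 *
 *	struct my_dmae_chan {
 *		struct shdma_chan shdma_chan;	// embedded library part
 *		u32 chcr;			// driver-private state
 *	};
 *
 *	static struct my_dmae_chan *to_my_chan(struct shdma_chan *schan)
 *	{
 *		return container_of(schan, struct my_dmae_chan, shdma_chan);
 *	}
 */
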
struct shdma_slave {
	int slave_id;
};

struct shdma_desc {
	struct list_head node;
	struct dma_async_tx_descriptor async_tx;
	enum dma_transfer_direction direction;
	size_t partial;
	dma_cookie_t cookie;
	int chunks;
	int mark;
	bool cyclic;			/* used as cyclic transfer */
};

struct shdma_chan {
	spinlock_t chan_lock;		/* Channel operation lock */
	struct list_head ld_queue;	/* Link descriptors queue */
	struct list_head ld_free;	/* Free link descriptors */
	struct dma_chan dma_chan;	/* DMA channel */
	struct device *dev;		/* Channel device */
	void *desc;			/* buffer for descriptor array */
	int desc_num;			/* desc count */
	size_t max_xfer_len;		/* max transfer length */
	int id;				/* Raw id of this channel */
	int irq;			/* Channel IRQ */
	int slave_id;			/* Client ID for slave DMA */
	int hw_req;			/* DMA request line for slave DMA - same
					 * as MID/RID, used with DT */
	enum shdma_pm_state pm_state;
};

/**
 * struct shdma_ops - simple DMA driver operations
 * desc_completed:	return true if this is the descriptor that has just
 *			completed (atomic)
 * halt_channel:	stop DMA channel operation (atomic)
 * channel_busy:	return true if the channel is busy (atomic)
 * slave_addr:		return the slave DMA address
 * desc_setup:		set up the hardware-specific descriptor portion (atomic)
 * set_slave:		bind a channel to a slave
 * setup_xfer:		configure channel hardware for operation (atomic)
 * start_xfer:		start the DMA transfer (atomic)
 * embedded_desc:	return the Nth struct shdma_desc pointer from the
 *			descriptor array
 * chan_irq:		process a channel IRQ, return true if a transfer has
 *			completed (atomic)
 * get_partial:		return the number of bytes transferred so far in a
 *			partially-completed descriptor (atomic)
 */
struct shdma_ops {
	bool (*desc_completed)(struct shdma_chan *, struct shdma_desc *);
	void (*halt_channel)(struct shdma_chan *);
	bool (*channel_busy)(struct shdma_chan *);
	dma_addr_t (*slave_addr)(struct shdma_chan *);
	int (*desc_setup)(struct shdma_chan *, struct shdma_desc *,
			  dma_addr_t, dma_addr_t, size_t *);
	int (*set_slave)(struct shdma_chan *, int, dma_addr_t, bool);
	void (*setup_xfer)(struct shdma_chan *, int);
	void (*start_xfer)(struct shdma_chan *, struct shdma_desc *);
	struct shdma_desc *(*embedded_desc)(void *, int);
	bool (*chan_irq)(struct shdma_chan *, int);
	size_t (*get_partial)(struct shdma_chan *, struct shdma_desc *);
};

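/*
 * A hedged sketch of how a driver wires up these operations: one static
 * ops table, referenced from struct shdma_dev below. All my_dmae_*
 * callback names are hypothetical placeholders for driver-provided
 * implementations:
 *
 *	static const struct shdma_ops my_dmae_shdma_ops = {
 *		.desc_completed	= my_dmae_desc_completed,
 *		.halt_channel	= my_dmae_halt_channel,
 *		.channel_busy	= my_dmae_channel_busy,
 *		.slave_addr	= my_dmae_slave_addr,
 *		.desc_setup	= my_dmae_desc_setup,
 *		.set_slave	= my_dmae_set_slave,
 *		.setup_xfer	= my_dmae_setup_xfer,
 *		.start_xfer	= my_dmae_start_xfer,
 *		.embedded_desc	= my_dmae_embedded_desc,
 *		.chan_irq	= my_dmae_chan_irq,
 *		.get_partial	= my_dmae_get_partial,
 *	};
 */
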
struct shdma_dev {
	struct dma_device dma_dev;
	struct shdma_chan **schan;
	const struct shdma_ops *ops;
	size_t desc_size;
};

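/*
 * The library allocates each channel's descriptor array from desc_size-
 * sized objects and uses embedded_desc() to map an array index back to
 * the embedded struct shdma_desc. A sketch, assuming a hypothetical
 * struct my_dmae_desc that embeds it:
 *
 *	struct my_dmae_desc {
 *		struct my_dmae_regs hw;		// hardware descriptor
 *		struct shdma_desc shdma_desc;	// library part
 *	};
 *
 *	static struct shdma_desc *my_dmae_embedded_desc(void *buf, int i)
 *	{
 *		return &((struct my_dmae_desc *)buf)[i].shdma_desc;
 *	}
 *
 *	// at probe time:
 *	sdev->desc_size = sizeof(struct my_dmae_desc);
 */
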
#define shdma_for_each_chan(c, d, i) for (i = 0, c = (d)->schan[0]; \
		i < (d)->dma_dev.chancnt; c = (d)->schan[++i])

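/*
 * Usage sketch for the iterator above, e.g. in a controller-wide halt
 * path (sdev is assumed to be in scope; unprobed slots may be NULL and
 * are skipped):
 *
 *	struct shdma_chan *schan;
 *	int i;
 *
 *	shdma_for_each_chan(schan, sdev, i) {
 *		if (!schan)
 *			continue;
 *		sdev->ops->halt_channel(schan);
 *	}
 */
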
int shdma_request_irq(struct shdma_chan *, int,
			   unsigned long, const char *);
bool shdma_reset(struct shdma_dev *sdev);
void shdma_chan_probe(struct shdma_dev *sdev,
			   struct shdma_chan *schan, int id);
void shdma_chan_remove(struct shdma_chan *schan);
int shdma_init(struct device *dev, struct shdma_dev *sdev,
		    int chan_num);
void shdma_cleanup(struct shdma_dev *sdev);
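
/*
 * Rough probe-time call order for a driver built on this library; a
 * hedged sketch only, with my_dev/my_chan hypothetical and all error
 * unwinding omitted:
 *
 *	err = shdma_init(&pdev->dev, &my_dev->shdma_dev, chan_num);
 *	if (err < 0)
 *		return err;
 *
 *	// for each hardware channel:
 *	shdma_chan_probe(&my_dev->shdma_dev, &my_chan->shdma_chan, id);
 *	err = shdma_request_irq(&my_chan->shdma_chan, irq, IRQF_SHARED,
 *				dev_name(&pdev->dev));
 *
 *	// finally, register with the dmaengine core:
 *	err = dma_async_device_register(&my_dev->shdma_dev.dma_dev);
 */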
#if IS_ENABLED(CONFIG_SH_DMAE_BASE)
bool shdma_chan_filter(struct dma_chan *chan, void *arg);
#else
#define shdma_chan_filter NULL
#endif
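
/*
 * Client code typically passes shdma_chan_filter to dma_request_channel()
 * (or dma_request_slave_channel_compat()), with the requested slave ID as
 * the filter argument. A minimal sketch:
 *
 *	dma_cap_mask_t mask;
 *	struct dma_chan *chan;
 *
 *	dma_cap_zero(mask);
 *	dma_cap_set(DMA_SLAVE, mask);
 *	chan = dma_request_channel(mask, shdma_chan_filter,
 *				   (void *)(unsigned long)slave_id);
 */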

#endif