/*
 * Copyright (c) 2011-2015 Xilinx Inc.
 * Copyright (c) 2015, National Instruments Corp.
 *
 * FPGA Manager Driver for Xilinx Zynq, heavily based on xdevcfg driver
 * in their vendor tree.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/clk.h>
#include <linux/completion.h>
#include <linux/delay.h>
#include <linux/dma-mapping.h>
#include <linux/fpga/fpga-mgr.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/iopoll.h>
#include <linux/module.h>
#include <linux/mfd/syscon.h>
#include <linux/of_address.h>
#include <linux/of_irq.h>
#include <linux/pm.h>
#include <linux/regmap.h>
#include <linux/string.h>
#include <linux/scatterlist.h>

/* Offsets into SLCR regmap */

/* FPGA Software Reset Control */
#define SLCR_FPGA_RST_CTRL_OFFSET	0x240
/* Level Shifters Enable */
#define SLCR_LVL_SHFTR_EN_OFFSET	0x900

/* Constant Definitions */

/* Control Register */
#define CTRL_OFFSET			0x00
/* Lock Register */
#define LOCK_OFFSET			0x04
/* Interrupt Status Register */
#define INT_STS_OFFSET			0x0c
/* Interrupt Mask Register */
#define INT_MASK_OFFSET			0x10
/* Status Register */
#define STATUS_OFFSET			0x14
/* DMA Source Address Register */
#define DMA_SRC_ADDR_OFFSET		0x18
/* DMA Destination Address Reg */
#define DMA_DST_ADDR_OFFSET		0x1c
/* DMA Source Transfer Length */
#define DMA_SRC_LEN_OFFSET		0x20
/* DMA Destination Transfer */
#define DMA_DEST_LEN_OFFSET		0x24
/* Unlock Register */
#define UNLOCK_OFFSET			0x34
/* Misc. Control Register */
#define MCTRL_OFFSET			0x80

/* Control Register Bit definitions */

/* Signal to reset FPGA */
#define CTRL_PCFG_PROG_B_MASK		BIT(30)
/* Enable PCAP for PR */
#define CTRL_PCAP_PR_MASK		BIT(27)
/* Enable PCAP */
#define CTRL_PCAP_MODE_MASK		BIT(26)
/* Lower rate to allow decrypt on the fly */
#define CTRL_PCAP_RATE_EN_MASK		BIT(25)
/* System booted in secure mode */
#define CTRL_SEC_EN_MASK		BIT(7)

/* Miscellaneous Control Register bit definitions */
/* Internal PCAP loopback */
#define MCTRL_PCAP_LPBK_MASK		BIT(4)

/* Status register bit definitions */

/* FPGA init status */
#define STATUS_DMA_Q_F			BIT(31)
#define STATUS_DMA_Q_E			BIT(30)
#define STATUS_PCFG_INIT_MASK		BIT(4)

/* Interrupt Status/Mask Register Bit definitions */
/* DMA command done */
#define IXR_DMA_DONE_MASK		BIT(13)
/* DMA and PCAP cmd done */
#define IXR_D_P_DONE_MASK		BIT(12)
/* FPGA programmed */
#define IXR_PCFG_DONE_MASK		BIT(2)
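/* Error bits in INT_STS that abort programming when seen */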
#define IXR_ERROR_FLAGS_MASK		0x00F0C860
#define IXR_ALL_MASK			0xF8F7F87F

/* Miscellaneous constant values */

/* Invalid DMA addr */
#define DMA_INVALID_ADDRESS		GENMASK(31, 0)
/* Used to unlock the dev */
#define UNLOCK_MASK			0x757bdf0d
/* Timeout for polling reset bits */
#define INIT_POLL_TIMEOUT		2500000
/* Delay for polling reset bits */
#define INIT_POLL_DELAY			20
/* Signal this is the last DMA transfer, wait for the AXI and PCAP before
 * interrupting
 */
#define DMA_SRC_LAST_TRANSFER		1
/* Timeout for DMA completion */
#define DMA_TIMEOUT_MS			5000

/* Masks written to the SLCR reset and level shifter registers */
/* Disable all Level shifters */
#define LVL_SHFTR_DISABLE_ALL_MASK	0x0
/* Enable Level shifters from PS to PL */
#define LVL_SHFTR_ENABLE_PS_TO_PL	0xa
/* Enable Level shifters from PL to PS */
#define LVL_SHFTR_ENABLE_PL_TO_PS	0xf
/* Enable global resets */
#define FPGA_RST_ALL_MASK		0xf
/* Disable global resets */
#define FPGA_RST_NONE_MASK		0x0

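/*
 * Driver private state. The dma_lock protects the scatterlist walk shared
 * between zynq_fpga_ops_write() and the ISR: cur_sg points at the next
 * scatterlist element to queue (NULL once the last element has been queued),
 * dma_elm counts how many of the dma_nelms mapped elements have been queued,
 * and dma_done is completed by the ISR when programming finishes or fails.
 */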
struct zynq_fpga_priv {
	int irq;
	struct clk *clk;

	void __iomem *io_base;
	struct regmap *slcr;

	spinlock_t dma_lock;
	unsigned int dma_elm;
	unsigned int dma_nelms;
	struct scatterlist *cur_sg;

	struct completion dma_done;
};

static inline void zynq_fpga_write(struct zynq_fpga_priv *priv, u32 offset,
				   u32 val)
{
	writel(val, priv->io_base + offset);
}

static inline u32 zynq_fpga_read(const struct zynq_fpga_priv *priv,
				 u32 offset)
{
	return readl(priv->io_base + offset);
}

#define zynq_fpga_poll_timeout(priv, addr, val, cond, sleep_us, timeout_us) \
	readl_poll_timeout(priv->io_base + addr, val, cond, sleep_us, \
			   timeout_us)

/* Cause the specified irq mask bits to generate IRQs */
static inline void zynq_fpga_set_irq(struct zynq_fpga_priv *priv, u32 enable)
{
	zynq_fpga_write(priv, INT_MASK_OFFSET, ~enable);
}

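/* Feed as much of the scatterlist as the devcfg DMA command queue will
 * accept, then pick the interrupt that drives the next step: DMA done while
 * elements remain, DMA and PCAP done for the final element.
 */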
/* Must be called with dma_lock held */
static void zynq_step_dma(struct zynq_fpga_priv *priv)
{
	u32 addr;
	u32 len;
	bool first;

	first = priv->dma_elm == 0;
	while (priv->cur_sg) {
		/* Feed the DMA queue until it is full. */
		if (zynq_fpga_read(priv, STATUS_OFFSET) & STATUS_DMA_Q_F)
			break;

		addr = sg_dma_address(priv->cur_sg);
		len = sg_dma_len(priv->cur_sg);
		if (priv->dma_elm + 1 == priv->dma_nelms) {
			/* The last transfer waits for the PCAP to finish too,
			 * notice this also changes the irq_mask to ignore
			 * IXR_DMA_DONE_MASK which ensures we do not trigger
			 * the completion too early.
			 */
			addr |= DMA_SRC_LAST_TRANSFER;
			priv->cur_sg = NULL;
		} else {
			priv->cur_sg = sg_next(priv->cur_sg);
			priv->dma_elm++;
		}

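		/* Queue one DMA command: source is the next bitstream chunk,
		 * length is in 32-bit words, and no memory destination is
		 * given since the PCAP consumes the data.
		 */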
		zynq_fpga_write(priv, DMA_SRC_ADDR_OFFSET, addr);
		zynq_fpga_write(priv, DMA_DST_ADDR_OFFSET, DMA_INVALID_ADDRESS);
		zynq_fpga_write(priv, DMA_SRC_LEN_OFFSET, len / 4);
		zynq_fpga_write(priv, DMA_DEST_LEN_OFFSET, 0);
	}

	/* Once the first transfer is queued we can turn on the ISR; future
	 * calls to zynq_step_dma will happen from ISR context. The dma_lock
	 * spinlock guarantees this handover is done coherently, and the ISR
	 * enable is put at the end to avoid another CPU spinning in the ISR
	 * on this lock.
	 */
	if (first && priv->cur_sg) {
		zynq_fpga_set_irq(priv,
				  IXR_DMA_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	} else if (!priv->cur_sg) {
		/* The last transfer changes to DMA & PCAP mode since we do
		 * not want to continue until everything has been flushed into
		 * the PCAP.
		 */
		zynq_fpga_set_irq(priv,
				  IXR_D_P_DONE_MASK | IXR_ERROR_FLAGS_MASK);
	}
}

static irqreturn_t zynq_fpga_isr(int irq, void *data)
{
	struct zynq_fpga_priv *priv = data;
	u32 intr_status;

	/* If anything other than a DMA completion is reported, something went
	 * wrong: stop and hand control back to zynq_fpga_ops_write.
	 * Otherwise keep feeding the DMA.
	 */
	spin_lock(&priv->dma_lock);
	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	if (!(intr_status & IXR_ERROR_FLAGS_MASK) &&
	    (intr_status & IXR_DMA_DONE_MASK) && priv->cur_sg) {
		zynq_fpga_write(priv, INT_STS_OFFSET, IXR_DMA_DONE_MASK);
		zynq_step_dma(priv);
		spin_unlock(&priv->dma_lock);
		return IRQ_HANDLED;
	}
	spin_unlock(&priv->dma_lock);

	zynq_fpga_set_irq(priv, 0);
	complete(&priv->dma_done);

	return IRQ_HANDLED;
}

/* Sanity check the proposed bitstream. It must start with the sync word in
 * the correct byte order, and be dword aligned. The input is a Xilinx .bin
 * file with every 32 bit quantity swapped.
 */
static bool zynq_fpga_has_sync(const u8 *buf, size_t count)
{
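	/* e.g. the standard sync word 0xAA995566 shows up in a byte-swapped
	 * .bin as the byte sequence 66 55 99 aa scanned for below.
	 */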
	for (; count >= 4; buf += 4, count -= 4)
		if (buf[0] == 0x66 && buf[1] == 0x55 && buf[2] == 0x99 &&
		    buf[3] == 0xaa)
			return true;
	return false;
}

static int zynq_fpga_ops_write_init(struct fpga_manager *mgr,
				    struct fpga_image_info *info,
				    const char *buf, size_t count)
{
	struct zynq_fpga_priv *priv;
	u32 ctrl, status;
	int err;

	priv = mgr->priv;

	err = clk_enable(priv->clk);
	if (err)
		return err;

	/* check if the bitstream is encrypted and the system is still secure */
	if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM) {
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		if (!(ctrl & CTRL_SEC_EN_MASK)) {
			dev_err(&mgr->dev,
				"System not secure, can't use encrypted bitstreams\n");
			err = -EINVAL;
			goto out_err;
		}
	}
283
Moritz Fischer37784702015-10-16 15:42:30 -0700284 /* don't globally reset PL if we're doing partial reconfig */
Alan Tull1df28652016-11-01 14:14:26 -0500285 if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
Jason Gunthorpeb496df82017-02-01 12:48:43 -0700286 if (!zynq_fpga_has_sync(buf, count)) {
287 dev_err(&mgr->dev,
288 "Invalid bitstream, could not find a sync word. Bitstream must be a byte swapped .bin file\n");
289 err = -EINVAL;
290 goto out_err;
291 }
292
Moritz Fischer37784702015-10-16 15:42:30 -0700293 /* assert AXI interface resets */
294 regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
295 FPGA_RST_ALL_MASK);
296
297 /* disable all level shifters */
298 regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
299 LVL_SHFTR_DISABLE_ALL_MASK);
300 /* enable level shifters from PS to PL */
301 regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
302 LVL_SHFTR_ENABLE_PS_TO_PL);
303
		/* create a rising edge on PCFG_INIT. PCFG_INIT follows
		 * PCFG_PROG_B, so we need to poll it after setting PCFG_PROG_B
		 * to make sure the rising edge actually happens.
		 * Note: PCFG_PROG_B is active low; the sequence is described
		 * in UG585 v1.10, page 211.
		 */
		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl |= CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     status & STATUS_PCFG_INIT_MASK,
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
			goto out_err;
		}

		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl &= ~CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     !(status & STATUS_PCFG_INIT_MASK),
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for !PCFG_INIT\n");
			goto out_err;
		}

		ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
		ctrl |= CTRL_PCFG_PROG_B_MASK;

		zynq_fpga_write(priv, CTRL_OFFSET, ctrl);

		err = zynq_fpga_poll_timeout(priv, STATUS_OFFSET, status,
					     status & STATUS_PCFG_INIT_MASK,
					     INIT_POLL_DELAY,
					     INIT_POLL_TIMEOUT);
		if (err) {
			dev_err(&mgr->dev, "Timeout waiting for PCFG_INIT\n");
			goto out_err;
		}
	}

	/* set configuration register with following options:
	 * - enable PCAP interface
	 * - set throughput for maximum speed (if bitstream not encrypted)
	 * - set CPU in user mode
	 */
	ctrl = zynq_fpga_read(priv, CTRL_OFFSET);
	if (info->flags & FPGA_MGR_ENCRYPTED_BITSTREAM)
		zynq_fpga_write(priv, CTRL_OFFSET,
				(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
				 | CTRL_PCAP_RATE_EN_MASK | ctrl));
	else
		zynq_fpga_write(priv, CTRL_OFFSET,
				(CTRL_PCAP_PR_MASK | CTRL_PCAP_MODE_MASK
				 | ctrl));

	/* We expect that the command queue is empty right now. */
	status = zynq_fpga_read(priv, STATUS_OFFSET);
	if ((status & STATUS_DMA_Q_F) ||
	    (status & STATUS_DMA_Q_E) != STATUS_DMA_Q_E) {
		dev_err(&mgr->dev, "DMA command queue not right\n");
		err = -EBUSY;
		goto out_err;
	}

	/* ensure internal PCAP loopback is disabled */
	ctrl = zynq_fpga_read(priv, MCTRL_OFFSET);
	zynq_fpga_write(priv, MCTRL_OFFSET, (~MCTRL_PCAP_LPBK_MASK & ctrl));

	clk_disable(priv->clk);

	return 0;

out_err:
	clk_disable(priv->clk);

	return err;
}

static int zynq_fpga_ops_write(struct fpga_manager *mgr, struct sg_table *sgt)
{
	struct zynq_fpga_priv *priv;
	const char *why;
	int err;
	u32 intr_status;
	unsigned long timeout;
	unsigned long flags;
	struct scatterlist *sg;
	int i;

	priv = mgr->priv;

	/* The hardware can only DMA multiples of 4 bytes, and it requires the
	 * starting addresses to be aligned to 64 bits (UG585 pg 212).
	 */
	for_each_sg(sgt->sgl, sg, sgt->nents, i) {
		if ((sg->offset % 8) || (sg->length % 4)) {
			dev_err(&mgr->dev,
				"Invalid bitstream, chunks must be aligned\n");
			return -EINVAL;
		}
	}

	priv->dma_nelms =
	    dma_map_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	if (priv->dma_nelms == 0) {
		dev_err(&mgr->dev, "Unable to DMA map (TO_DEVICE)\n");
		return -ENOMEM;
	}

	/* enable clock */
	err = clk_enable(priv->clk);
	if (err)
		goto out_free;

	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
	reinit_completion(&priv->dma_done);

	/* zynq_step_dma will turn on interrupts */
	spin_lock_irqsave(&priv->dma_lock, flags);
	priv->dma_elm = 0;
	priv->cur_sg = sgt->sgl;
	zynq_step_dma(priv);
	spin_unlock_irqrestore(&priv->dma_lock, flags);

	timeout = wait_for_completion_timeout(&priv->dma_done,
					      msecs_to_jiffies(DMA_TIMEOUT_MS));

	spin_lock_irqsave(&priv->dma_lock, flags);
	zynq_fpga_set_irq(priv, 0);
	priv->cur_sg = NULL;
	spin_unlock_irqrestore(&priv->dma_lock, flags);

	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);

	/* There doesn't seem to be a way to force-cancel an in-flight DMA, so
	 * if something went wrong we are relying on the hardware to have
	 * halted the DMA before we get here. If there were such a way, we
	 * could also use wait_for_completion_interruptible.
	 */

	if (intr_status & IXR_ERROR_FLAGS_MASK) {
		why = "DMA reported error";
		err = -EIO;
		goto out_report;
	}

	if (priv->cur_sg ||
	    !((intr_status & IXR_D_P_DONE_MASK) == IXR_D_P_DONE_MASK)) {
		if (timeout == 0)
			why = "DMA timed out";
		else
			why = "DMA did not complete";
		err = -EIO;
		goto out_report;
	}

	err = 0;
	goto out_clk;

out_report:
	dev_err(&mgr->dev,
		"%s: INT_STS:0x%x CTRL:0x%x LOCK:0x%x INT_MASK:0x%x STATUS:0x%x MCTRL:0x%x\n",
		why,
		intr_status,
		zynq_fpga_read(priv, CTRL_OFFSET),
		zynq_fpga_read(priv, LOCK_OFFSET),
		zynq_fpga_read(priv, INT_MASK_OFFSET),
		zynq_fpga_read(priv, STATUS_OFFSET),
		zynq_fpga_read(priv, MCTRL_OFFSET));

out_clk:
	clk_disable(priv->clk);

out_free:
	dma_unmap_sg(mgr->dev.parent, sgt->sgl, sgt->nents, DMA_TO_DEVICE);
	return err;
}

static int zynq_fpga_ops_write_complete(struct fpga_manager *mgr,
					struct fpga_image_info *info)
{
	struct zynq_fpga_priv *priv = mgr->priv;
	int err;
	u32 intr_status;

	err = clk_enable(priv->clk);
	if (err)
		return err;

	err = zynq_fpga_poll_timeout(priv, INT_STS_OFFSET, intr_status,
				     intr_status & IXR_PCFG_DONE_MASK,
				     INIT_POLL_DELAY,
				     INIT_POLL_TIMEOUT);

	clk_disable(priv->clk);

	if (err)
		return err;

	/* for the partial reconfig case we didn't touch the level shifters */
	if (!(info->flags & FPGA_MGR_PARTIAL_RECONFIG)) {
		/* enable level shifters from PL to PS */
		regmap_write(priv->slcr, SLCR_LVL_SHFTR_EN_OFFSET,
			     LVL_SHFTR_ENABLE_PL_TO_PS);

		/* deassert AXI interface resets */
		regmap_write(priv->slcr, SLCR_FPGA_RST_CTRL_OFFSET,
			     FPGA_RST_NONE_MASK);
	}

	return 0;
}

static enum fpga_mgr_states zynq_fpga_ops_state(struct fpga_manager *mgr)
{
	int err;
	u32 intr_status;
	struct zynq_fpga_priv *priv;

	priv = mgr->priv;

	err = clk_enable(priv->clk);
	if (err)
		return FPGA_MGR_STATE_UNKNOWN;

	intr_status = zynq_fpga_read(priv, INT_STS_OFFSET);
	clk_disable(priv->clk);

	if (intr_status & IXR_PCFG_DONE_MASK)
		return FPGA_MGR_STATE_OPERATING;

	return FPGA_MGR_STATE_UNKNOWN;
}

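/* initial_header_size has the fpga-mgr core hand the first 128 bytes of the
 * image to write_init so the sync-word scan there has data to look at.
 */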
549static const struct fpga_manager_ops zynq_fpga_ops = {
Jason Gunthorpeb496df82017-02-01 12:48:43 -0700550 .initial_header_size = 128,
Moritz Fischer37784702015-10-16 15:42:30 -0700551 .state = zynq_fpga_ops_state,
552 .write_init = zynq_fpga_ops_write_init,
Jason Gunthorpe425902f2017-02-01 12:48:45 -0700553 .write_sg = zynq_fpga_ops_write,
Moritz Fischer37784702015-10-16 15:42:30 -0700554 .write_complete = zynq_fpga_ops_write_complete,
555};
556
static int zynq_fpga_probe(struct platform_device *pdev)
{
	struct device *dev = &pdev->dev;
	struct zynq_fpga_priv *priv;
	struct resource *res;
	int err;

	priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;
	spin_lock_init(&priv->dma_lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	priv->io_base = devm_ioremap_resource(dev, res);
	if (IS_ERR(priv->io_base))
		return PTR_ERR(priv->io_base);

	priv->slcr = syscon_regmap_lookup_by_phandle(dev->of_node,
		"syscon");
	if (IS_ERR(priv->slcr)) {
		dev_err(dev, "unable to get zynq-slcr regmap\n");
		return PTR_ERR(priv->slcr);
	}

	init_completion(&priv->dma_done);

	priv->irq = platform_get_irq(pdev, 0);
	if (priv->irq < 0) {
		dev_err(dev, "No IRQ available\n");
		return priv->irq;
	}

	priv->clk = devm_clk_get(dev, "ref_clk");
	if (IS_ERR(priv->clk)) {
		dev_err(dev, "input clock not found\n");
		return PTR_ERR(priv->clk);
	}

	err = clk_prepare_enable(priv->clk);
	if (err) {
		dev_err(dev, "unable to enable clock\n");
		return err;
	}

	/* unlock the device */
	zynq_fpga_write(priv, UNLOCK_OFFSET, UNLOCK_MASK);

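	/* mask and clear any pending interrupts before installing the ISR */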
	zynq_fpga_set_irq(priv, 0);
	zynq_fpga_write(priv, INT_STS_OFFSET, IXR_ALL_MASK);
	err = devm_request_irq(dev, priv->irq, zynq_fpga_isr, 0, dev_name(dev),
			       priv);
	if (err) {
		dev_err(dev, "unable to request IRQ\n");
		clk_disable_unprepare(priv->clk);
		return err;
	}

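	/* keep the clock prepared but gated; each fpga-mgr op re-enables it */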
	clk_disable(priv->clk);

	err = fpga_mgr_register(dev, "Xilinx Zynq FPGA Manager",
				&zynq_fpga_ops, priv);
	if (err) {
		dev_err(dev, "unable to register FPGA manager\n");
		clk_unprepare(priv->clk);
		return err;
	}

	return 0;
}

static int zynq_fpga_remove(struct platform_device *pdev)
{
	struct zynq_fpga_priv *priv;
	struct fpga_manager *mgr;

	mgr = platform_get_drvdata(pdev);
	priv = mgr->priv;

	fpga_mgr_unregister(&pdev->dev);

	clk_unprepare(priv->clk);

	return 0;
}

#ifdef CONFIG_OF
static const struct of_device_id zynq_fpga_of_match[] = {
	{ .compatible = "xlnx,zynq-devcfg-1.0", },
	{},
};

MODULE_DEVICE_TABLE(of, zynq_fpga_of_match);
#endif

static struct platform_driver zynq_fpga_driver = {
	.probe = zynq_fpga_probe,
	.remove = zynq_fpga_remove,
	.driver = {
		.name = "zynq_fpga_manager",
		.of_match_table = of_match_ptr(zynq_fpga_of_match),
	},
};

module_platform_driver(zynq_fpga_driver);

MODULE_AUTHOR("Moritz Fischer <moritz.fischer@ettus.com>");
MODULE_AUTHOR("Michal Simek <michal.simek@xilinx.com>");
MODULE_DESCRIPTION("Xilinx Zynq FPGA Manager");
MODULE_LICENSE("GPL v2");