/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
8
/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC specific ring manager. Two or more offload engines
 * can share the same ring manager, so the Broadcom SoC specific ring
 * manager driver is implemented as a mailbox controller driver and the
 * offload engine drivers are implemented as mailbox clients.
 *
 * Typically, a Broadcom SoC specific ring manager implements a large
 * number of hardware rings over one or more SBA hardware devices. By
 * design, the internal buffer size of an SBA hardware device is limited,
 * but every offload operation supported by SBA can be broken down into
 * multiple small-sized requests and executed in parallel on multiple SBA
 * hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except for submitting requests to the SBA hardware device via mailbox
 * channels. This driver implements a DMA device with one DMA channel
 * using a set of mailbox channels provided by the Broadcom SoC specific
 * ring manager driver. To exploit parallelism (as described above), all
 * DMA requests arriving on the SBA RAID DMA channel are broken down into
 * smaller requests and submitted to multiple mailbox channels in a
 * round-robin fashion. To get more SBA DMA channels, more SBA device
 * nodes can be created in the Broadcom SoC specific DTS based on the
 * number of hardware rings supported by the Broadcom SoC ring manager,
 * as sketched in the example below.
 */
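/*
 * Illustrative sketch only: the node names, unit addresses, and mbox
 * specifiers below are assumptions; only the compatible strings and the
 * "mboxes" property are taken from this driver. A DTS fragment that
 * creates two SBA DMA channels backed by different sets of ring manager
 * mailbox channels might look like:
 *
 *	raid0: raid-offload@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raidmbox 0>, <&raidmbox 1>;
 *	};
 *
 *	raid1: raid-offload@1 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raidmbox 2>, <&raidmbox 3>;
 *	};
 *
 * Each such node is probed as a separate SBA device and therefore
 * exposes its own DMA channel.
 */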
37
38#include <linux/bitops.h>
39#include <linux/dma-mapping.h>
40#include <linux/dmaengine.h>
41#include <linux/list.h>
42#include <linux/mailbox_client.h>
43#include <linux/mailbox/brcm-message.h>
44#include <linux/module.h>
45#include <linux/of_device.h>
46#include <linux/slab.h>
47#include <linux/raid/pq.h>
48
49#include "dmaengine.h"
50
/* ====== Driver macros and defines ===== */
52
Anup Patel743e1c82017-05-15 10:34:54 +053053#define SBA_TYPE_SHIFT 48
54#define SBA_TYPE_MASK GENMASK(1, 0)
55#define SBA_TYPE_A 0x0
56#define SBA_TYPE_B 0x2
57#define SBA_TYPE_C 0x3
58#define SBA_USER_DEF_SHIFT 32
59#define SBA_USER_DEF_MASK GENMASK(15, 0)
60#define SBA_R_MDATA_SHIFT 24
61#define SBA_R_MDATA_MASK GENMASK(7, 0)
62#define SBA_C_MDATA_MS_SHIFT 18
63#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
64#define SBA_INT_SHIFT 17
65#define SBA_INT_MASK BIT(0)
66#define SBA_RESP_SHIFT 16
67#define SBA_RESP_MASK BIT(0)
68#define SBA_C_MDATA_SHIFT 8
69#define SBA_C_MDATA_MASK GENMASK(7, 0)
70#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
71#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
72#define SBA_C_MDATA_DNUM_SHIFT 5
73#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
74#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
75#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
76#define SBA_CMD_SHIFT 0
77#define SBA_CMD_MASK GENMASK(3, 0)
78#define SBA_CMD_ZERO_BUFFER 0x4
79#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
80#define SBA_CMD_LOAD_BUFFER 0x9
81#define SBA_CMD_XOR 0xa
82#define SBA_CMD_GALOIS_XOR 0xb
83#define SBA_CMD_WRITE_BUFFER 0xc
84#define SBA_CMD_GALOIS 0xe
85
86/* Driver helper macros */
87#define to_sba_request(tx) \
88 container_of(tx, struct sba_request, tx)
89#define to_sba_device(dchan) \
90 container_of(dchan, struct sba_device, dma_chan)
91
/* ===== Driver data structures ===== */
93
Anup Patel57a28502017-08-22 15:26:52 +053094enum sba_request_flags {
95 SBA_REQUEST_STATE_FREE = 0x001,
96 SBA_REQUEST_STATE_ALLOCED = 0x002,
97 SBA_REQUEST_STATE_PENDING = 0x004,
98 SBA_REQUEST_STATE_ACTIVE = 0x008,
99 SBA_REQUEST_STATE_RECEIVED = 0x010,
100 SBA_REQUEST_STATE_COMPLETED = 0x020,
101 SBA_REQUEST_STATE_ABORTED = 0x040,
102 SBA_REQUEST_STATE_MASK = 0x0ff,
103 SBA_REQUEST_FENCE = 0x100,
Anup Patel743e1c82017-05-15 10:34:54 +0530104};
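/*
 * Descriptive note (not part of the original code): the state helpers
 * below normally move a request through FREE -> ALLOCED -> PENDING ->
 * ACTIVE -> RECEIVED -> COMPLETED -> FREE, while terminate/cleanup
 * paths may move ACTIVE requests to ABORTED or return any non-active
 * request directly to FREE. SBA_REQUEST_FENCE is independent of the
 * state bits and only affects ordering when a request is made active.
 */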
105
106struct sba_request {
107 /* Global state */
108 struct list_head node;
109 struct sba_device *sba;
Anup Patel57a28502017-08-22 15:26:52 +0530110 u32 flags;
Anup Patel743e1c82017-05-15 10:34:54 +0530111 /* Chained requests management */
112 struct sba_request *first;
113 struct list_head next;
Anup Patel743e1c82017-05-15 10:34:54 +0530114 atomic_t next_pending_count;
115 /* BRCM message data */
Anup Patel743e1c82017-05-15 10:34:54 +0530116 struct brcm_sba_command *cmds;
117 struct brcm_message msg;
118 struct dma_async_tx_descriptor tx;
119};
120
121enum sba_version {
122 SBA_VER_1 = 0,
123 SBA_VER_2
124};
125
126struct sba_device {
127 /* Underlying device */
128 struct device *dev;
129 /* DT configuration parameters */
130 enum sba_version ver;
131 /* Derived configuration parameters */
132 u32 max_req;
133 u32 hw_buf_size;
134 u32 hw_resp_size;
135 u32 max_pq_coefs;
136 u32 max_pq_srcs;
137 u32 max_cmd_per_req;
138 u32 max_xor_srcs;
139 u32 max_resp_pool_size;
140 u32 max_cmds_pool_size;
	/* Mailbox client and Mailbox channels */
142 struct mbox_client client;
143 int mchans_count;
144 atomic_t mchans_current;
145 struct mbox_chan **mchans;
146 struct device *mbox_dev;
147 /* DMA device and DMA channel */
148 struct dma_device dma_dev;
149 struct dma_chan dma_chan;
150 /* DMA channel resources */
151 void *resp_base;
152 dma_addr_t resp_dma_base;
153 void *cmds_base;
154 dma_addr_t cmds_dma_base;
155 spinlock_t reqs_lock;
156 struct sba_request *reqs;
157 bool reqs_fence;
158 struct list_head reqs_alloc_list;
159 struct list_head reqs_pending_list;
160 struct list_head reqs_active_list;
161 struct list_head reqs_received_list;
162 struct list_head reqs_completed_list;
163 struct list_head reqs_aborted_list;
164 struct list_head reqs_free_list;
165 int reqs_free_count;
166};
167
/* ====== Command helper routines ===== */

170static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
171{
172 cmd &= ~((u64)mask << shift);
173 cmd |= ((u64)(val & mask) << shift);
174 return cmd;
175}
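/*
 * Reading aid derived from the SBA_*_SHIFT/_MASK definitions above:
 * the 64-bit command word built with sba_cmd_enc() is laid out as
 *
 *	[49:48] TYPE       [47:32] USER_DEF  [31:24] R_MDATA
 *	[19:18] C_MDATA_MS [17]    INT       [16]    RESP
 *	[15:8]  C_MDATA    [3:0]   CMD
 *
 * For example, the Type-B LOAD_BUFFER command used by the sba_fillup_*()
 * routines below is assembled as:
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(sba_cmd_load_c_mdata(0)),
 *			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
 *			  SBA_CMD_SHIFT, SBA_CMD_MASK);
 */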
176
177static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
178{
179 return b0 & SBA_C_MDATA_BNUMx_MASK;
180}
181
182static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
183{
184 return b0 & SBA_C_MDATA_BNUMx_MASK;
185}
186
187static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
188{
189 return (b0 & SBA_C_MDATA_BNUMx_MASK) |
190 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
191}
192
193static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
194{
195 return (b0 & SBA_C_MDATA_BNUMx_MASK) |
196 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
197 ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
198}
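/*
 * Reading aid for the c_mdata helpers above: bits [1:0] hold buf0's
 * buffer number, bits [3:2] hold buf1's buffer number, and bits [9:5]
 * hold the GF(2^8) exponent for Galois operations. SBA_C_MDATA_LS() and
 * SBA_C_MDATA_MS() then split this value across the C_MDATA and
 * C_MDATA_MS fields of the command word.
 */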
199
/* ====== General helper routines ===== */

202static struct sba_request *sba_alloc_request(struct sba_device *sba)
203{
204 unsigned long flags;
205 struct sba_request *req = NULL;
206
207 spin_lock_irqsave(&sba->reqs_lock, flags);
Anup Patel743e1c82017-05-15 10:34:54 +0530208 req = list_first_entry_or_null(&sba->reqs_free_list,
209 struct sba_request, node);
210 if (req) {
211 list_move_tail(&req->node, &sba->reqs_alloc_list);
Anup Patel743e1c82017-05-15 10:34:54 +0530212 sba->reqs_free_count--;
Anup Patel743e1c82017-05-15 10:34:54 +0530213 }
Anup Patel743e1c82017-05-15 10:34:54 +0530214 spin_unlock_irqrestore(&sba->reqs_lock, flags);
Anup Patele4274cf2017-08-22 15:26:51 +0530215 if (!req)
216 return NULL;
217
Anup Patel57a28502017-08-22 15:26:52 +0530218 req->flags = SBA_REQUEST_STATE_ALLOCED;
Anup Patele4274cf2017-08-22 15:26:51 +0530219 req->first = req;
220 INIT_LIST_HEAD(&req->next);
Anup Patele4274cf2017-08-22 15:26:51 +0530221 atomic_set(&req->next_pending_count, 1);
222
223 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
Anup Patel743e1c82017-05-15 10:34:54 +0530224
225 return req;
226}
227
228/* Note: Must be called with sba->reqs_lock held */
229static void _sba_pending_request(struct sba_device *sba,
230 struct sba_request *req)
231{
232 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530233 req->flags &= ~SBA_REQUEST_STATE_MASK;
234 req->flags |= SBA_REQUEST_STATE_PENDING;
Anup Patel743e1c82017-05-15 10:34:54 +0530235 list_move_tail(&req->node, &sba->reqs_pending_list);
236 if (list_empty(&sba->reqs_active_list))
237 sba->reqs_fence = false;
238}
239
240/* Note: Must be called with sba->reqs_lock held */
241static bool _sba_active_request(struct sba_device *sba,
242 struct sba_request *req)
243{
244 lockdep_assert_held(&sba->reqs_lock);
245 if (list_empty(&sba->reqs_active_list))
246 sba->reqs_fence = false;
247 if (sba->reqs_fence)
248 return false;
Anup Patel57a28502017-08-22 15:26:52 +0530249 req->flags &= ~SBA_REQUEST_STATE_MASK;
250 req->flags |= SBA_REQUEST_STATE_ACTIVE;
Anup Patel743e1c82017-05-15 10:34:54 +0530251 list_move_tail(&req->node, &sba->reqs_active_list);
Anup Patel57a28502017-08-22 15:26:52 +0530252 if (req->flags & SBA_REQUEST_FENCE)
Anup Patel743e1c82017-05-15 10:34:54 +0530253 sba->reqs_fence = true;
254 return true;
255}
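/*
 * Note on fencing: once a request carrying SBA_REQUEST_FENCE has been
 * made active, sba->reqs_fence stays set and _sba_active_request()
 * refuses to activate further requests until the active list drains,
 * which preserves ordering across the fence.
 */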
256
257/* Note: Must be called with sba->reqs_lock held */
258static void _sba_abort_request(struct sba_device *sba,
259 struct sba_request *req)
260{
261 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530262 req->flags &= ~SBA_REQUEST_STATE_MASK;
263 req->flags |= SBA_REQUEST_STATE_ABORTED;
Anup Patel743e1c82017-05-15 10:34:54 +0530264 list_move_tail(&req->node, &sba->reqs_aborted_list);
265 if (list_empty(&sba->reqs_active_list))
266 sba->reqs_fence = false;
267}
268
269/* Note: Must be called with sba->reqs_lock held */
270static void _sba_free_request(struct sba_device *sba,
271 struct sba_request *req)
272{
273 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530274 req->flags &= ~SBA_REQUEST_STATE_MASK;
275 req->flags |= SBA_REQUEST_STATE_FREE;
Anup Patel743e1c82017-05-15 10:34:54 +0530276 list_move_tail(&req->node, &sba->reqs_free_list);
277 if (list_empty(&sba->reqs_active_list))
278 sba->reqs_fence = false;
279 sba->reqs_free_count++;
280}
281
282static void sba_received_request(struct sba_request *req)
283{
284 unsigned long flags;
285 struct sba_device *sba = req->sba;
286
287 spin_lock_irqsave(&sba->reqs_lock, flags);
Anup Patel57a28502017-08-22 15:26:52 +0530288 req->flags &= ~SBA_REQUEST_STATE_MASK;
289 req->flags |= SBA_REQUEST_STATE_RECEIVED;
Anup Patel743e1c82017-05-15 10:34:54 +0530290 list_move_tail(&req->node, &sba->reqs_received_list);
291 spin_unlock_irqrestore(&sba->reqs_lock, flags);
292}
293
294static void sba_complete_chained_requests(struct sba_request *req)
295{
296 unsigned long flags;
297 struct sba_request *nreq;
298 struct sba_device *sba = req->sba;
299
300 spin_lock_irqsave(&sba->reqs_lock, flags);
301
Anup Patel57a28502017-08-22 15:26:52 +0530302 req->flags &= ~SBA_REQUEST_STATE_MASK;
303 req->flags |= SBA_REQUEST_STATE_COMPLETED;
Anup Patel743e1c82017-05-15 10:34:54 +0530304 list_move_tail(&req->node, &sba->reqs_completed_list);
305 list_for_each_entry(nreq, &req->next, next) {
Anup Patel57a28502017-08-22 15:26:52 +0530306 nreq->flags &= ~SBA_REQUEST_STATE_MASK;
307 nreq->flags |= SBA_REQUEST_STATE_COMPLETED;
Anup Patel743e1c82017-05-15 10:34:54 +0530308 list_move_tail(&nreq->node, &sba->reqs_completed_list);
309 }
310 if (list_empty(&sba->reqs_active_list))
311 sba->reqs_fence = false;
312
313 spin_unlock_irqrestore(&sba->reqs_lock, flags);
314}
315
316static void sba_free_chained_requests(struct sba_request *req)
317{
318 unsigned long flags;
319 struct sba_request *nreq;
320 struct sba_device *sba = req->sba;
321
322 spin_lock_irqsave(&sba->reqs_lock, flags);
323
324 _sba_free_request(sba, req);
325 list_for_each_entry(nreq, &req->next, next)
326 _sba_free_request(sba, nreq);
327
328 spin_unlock_irqrestore(&sba->reqs_lock, flags);
329}
330
331static void sba_chain_request(struct sba_request *first,
332 struct sba_request *req)
333{
334 unsigned long flags;
335 struct sba_device *sba = req->sba;
336
337 spin_lock_irqsave(&sba->reqs_lock, flags);
338
339 list_add_tail(&req->next, &first->next);
340 req->first = first;
Anup Patel10f1a332017-08-22 15:26:53 +0530341 atomic_inc(&first->next_pending_count);
Anup Patel743e1c82017-05-15 10:34:54 +0530342
343 spin_unlock_irqrestore(&sba->reqs_lock, flags);
344}
345
346static void sba_cleanup_nonpending_requests(struct sba_device *sba)
347{
348 unsigned long flags;
349 struct sba_request *req, *req1;
350
351 spin_lock_irqsave(&sba->reqs_lock, flags);
352
	/* Free up all allocated requests */
354 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
355 _sba_free_request(sba, req);
356
	/* Free up all received requests */
358 list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
359 _sba_free_request(sba, req);
360
	/* Free up all completed requests */
362 list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
363 _sba_free_request(sba, req);
364
365 /* Set all active requests as aborted */
366 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
367 _sba_abort_request(sba, req);
368
	/*
	 * Note: We expect that aborted requests will eventually be
	 * freed by sba_receive_message()
	 */
373
374 spin_unlock_irqrestore(&sba->reqs_lock, flags);
375}
376
377static void sba_cleanup_pending_requests(struct sba_device *sba)
378{
379 unsigned long flags;
380 struct sba_request *req, *req1;
381
382 spin_lock_irqsave(&sba->reqs_lock, flags);
383
	/* Free up all pending requests */
385 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
386 _sba_free_request(sba, req);
387
388 spin_unlock_irqrestore(&sba->reqs_lock, flags);
389}
390
391/* ====== DMAENGINE callbacks ===== */
392
393static void sba_free_chan_resources(struct dma_chan *dchan)
394{
	/*
	 * Channel resources are pre-allocated, so we just free up
	 * whatever we can so that the pre-allocated channel resources
	 * can be reused next time.
	 */
400 sba_cleanup_nonpending_requests(to_sba_device(dchan));
401}
402
403static int sba_device_terminate_all(struct dma_chan *dchan)
404{
405 /* Cleanup all pending requests */
406 sba_cleanup_pending_requests(to_sba_device(dchan));
407
408 return 0;
409}
410
411static int sba_send_mbox_request(struct sba_device *sba,
412 struct sba_request *req)
413{
414 int mchans_idx, ret = 0;
415
416 /* Select mailbox channel in round-robin fashion */
417 mchans_idx = atomic_inc_return(&sba->mchans_current);
418 mchans_idx = mchans_idx % sba->mchans_count;
419
420 /* Send message for the request */
421 req->msg.error = 0;
422 ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
423 if (ret < 0) {
424 dev_err(sba->dev, "send message failed with error %d", ret);
425 return ret;
426 }
427 ret = req->msg.error;
428 if (ret < 0) {
429 dev_err(sba->dev, "message error %d", ret);
430 return ret;
431 }
432
433 return 0;
434}
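/*
 * Example of the round-robin selection above (illustrative numbers):
 * with mchans_count == 8 and mchans_current initialized to 0,
 * successive requests are sent on channels 1, 2, ..., 7, 0, 1, ...
 * because atomic_inc_return() yields the post-increment value.
 */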
435
436static void sba_issue_pending(struct dma_chan *dchan)
437{
438 int ret;
439 unsigned long flags;
440 struct sba_request *req, *req1;
441 struct sba_device *sba = to_sba_device(dchan);
442
443 spin_lock_irqsave(&sba->reqs_lock, flags);
444
	/* Process all pending requests */
446 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node) {
447 /* Try to make request active */
448 if (!_sba_active_request(sba, req))
449 break;
450
451 /* Send request to mailbox channel */
452 spin_unlock_irqrestore(&sba->reqs_lock, flags);
453 ret = sba_send_mbox_request(sba, req);
454 spin_lock_irqsave(&sba->reqs_lock, flags);
455
456 /* If something went wrong then keep request pending */
457 if (ret < 0) {
458 _sba_pending_request(sba, req);
459 break;
460 }
461 }
462
463 spin_unlock_irqrestore(&sba->reqs_lock, flags);
464}
465
466static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
467{
468 unsigned long flags;
469 dma_cookie_t cookie;
470 struct sba_device *sba;
471 struct sba_request *req, *nreq;
472
473 if (unlikely(!tx))
474 return -EINVAL;
475
476 sba = to_sba_device(tx->chan);
477 req = to_sba_request(tx);
478
479 /* Assign cookie and mark all chained requests pending */
480 spin_lock_irqsave(&sba->reqs_lock, flags);
481 cookie = dma_cookie_assign(tx);
482 _sba_pending_request(sba, req);
483 list_for_each_entry(nreq, &req->next, next)
484 _sba_pending_request(sba, nreq);
485 spin_unlock_irqrestore(&sba->reqs_lock, flags);
486
487 return cookie;
488}
489
490static enum dma_status sba_tx_status(struct dma_chan *dchan,
491 dma_cookie_t cookie,
492 struct dma_tx_state *txstate)
493{
494 int mchan_idx;
495 enum dma_status ret;
496 struct sba_device *sba = to_sba_device(dchan);
497
	ret = dma_cookie_status(dchan, cookie, txstate);
	if (ret == DMA_COMPLETE)
		return ret;

	for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
		mbox_client_peek_data(sba->mchans[mchan_idx]);

	return dma_cookie_status(dchan, cookie, txstate);
}
507
508static void sba_fillup_interrupt_msg(struct sba_request *req,
509 struct brcm_sba_command *cmds,
510 struct brcm_message *msg)
511{
512 u64 cmd;
513 u32 c_mdata;
Anup Patele7ae72a2017-08-22 15:26:54 +0530514 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530515 struct brcm_sba_command *cmdsp = cmds;
516
517 /* Type-B command to load dummy data into buf0 */
518 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
519 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
520 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
521 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
522 c_mdata = sba_cmd_load_c_mdata(0);
523 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
524 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
525 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
526 SBA_CMD_SHIFT, SBA_CMD_MASK);
527 cmdsp->cmd = cmd;
528 *cmdsp->cmd_dma = cpu_to_le64(cmd);
529 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
Anup Patele7ae72a2017-08-22 15:26:54 +0530530 cmdsp->data = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530531 cmdsp->data_len = req->sba->hw_resp_size;
532 cmdsp++;
533
534 /* Type-A command to write buf0 to dummy location */
535 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
536 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
537 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
538 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
539 cmd = sba_cmd_enc(cmd, 0x1,
540 SBA_RESP_SHIFT, SBA_RESP_MASK);
541 c_mdata = sba_cmd_write_c_mdata(0);
542 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
543 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
544 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
545 SBA_CMD_SHIFT, SBA_CMD_MASK);
546 cmdsp->cmd = cmd;
547 *cmdsp->cmd_dma = cpu_to_le64(cmd);
548 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
549 if (req->sba->hw_resp_size) {
550 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530551 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530552 cmdsp->resp_len = req->sba->hw_resp_size;
553 }
554 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
Anup Patele7ae72a2017-08-22 15:26:54 +0530555 cmdsp->data = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530556 cmdsp->data_len = req->sba->hw_resp_size;
557 cmdsp++;
558
559 /* Fillup brcm_message */
560 msg->type = BRCM_MESSAGE_SBA;
561 msg->sba.cmds = cmds;
562 msg->sba.cmds_count = cmdsp - cmds;
563 msg->ctx = req;
564 msg->error = 0;
565}
566
567static struct dma_async_tx_descriptor *
568sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
569{
570 struct sba_request *req = NULL;
571 struct sba_device *sba = to_sba_device(dchan);
572
573 /* Alloc new request */
574 req = sba_alloc_request(sba);
575 if (!req)
576 return NULL;
577
578 /*
579 * Force fence so that no requests are submitted
580 * until DMA callback for this request is invoked.
581 */
Anup Patel57a28502017-08-22 15:26:52 +0530582 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +0530583
584 /* Fillup request message */
585 sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
586
587 /* Init async_tx descriptor */
588 req->tx.flags = flags;
589 req->tx.cookie = -EBUSY;
590
Colin Ian King1fc63cb2017-05-17 22:58:50 +0100591 return &req->tx;
Anup Patel743e1c82017-05-15 10:34:54 +0530592}
593
594static void sba_fillup_memcpy_msg(struct sba_request *req,
595 struct brcm_sba_command *cmds,
596 struct brcm_message *msg,
597 dma_addr_t msg_offset, size_t msg_len,
598 dma_addr_t dst, dma_addr_t src)
599{
600 u64 cmd;
601 u32 c_mdata;
Anup Patele7ae72a2017-08-22 15:26:54 +0530602 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530603 struct brcm_sba_command *cmdsp = cmds;
604
605 /* Type-B command to load data into buf0 */
606 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
607 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
608 cmd = sba_cmd_enc(cmd, msg_len,
609 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
610 c_mdata = sba_cmd_load_c_mdata(0);
611 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
612 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
613 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
614 SBA_CMD_SHIFT, SBA_CMD_MASK);
615 cmdsp->cmd = cmd;
616 *cmdsp->cmd_dma = cpu_to_le64(cmd);
617 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
618 cmdsp->data = src + msg_offset;
619 cmdsp->data_len = msg_len;
620 cmdsp++;
621
622 /* Type-A command to write buf0 */
623 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
624 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
625 cmd = sba_cmd_enc(cmd, msg_len,
626 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
627 cmd = sba_cmd_enc(cmd, 0x1,
628 SBA_RESP_SHIFT, SBA_RESP_MASK);
629 c_mdata = sba_cmd_write_c_mdata(0);
630 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
631 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
632 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
633 SBA_CMD_SHIFT, SBA_CMD_MASK);
634 cmdsp->cmd = cmd;
635 *cmdsp->cmd_dma = cpu_to_le64(cmd);
636 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
637 if (req->sba->hw_resp_size) {
638 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530639 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530640 cmdsp->resp_len = req->sba->hw_resp_size;
641 }
642 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
643 cmdsp->data = dst + msg_offset;
644 cmdsp->data_len = msg_len;
645 cmdsp++;
646
647 /* Fillup brcm_message */
648 msg->type = BRCM_MESSAGE_SBA;
649 msg->sba.cmds = cmds;
650 msg->sba.cmds_count = cmdsp - cmds;
651 msg->ctx = req;
652 msg->error = 0;
653}
654
655static struct sba_request *
656sba_prep_dma_memcpy_req(struct sba_device *sba,
657 dma_addr_t off, dma_addr_t dst, dma_addr_t src,
658 size_t len, unsigned long flags)
659{
660 struct sba_request *req = NULL;
661
662 /* Alloc new request */
663 req = sba_alloc_request(sba);
664 if (!req)
665 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +0530666 if (flags & DMA_PREP_FENCE)
667 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +0530668
669 /* Fillup request message */
670 sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
671 off, len, dst, src);
672
673 /* Init async_tx descriptor */
674 req->tx.flags = flags;
675 req->tx.cookie = -EBUSY;
676
677 return req;
678}
679
680static struct dma_async_tx_descriptor *
681sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
682 size_t len, unsigned long flags)
683{
684 size_t req_len;
685 dma_addr_t off = 0;
686 struct sba_device *sba = to_sba_device(dchan);
687 struct sba_request *first = NULL, *req;
688
	/* Create chained requests where each request is up to hw_buf_size */
690 while (len) {
691 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
692
693 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
694 req_len, flags);
695 if (!req) {
696 if (first)
697 sba_free_chained_requests(first);
698 return NULL;
699 }
700
701 if (first)
702 sba_chain_request(first, req);
703 else
704 first = req;
705
706 off += req_len;
707 len -= req_len;
708 }
709
710 return (first) ? &first->tx : NULL;
711}
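/*
 * Worked example for the splitting loop above, assuming the
 * hw_buf_size of 4096 set in sba_probe(): a 10000-byte memcpy becomes
 * three chained sba_requests covering 4096 + 4096 + 1808 bytes, all
 * reported to the caller as a single dma_async_tx_descriptor
 * (&first->tx).
 */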
712
713static void sba_fillup_xor_msg(struct sba_request *req,
714 struct brcm_sba_command *cmds,
715 struct brcm_message *msg,
716 dma_addr_t msg_offset, size_t msg_len,
717 dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
718{
719 u64 cmd;
720 u32 c_mdata;
721 unsigned int i;
Anup Patele7ae72a2017-08-22 15:26:54 +0530722 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530723 struct brcm_sba_command *cmdsp = cmds;
724
725 /* Type-B command to load data into buf0 */
726 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
727 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
728 cmd = sba_cmd_enc(cmd, msg_len,
729 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
730 c_mdata = sba_cmd_load_c_mdata(0);
731 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
732 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
733 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
734 SBA_CMD_SHIFT, SBA_CMD_MASK);
735 cmdsp->cmd = cmd;
736 *cmdsp->cmd_dma = cpu_to_le64(cmd);
737 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
738 cmdsp->data = src[0] + msg_offset;
739 cmdsp->data_len = msg_len;
740 cmdsp++;
741
742 /* Type-B commands to xor data with buf0 and put it back in buf0 */
743 for (i = 1; i < src_cnt; i++) {
744 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
745 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
746 cmd = sba_cmd_enc(cmd, msg_len,
747 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
748 c_mdata = sba_cmd_xor_c_mdata(0, 0);
749 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
750 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
751 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
752 SBA_CMD_SHIFT, SBA_CMD_MASK);
753 cmdsp->cmd = cmd;
754 *cmdsp->cmd_dma = cpu_to_le64(cmd);
755 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
756 cmdsp->data = src[i] + msg_offset;
757 cmdsp->data_len = msg_len;
758 cmdsp++;
759 }
760
761 /* Type-A command to write buf0 */
762 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
763 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
764 cmd = sba_cmd_enc(cmd, msg_len,
765 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
766 cmd = sba_cmd_enc(cmd, 0x1,
767 SBA_RESP_SHIFT, SBA_RESP_MASK);
768 c_mdata = sba_cmd_write_c_mdata(0);
769 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
770 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
771 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
772 SBA_CMD_SHIFT, SBA_CMD_MASK);
773 cmdsp->cmd = cmd;
774 *cmdsp->cmd_dma = cpu_to_le64(cmd);
775 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
776 if (req->sba->hw_resp_size) {
777 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530778 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530779 cmdsp->resp_len = req->sba->hw_resp_size;
780 }
781 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
782 cmdsp->data = dst + msg_offset;
783 cmdsp->data_len = msg_len;
784 cmdsp++;
785
786 /* Fillup brcm_message */
787 msg->type = BRCM_MESSAGE_SBA;
788 msg->sba.cmds = cmds;
789 msg->sba.cmds_count = cmdsp - cmds;
790 msg->ctx = req;
791 msg->error = 0;
792}
793
Vinod Kouldd2bceb2017-07-19 10:03:24 +0530794static struct sba_request *
Anup Patel743e1c82017-05-15 10:34:54 +0530795sba_prep_dma_xor_req(struct sba_device *sba,
796 dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
797 u32 src_cnt, size_t len, unsigned long flags)
798{
799 struct sba_request *req = NULL;
800
801 /* Alloc new request */
802 req = sba_alloc_request(sba);
803 if (!req)
804 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +0530805 if (flags & DMA_PREP_FENCE)
806 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +0530807
808 /* Fillup request message */
809 sba_fillup_xor_msg(req, req->cmds, &req->msg,
810 off, len, dst, src, src_cnt);
811
812 /* Init async_tx descriptor */
813 req->tx.flags = flags;
814 req->tx.cookie = -EBUSY;
815
816 return req;
817}
818
819static struct dma_async_tx_descriptor *
820sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
821 u32 src_cnt, size_t len, unsigned long flags)
822{
823 size_t req_len;
824 dma_addr_t off = 0;
825 struct sba_device *sba = to_sba_device(dchan);
826 struct sba_request *first = NULL, *req;
827
828 /* Sanity checks */
829 if (unlikely(src_cnt > sba->max_xor_srcs))
830 return NULL;
831
	/* Create chained requests where each request is up to hw_buf_size */
833 while (len) {
834 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
835
836 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
837 req_len, flags);
838 if (!req) {
839 if (first)
840 sba_free_chained_requests(first);
841 return NULL;
842 }
843
844 if (first)
845 sba_chain_request(first, req);
846 else
847 first = req;
848
849 off += req_len;
850 len -= req_len;
851 }
852
853 return (first) ? &first->tx : NULL;
854}
855
856static void sba_fillup_pq_msg(struct sba_request *req,
857 bool pq_continue,
858 struct brcm_sba_command *cmds,
859 struct brcm_message *msg,
860 dma_addr_t msg_offset, size_t msg_len,
861 dma_addr_t *dst_p, dma_addr_t *dst_q,
862 const u8 *scf, dma_addr_t *src, u32 src_cnt)
863{
864 u64 cmd;
865 u32 c_mdata;
866 unsigned int i;
Anup Patele7ae72a2017-08-22 15:26:54 +0530867 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530868 struct brcm_sba_command *cmdsp = cmds;
869
870 if (pq_continue) {
871 /* Type-B command to load old P into buf0 */
872 if (dst_p) {
873 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
874 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
875 cmd = sba_cmd_enc(cmd, msg_len,
876 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
877 c_mdata = sba_cmd_load_c_mdata(0);
878 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
879 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
880 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
881 SBA_CMD_SHIFT, SBA_CMD_MASK);
882 cmdsp->cmd = cmd;
883 *cmdsp->cmd_dma = cpu_to_le64(cmd);
884 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
885 cmdsp->data = *dst_p + msg_offset;
886 cmdsp->data_len = msg_len;
887 cmdsp++;
888 }
889
890 /* Type-B command to load old Q into buf1 */
891 if (dst_q) {
892 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
893 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
894 cmd = sba_cmd_enc(cmd, msg_len,
895 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
896 c_mdata = sba_cmd_load_c_mdata(1);
897 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
898 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
899 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
900 SBA_CMD_SHIFT, SBA_CMD_MASK);
901 cmdsp->cmd = cmd;
902 *cmdsp->cmd_dma = cpu_to_le64(cmd);
903 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
904 cmdsp->data = *dst_q + msg_offset;
905 cmdsp->data_len = msg_len;
906 cmdsp++;
907 }
908 } else {
909 /* Type-A command to zero all buffers */
910 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
911 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
912 cmd = sba_cmd_enc(cmd, msg_len,
913 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
914 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
915 SBA_CMD_SHIFT, SBA_CMD_MASK);
916 cmdsp->cmd = cmd;
917 *cmdsp->cmd_dma = cpu_to_le64(cmd);
918 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
919 cmdsp++;
920 }
921
	/* Type-B commands to generate P onto buf0 and Q onto buf1 */
923 for (i = 0; i < src_cnt; i++) {
924 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
925 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
926 cmd = sba_cmd_enc(cmd, msg_len,
927 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
928 c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
929 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
930 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
931 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
932 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
933 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
934 SBA_CMD_SHIFT, SBA_CMD_MASK);
935 cmdsp->cmd = cmd;
936 *cmdsp->cmd_dma = cpu_to_le64(cmd);
937 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
938 cmdsp->data = src[i] + msg_offset;
939 cmdsp->data_len = msg_len;
940 cmdsp++;
941 }
942
943 /* Type-A command to write buf0 */
944 if (dst_p) {
945 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
946 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
947 cmd = sba_cmd_enc(cmd, msg_len,
948 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
949 cmd = sba_cmd_enc(cmd, 0x1,
950 SBA_RESP_SHIFT, SBA_RESP_MASK);
951 c_mdata = sba_cmd_write_c_mdata(0);
952 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
953 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
954 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
955 SBA_CMD_SHIFT, SBA_CMD_MASK);
956 cmdsp->cmd = cmd;
957 *cmdsp->cmd_dma = cpu_to_le64(cmd);
958 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
959 if (req->sba->hw_resp_size) {
960 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530961 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530962 cmdsp->resp_len = req->sba->hw_resp_size;
963 }
964 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
965 cmdsp->data = *dst_p + msg_offset;
966 cmdsp->data_len = msg_len;
967 cmdsp++;
968 }
969
970 /* Type-A command to write buf1 */
971 if (dst_q) {
972 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
973 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
974 cmd = sba_cmd_enc(cmd, msg_len,
975 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
976 cmd = sba_cmd_enc(cmd, 0x1,
977 SBA_RESP_SHIFT, SBA_RESP_MASK);
978 c_mdata = sba_cmd_write_c_mdata(1);
979 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
980 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
981 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
982 SBA_CMD_SHIFT, SBA_CMD_MASK);
983 cmdsp->cmd = cmd;
984 *cmdsp->cmd_dma = cpu_to_le64(cmd);
985 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
986 if (req->sba->hw_resp_size) {
987 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530988 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530989 cmdsp->resp_len = req->sba->hw_resp_size;
990 }
991 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
992 cmdsp->data = *dst_q + msg_offset;
993 cmdsp->data_len = msg_len;
994 cmdsp++;
995 }
996
997 /* Fillup brcm_message */
998 msg->type = BRCM_MESSAGE_SBA;
999 msg->sba.cmds = cmds;
1000 msg->sba.cmds_count = cmdsp - cmds;
1001 msg->ctx = req;
1002 msg->error = 0;
1003}
1004
Vinod Kouldd2bceb2017-07-19 10:03:24 +05301005static struct sba_request *
Anup Patel743e1c82017-05-15 10:34:54 +05301006sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
1007 dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
1008 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1009{
1010 struct sba_request *req = NULL;
1011
1012 /* Alloc new request */
1013 req = sba_alloc_request(sba);
1014 if (!req)
1015 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +05301016 if (flags & DMA_PREP_FENCE)
1017 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +05301018
1019 /* Fillup request messages */
1020 sba_fillup_pq_msg(req, dmaf_continue(flags),
1021 req->cmds, &req->msg,
1022 off, len, dst_p, dst_q, scf, src, src_cnt);
1023
1024 /* Init async_tx descriptor */
1025 req->tx.flags = flags;
1026 req->tx.cookie = -EBUSY;
1027
1028 return req;
1029}
1030
1031static void sba_fillup_pq_single_msg(struct sba_request *req,
1032 bool pq_continue,
1033 struct brcm_sba_command *cmds,
1034 struct brcm_message *msg,
1035 dma_addr_t msg_offset, size_t msg_len,
1036 dma_addr_t *dst_p, dma_addr_t *dst_q,
1037 dma_addr_t src, u8 scf)
1038{
1039 u64 cmd;
1040 u32 c_mdata;
1041 u8 pos, dpos = raid6_gflog[scf];
Anup Patele7ae72a2017-08-22 15:26:54 +05301042 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +05301043 struct brcm_sba_command *cmdsp = cmds;
1044
1045 if (!dst_p)
1046 goto skip_p;
1047
1048 if (pq_continue) {
1049 /* Type-B command to load old P into buf0 */
1050 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1051 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1052 cmd = sba_cmd_enc(cmd, msg_len,
1053 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1054 c_mdata = sba_cmd_load_c_mdata(0);
1055 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1056 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1057 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1058 SBA_CMD_SHIFT, SBA_CMD_MASK);
1059 cmdsp->cmd = cmd;
1060 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1061 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1062 cmdsp->data = *dst_p + msg_offset;
1063 cmdsp->data_len = msg_len;
1064 cmdsp++;
1065
1066 /*
1067 * Type-B commands to xor data with buf0 and put it
1068 * back in buf0
1069 */
1070 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1071 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1072 cmd = sba_cmd_enc(cmd, msg_len,
1073 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1074 c_mdata = sba_cmd_xor_c_mdata(0, 0);
1075 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1076 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1077 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1078 SBA_CMD_SHIFT, SBA_CMD_MASK);
1079 cmdsp->cmd = cmd;
1080 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1081 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1082 cmdsp->data = src + msg_offset;
1083 cmdsp->data_len = msg_len;
1084 cmdsp++;
1085 } else {
1086 /* Type-B command to load old P into buf0 */
1087 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1088 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1089 cmd = sba_cmd_enc(cmd, msg_len,
1090 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1091 c_mdata = sba_cmd_load_c_mdata(0);
1092 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1093 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1094 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1095 SBA_CMD_SHIFT, SBA_CMD_MASK);
1096 cmdsp->cmd = cmd;
1097 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1098 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1099 cmdsp->data = src + msg_offset;
1100 cmdsp->data_len = msg_len;
1101 cmdsp++;
1102 }
1103
1104 /* Type-A command to write buf0 */
1105 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1106 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1107 cmd = sba_cmd_enc(cmd, msg_len,
1108 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1109 cmd = sba_cmd_enc(cmd, 0x1,
1110 SBA_RESP_SHIFT, SBA_RESP_MASK);
1111 c_mdata = sba_cmd_write_c_mdata(0);
1112 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1113 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1114 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1115 SBA_CMD_SHIFT, SBA_CMD_MASK);
1116 cmdsp->cmd = cmd;
1117 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1118 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1119 if (req->sba->hw_resp_size) {
1120 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +05301121 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +05301122 cmdsp->resp_len = req->sba->hw_resp_size;
1123 }
1124 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1125 cmdsp->data = *dst_p + msg_offset;
1126 cmdsp->data_len = msg_len;
1127 cmdsp++;
1128
1129skip_p:
1130 if (!dst_q)
1131 goto skip_q;
1132
1133 /* Type-A command to zero all buffers */
1134 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1135 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1136 cmd = sba_cmd_enc(cmd, msg_len,
1137 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1138 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
1139 SBA_CMD_SHIFT, SBA_CMD_MASK);
1140 cmdsp->cmd = cmd;
1141 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1142 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1143 cmdsp++;
1144
1145 if (dpos == 255)
1146 goto skip_q_computation;
1147 pos = (dpos < req->sba->max_pq_coefs) ?
1148 dpos : (req->sba->max_pq_coefs - 1);
1149
1150 /*
1151 * Type-B command to generate initial Q from data
1152 * and store output into buf0
1153 */
1154 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1155 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1156 cmd = sba_cmd_enc(cmd, msg_len,
1157 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1158 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
1159 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1160 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1161 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1162 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1163 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1164 SBA_CMD_SHIFT, SBA_CMD_MASK);
1165 cmdsp->cmd = cmd;
1166 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1167 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1168 cmdsp->data = src + msg_offset;
1169 cmdsp->data_len = msg_len;
1170 cmdsp++;
1171
1172 dpos -= pos;
1173
	/* Multiple Type-A commands to generate final Q */
1175 while (dpos) {
1176 pos = (dpos < req->sba->max_pq_coefs) ?
1177 dpos : (req->sba->max_pq_coefs - 1);
1178
1179 /*
		 * Type-A command to generate Q with buf0 and
		 * buf1, storing the result in buf0
1182 */
1183 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1184 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1185 cmd = sba_cmd_enc(cmd, msg_len,
1186 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1187 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
1188 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1189 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1190 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1191 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1192 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1193 SBA_CMD_SHIFT, SBA_CMD_MASK);
1194 cmdsp->cmd = cmd;
1195 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1196 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1197 cmdsp++;
1198
1199 dpos -= pos;
1200 }
1201
1202skip_q_computation:
1203 if (pq_continue) {
1204 /*
1205 * Type-B command to XOR previous output with
1206 * buf0 and write it into buf0
1207 */
1208 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1209 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1210 cmd = sba_cmd_enc(cmd, msg_len,
1211 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1212 c_mdata = sba_cmd_xor_c_mdata(0, 0);
1213 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1214 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1215 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1216 SBA_CMD_SHIFT, SBA_CMD_MASK);
1217 cmdsp->cmd = cmd;
1218 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1219 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1220 cmdsp->data = *dst_q + msg_offset;
1221 cmdsp->data_len = msg_len;
1222 cmdsp++;
1223 }
1224
1225 /* Type-A command to write buf0 */
1226 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1227 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1228 cmd = sba_cmd_enc(cmd, msg_len,
1229 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1230 cmd = sba_cmd_enc(cmd, 0x1,
1231 SBA_RESP_SHIFT, SBA_RESP_MASK);
1232 c_mdata = sba_cmd_write_c_mdata(0);
1233 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1234 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1235 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1236 SBA_CMD_SHIFT, SBA_CMD_MASK);
1237 cmdsp->cmd = cmd;
1238 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1239 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1240 if (req->sba->hw_resp_size) {
1241 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +05301242 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +05301243 cmdsp->resp_len = req->sba->hw_resp_size;
1244 }
1245 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1246 cmdsp->data = *dst_q + msg_offset;
1247 cmdsp->data_len = msg_len;
1248 cmdsp++;
1249
1250skip_q:
1251 /* Fillup brcm_message */
1252 msg->type = BRCM_MESSAGE_SBA;
1253 msg->sba.cmds = cmds;
1254 msg->sba.cmds_count = cmdsp - cmds;
1255 msg->ctx = req;
1256 msg->error = 0;
1257}
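/*
 * Worked example for the Q computation above, assuming the SBA_VER_2
 * limit max_pq_coefs == 30 from sba_probe(): for a coefficient scf with
 * raid6_gflog[scf] == 40, the Type-B GALOIS command multiplies the
 * source by g^29 into buf0 and a single follow-up Type-A GALOIS command
 * multiplies buf0 by g^11, giving g^40 overall.
 */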
1258
Vinod Kouldd2bceb2017-07-19 10:03:24 +05301259static struct sba_request *
Anup Patel743e1c82017-05-15 10:34:54 +05301260sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
1261 dma_addr_t *dst_p, dma_addr_t *dst_q,
1262 dma_addr_t src, u8 scf, size_t len,
1263 unsigned long flags)
1264{
1265 struct sba_request *req = NULL;
1266
1267 /* Alloc new request */
1268 req = sba_alloc_request(sba);
1269 if (!req)
1270 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +05301271 if (flags & DMA_PREP_FENCE)
1272 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +05301273
1274 /* Fillup request messages */
1275 sba_fillup_pq_single_msg(req, dmaf_continue(flags),
1276 req->cmds, &req->msg, off, len,
1277 dst_p, dst_q, src, scf);
1278
1279 /* Init async_tx descriptor */
1280 req->tx.flags = flags;
1281 req->tx.cookie = -EBUSY;
1282
1283 return req;
1284}
1285
1286static struct dma_async_tx_descriptor *
1287sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
1288 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1289{
1290 u32 i, dst_q_index;
1291 size_t req_len;
1292 bool slow = false;
1293 dma_addr_t off = 0;
1294 dma_addr_t *dst_p = NULL, *dst_q = NULL;
1295 struct sba_device *sba = to_sba_device(dchan);
1296 struct sba_request *first = NULL, *req;
1297
1298 /* Sanity checks */
1299 if (unlikely(src_cnt > sba->max_pq_srcs))
1300 return NULL;
1301 for (i = 0; i < src_cnt; i++)
1302 if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
1303 slow = true;
1304
	/* Figure out the P and Q destination addresses */
1306 if (!(flags & DMA_PREP_PQ_DISABLE_P))
1307 dst_p = &dst[0];
1308 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
1309 dst_q = &dst[1];
1310
	/* Create chained requests where each request is up to hw_buf_size */
1312 while (len) {
1313 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
1314
1315 if (slow) {
1316 dst_q_index = src_cnt;
1317
1318 if (dst_q) {
1319 for (i = 0; i < src_cnt; i++) {
1320 if (*dst_q == src[i]) {
1321 dst_q_index = i;
1322 break;
1323 }
1324 }
1325 }
1326
1327 if (dst_q_index < src_cnt) {
1328 i = dst_q_index;
1329 req = sba_prep_dma_pq_single_req(sba,
1330 off, dst_p, dst_q, src[i], scf[i],
1331 req_len, flags | DMA_PREP_FENCE);
1332 if (!req)
1333 goto fail;
1334
1335 if (first)
1336 sba_chain_request(first, req);
1337 else
1338 first = req;
1339
1340 flags |= DMA_PREP_CONTINUE;
1341 }
1342
1343 for (i = 0; i < src_cnt; i++) {
1344 if (dst_q_index == i)
1345 continue;
1346
1347 req = sba_prep_dma_pq_single_req(sba,
1348 off, dst_p, dst_q, src[i], scf[i],
1349 req_len, flags | DMA_PREP_FENCE);
1350 if (!req)
1351 goto fail;
1352
1353 if (first)
1354 sba_chain_request(first, req);
1355 else
1356 first = req;
1357
1358 flags |= DMA_PREP_CONTINUE;
1359 }
1360 } else {
1361 req = sba_prep_dma_pq_req(sba, off,
1362 dst_p, dst_q, src, src_cnt,
1363 scf, req_len, flags);
1364 if (!req)
1365 goto fail;
1366
1367 if (first)
1368 sba_chain_request(first, req);
1369 else
1370 first = req;
1371 }
1372
1373 off += req_len;
1374 len -= req_len;
1375 }
1376
1377 return (first) ? &first->tx : NULL;
1378
1379fail:
1380 if (first)
1381 sba_free_chained_requests(first);
1382 return NULL;
1383}
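/*
 * Note on the "slow" path above: when the log of any requested
 * coefficient reaches max_pq_coefs, the operation falls back to
 * per-source sba_prep_dma_pq_single_req() calls chained with
 * DMA_PREP_FENCE, handling a source that aliases the Q destination
 * first; otherwise a single multi-source PQ request is built per
 * hw_buf_size chunk.
 */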
1384
1385/* ====== Mailbox callbacks ===== */
1386
1387static void sba_dma_tx_actions(struct sba_request *req)
1388{
1389 struct dma_async_tx_descriptor *tx = &req->tx;
1390
1391 WARN_ON(tx->cookie < 0);
1392
1393 if (tx->cookie > 0) {
1394 dma_cookie_complete(tx);
1395
1396 /*
1397 * Call the callback (must not sleep or submit new
1398 * operations to this channel)
1399 */
1400 if (tx->callback)
1401 tx->callback(tx->callback_param);
1402
1403 dma_descriptor_unmap(tx);
1404 }
1405
1406 /* Run dependent operations */
1407 dma_run_dependencies(tx);
1408
1409 /* If waiting for 'ack' then move to completed list */
1410 if (!async_tx_test_ack(&req->tx))
1411 sba_complete_chained_requests(req);
1412 else
1413 sba_free_chained_requests(req);
1414}
1415
1416static void sba_receive_message(struct mbox_client *cl, void *msg)
1417{
1418 unsigned long flags;
1419 struct brcm_message *m = msg;
1420 struct sba_request *req = m->ctx, *req1;
1421 struct sba_device *sba = req->sba;
1422
	/* Report an error if the message failed */
1424 if (m->error < 0)
1425 dev_err(sba->dev, "%s got message with error %d",
1426 dma_chan_name(&sba->dma_chan), m->error);
1427
1428 /* Mark request as received */
1429 sba_received_request(req);
1430
1431 /* Wait for all chained requests to be completed */
1432 if (atomic_dec_return(&req->first->next_pending_count))
1433 goto done;
1434
1435 /* Point to first request */
1436 req = req->first;
1437
1438 /* Update request */
Anup Patel57a28502017-08-22 15:26:52 +05301439 if (req->flags & SBA_REQUEST_STATE_RECEIVED)
Anup Patel743e1c82017-05-15 10:34:54 +05301440 sba_dma_tx_actions(req);
1441 else
1442 sba_free_chained_requests(req);
1443
1444 spin_lock_irqsave(&sba->reqs_lock, flags);
1445
1446 /* Re-check all completed request waiting for 'ack' */
1447 list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node) {
1448 spin_unlock_irqrestore(&sba->reqs_lock, flags);
1449 sba_dma_tx_actions(req);
1450 spin_lock_irqsave(&sba->reqs_lock, flags);
1451 }
1452
1453 spin_unlock_irqrestore(&sba->reqs_lock, flags);
1454
1455done:
1456 /* Try to submit pending request */
1457 sba_issue_pending(&sba->dma_chan);
1458}
1459
1460/* ====== Platform driver routines ===== */
1461
1462static int sba_prealloc_channel_resources(struct sba_device *sba)
1463{
Anup Patele7ae72a2017-08-22 15:26:54 +05301464 int i, j, ret = 0;
Anup Patel743e1c82017-05-15 10:34:54 +05301465 struct sba_request *req = NULL;
1466
1467 sba->resp_base = dma_alloc_coherent(sba->dma_dev.dev,
1468 sba->max_resp_pool_size,
1469 &sba->resp_dma_base, GFP_KERNEL);
1470 if (!sba->resp_base)
1471 return -ENOMEM;
1472
1473 sba->cmds_base = dma_alloc_coherent(sba->dma_dev.dev,
1474 sba->max_cmds_pool_size,
1475 &sba->cmds_dma_base, GFP_KERNEL);
1476 if (!sba->cmds_base) {
1477 ret = -ENOMEM;
1478 goto fail_free_resp_pool;
1479 }
1480
1481 spin_lock_init(&sba->reqs_lock);
1482 sba->reqs_fence = false;
1483 INIT_LIST_HEAD(&sba->reqs_alloc_list);
1484 INIT_LIST_HEAD(&sba->reqs_pending_list);
1485 INIT_LIST_HEAD(&sba->reqs_active_list);
1486 INIT_LIST_HEAD(&sba->reqs_received_list);
1487 INIT_LIST_HEAD(&sba->reqs_completed_list);
1488 INIT_LIST_HEAD(&sba->reqs_aborted_list);
1489 INIT_LIST_HEAD(&sba->reqs_free_list);
1490
1491 sba->reqs = devm_kcalloc(sba->dev, sba->max_req,
1492 sizeof(*req), GFP_KERNEL);
1493 if (!sba->reqs) {
1494 ret = -ENOMEM;
1495 goto fail_free_cmds_pool;
1496 }
1497
Anup Patele7ae72a2017-08-22 15:26:54 +05301498 for (i = 0; i < sba->max_req; i++) {
Anup Patel743e1c82017-05-15 10:34:54 +05301499 req = &sba->reqs[i];
1500 INIT_LIST_HEAD(&req->node);
1501 req->sba = sba;
Anup Patel57a28502017-08-22 15:26:52 +05301502 req->flags = SBA_REQUEST_STATE_FREE;
Anup Patel743e1c82017-05-15 10:34:54 +05301503 INIT_LIST_HEAD(&req->next);
Anup Patel743e1c82017-05-15 10:34:54 +05301504 atomic_set(&req->next_pending_count, 0);
Anup Patel743e1c82017-05-15 10:34:54 +05301505 req->cmds = devm_kcalloc(sba->dev, sba->max_cmd_per_req,
1506 sizeof(*req->cmds), GFP_KERNEL);
1507 if (!req->cmds) {
1508 ret = -ENOMEM;
1509 goto fail_free_cmds_pool;
1510 }
1511 for (j = 0; j < sba->max_cmd_per_req; j++) {
1512 req->cmds[j].cmd = 0;
1513 req->cmds[j].cmd_dma = sba->cmds_base +
1514 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1515 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1516 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1517 req->cmds[j].flags = 0;
1518 }
1519 memset(&req->msg, 0, sizeof(req->msg));
1520 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1521 req->tx.tx_submit = sba_tx_submit;
Anup Patele7ae72a2017-08-22 15:26:54 +05301522 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
Anup Patel743e1c82017-05-15 10:34:54 +05301523 list_add_tail(&req->node, &sba->reqs_free_list);
1524 }
1525
1526 sba->reqs_free_count = sba->max_req;
1527
1528 return 0;
1529
1530fail_free_cmds_pool:
1531 dma_free_coherent(sba->dma_dev.dev,
1532 sba->max_cmds_pool_size,
1533 sba->cmds_base, sba->cmds_dma_base);
1534fail_free_resp_pool:
1535 dma_free_coherent(sba->dma_dev.dev,
1536 sba->max_resp_pool_size,
1537 sba->resp_base, sba->resp_dma_base);
1538 return ret;
1539}
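/*
 * Pool layout established above, with example numbers from sba_probe():
 * request i owns the response slot at resp_dma_base + i * hw_resp_size
 * and max_cmd_per_req command slots starting at cmds_dma_base +
 * i * max_cmd_per_req * sizeof(u64). For SBA_VER_2 (max_req 1024,
 * hw_resp_size 8, max_cmd_per_req 12 + 3 = 15) this amounts to an 8 KiB
 * response pool and a 120 KiB command pool per SBA device.
 */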
1540
1541static void sba_freeup_channel_resources(struct sba_device *sba)
1542{
1543 dmaengine_terminate_all(&sba->dma_chan);
1544 dma_free_coherent(sba->dma_dev.dev, sba->max_cmds_pool_size,
1545 sba->cmds_base, sba->cmds_dma_base);
1546 dma_free_coherent(sba->dma_dev.dev, sba->max_resp_pool_size,
1547 sba->resp_base, sba->resp_dma_base);
1548 sba->resp_base = NULL;
1549 sba->resp_dma_base = 0;
1550}
1551
1552static int sba_async_register(struct sba_device *sba)
1553{
1554 int ret;
1555 struct dma_device *dma_dev = &sba->dma_dev;
1556
1557 /* Initialize DMA channel cookie */
1558 sba->dma_chan.device = dma_dev;
1559 dma_cookie_init(&sba->dma_chan);
1560
1561 /* Initialize DMA device capability mask */
1562 dma_cap_zero(dma_dev->cap_mask);
1563 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
1564 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1565 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1566 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1567
1568 /*
1569 * Set mailbox channel device as the base device of
1570 * our dma_device because the actual memory accesses
1571 * will be done by mailbox controller
1572 */
1573 dma_dev->dev = sba->mbox_dev;
1574
1575 /* Set base prep routines */
1576 dma_dev->device_free_chan_resources = sba_free_chan_resources;
1577 dma_dev->device_terminate_all = sba_device_terminate_all;
1578 dma_dev->device_issue_pending = sba_issue_pending;
1579 dma_dev->device_tx_status = sba_tx_status;
1580
1581 /* Set interrupt routine */
1582 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1583 dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
1584
1585 /* Set memcpy routine */
1586 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1587 dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
1588
1589 /* Set xor routine and capability */
1590 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1591 dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
1592 dma_dev->max_xor = sba->max_xor_srcs;
1593 }
1594
1595 /* Set pq routine and capability */
1596 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1597 dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
1598 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
1599 }
1600
1601 /* Initialize DMA device channel list */
1602 INIT_LIST_HEAD(&dma_dev->channels);
1603 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
1604
1605 /* Register with Linux async DMA framework*/
1606 ret = dma_async_device_register(dma_dev);
1607 if (ret) {
1608 dev_err(sba->dev, "async device register error %d", ret);
1609 return ret;
1610 }
1611
1612 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
1613 dma_chan_name(&sba->dma_chan),
1614 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
1615 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
1616 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1617 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
1618
1619 return 0;
1620}
1621
1622static int sba_probe(struct platform_device *pdev)
1623{
1624 int i, ret = 0, mchans_count;
1625 struct sba_device *sba;
1626 struct platform_device *mbox_pdev;
1627 struct of_phandle_args args;
1628
1629 /* Allocate main SBA struct */
1630 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
1631 if (!sba)
1632 return -ENOMEM;
1633
1634 sba->dev = &pdev->dev;
1635 platform_set_drvdata(pdev, sba);
1636
1637 /* Determine SBA version from DT compatible string */
1638 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
1639 sba->ver = SBA_VER_1;
1640 else if (of_device_is_compatible(sba->dev->of_node,
1641 "brcm,iproc-sba-v2"))
1642 sba->ver = SBA_VER_2;
1643 else
1644 return -ENODEV;
1645
1646 /* Derived Configuration parameters */
1647 switch (sba->ver) {
1648 case SBA_VER_1:
1649 sba->max_req = 1024;
1650 sba->hw_buf_size = 4096;
1651 sba->hw_resp_size = 8;
1652 sba->max_pq_coefs = 6;
1653 sba->max_pq_srcs = 6;
1654 break;
1655 case SBA_VER_2:
1656 sba->max_req = 1024;
1657 sba->hw_buf_size = 4096;
1658 sba->hw_resp_size = 8;
1659 sba->max_pq_coefs = 30;
		/*
		 * We cannot support max_pq_srcs == max_pq_coefs because
		 * we are limited by the number of SBA commands that we
		 * can fit in one message for the underlying ring
		 * manager HW.
		 */
1665 sba->max_pq_srcs = 12;
1666 break;
1667 default:
1668 return -EINVAL;
1669 }
1670 sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1671 sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1672 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
1673 sba->max_cmds_pool_size = sba->max_req *
1674 sba->max_cmd_per_req * sizeof(u64);
1675
1676 /* Setup mailbox client */
1677 sba->client.dev = &pdev->dev;
1678 sba->client.rx_callback = sba_receive_message;
1679 sba->client.tx_block = false;
1680 sba->client.knows_txdone = false;
1681 sba->client.tx_tout = 0;
1682
1683 /* Number of channels equals number of mailbox channels */
1684 ret = of_count_phandle_with_args(pdev->dev.of_node,
1685 "mboxes", "#mbox-cells");
1686 if (ret <= 0)
1687 return -ENODEV;
1688 mchans_count = ret;
1689 sba->mchans_count = 0;
1690 atomic_set(&sba->mchans_current, 0);
1691
1692 /* Allocate mailbox channel array */
	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
1694 sizeof(*sba->mchans), GFP_KERNEL);
1695 if (!sba->mchans)
1696 return -ENOMEM;
1697
1698 /* Request mailbox channels */
1699 for (i = 0; i < mchans_count; i++) {
1700 sba->mchans[i] = mbox_request_channel(&sba->client, i);
1701 if (IS_ERR(sba->mchans[i])) {
1702 ret = PTR_ERR(sba->mchans[i]);
1703 goto fail_free_mchans;
1704 }
1705 sba->mchans_count++;
1706 }
1707
	/* Find out the underlying mailbox device */
1709 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1710 "mboxes", "#mbox-cells", 0, &args);
1711 if (ret)
1712 goto fail_free_mchans;
1713 mbox_pdev = of_find_device_by_node(args.np);
1714 of_node_put(args.np);
1715 if (!mbox_pdev) {
1716 ret = -ENODEV;
1717 goto fail_free_mchans;
1718 }
1719 sba->mbox_dev = &mbox_pdev->dev;
1720
	/* All mailbox channels should belong to the same ring manager device */
1722 for (i = 1; i < mchans_count; i++) {
1723 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1724 "mboxes", "#mbox-cells", i, &args);
1725 if (ret)
1726 goto fail_free_mchans;
1727 mbox_pdev = of_find_device_by_node(args.np);
1728 of_node_put(args.np);
1729 if (sba->mbox_dev != &mbox_pdev->dev) {
1730 ret = -EINVAL;
1731 goto fail_free_mchans;
1732 }
1733 }
1734
1735 /* Register DMA device with linux async framework */
1736 ret = sba_async_register(sba);
1737 if (ret)
1738 goto fail_free_mchans;
1739
1740 /* Prealloc channel resource */
1741 ret = sba_prealloc_channel_resources(sba);
1742 if (ret)
1743 goto fail_async_dev_unreg;
1744
1745 /* Print device info */
1746 dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
1747 dma_chan_name(&sba->dma_chan), sba->ver+1,
1748 sba->mchans_count);
1749
1750 return 0;
1751
1752fail_async_dev_unreg:
1753 dma_async_device_unregister(&sba->dma_dev);
1754fail_free_mchans:
1755 for (i = 0; i < sba->mchans_count; i++)
1756 mbox_free_channel(sba->mchans[i]);
1757 return ret;
1758}
1759
1760static int sba_remove(struct platform_device *pdev)
1761{
1762 int i;
1763 struct sba_device *sba = platform_get_drvdata(pdev);
1764
1765 sba_freeup_channel_resources(sba);
1766
1767 dma_async_device_unregister(&sba->dma_dev);
1768
1769 for (i = 0; i < sba->mchans_count; i++)
1770 mbox_free_channel(sba->mchans[i]);
1771
1772 return 0;
1773}
1774
1775static const struct of_device_id sba_of_match[] = {
1776 { .compatible = "brcm,iproc-sba", },
1777 { .compatible = "brcm,iproc-sba-v2", },
1778 {},
1779};
1780MODULE_DEVICE_TABLE(of, sba_of_match);
1781
1782static struct platform_driver sba_driver = {
1783 .probe = sba_probe,
1784 .remove = sba_remove,
1785 .driver = {
1786 .name = "bcm-sba-raid",
1787 .of_match_table = sba_of_match,
1788 },
1789};
1790module_platform_driver(sba_driver);
1791
1792MODULE_DESCRIPTION("Broadcom SBA RAID driver");
1793MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1794MODULE_LICENSE("GPL v2");