Anup Patel743e1c82017-05-15 10:34:54 +05301/*
2 * Copyright (C) 2017 Broadcom
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License version 2 as
6 * published by the Free Software Foundation.
7 */
8
9/*
10 * Broadcom SBA RAID Driver
11 *
12 * The Broadcom stream buffer accelerator (SBA) provides offloading
13 * capabilities for RAID operations. The SBA offload engine is accessible
 14 * via a Broadcom SoC specific ring manager. Two or more offload engines
 15 * can share the same ring manager; because of this, the ring manager
 16 * driver is implemented as a mailbox controller driver and the offload
 17 * engine drivers are implemented as mailbox clients.
18 *
 19 * Typically, a Broadcom SoC specific ring manager implements a large
 20 * number of hardware rings over one or more SBA hardware devices. By
 21 * design, the internal buffer size of an SBA hardware device is limited,
 22 * but all offload operations supported by SBA can be broken down into
 23 * multiple small requests and executed in parallel on multiple SBA
 24 * hardware devices to achieve high throughput.
25 *
 26 * The Broadcom SBA RAID driver does not require any register programming;
 27 * it only submits requests to the SBA hardware device via mailbox channels.
 28 * This driver implements a DMA device with one DMA channel using a set
 29 * of mailbox channels provided by the Broadcom SoC specific ring manager
 30 * driver. To exploit parallelism (as described above), all DMA requests
 31 * coming to the SBA RAID DMA channel are broken down into smaller requests
 32 * and submitted to multiple mailbox channels in a round-robin fashion.
 33 * To get more SBA DMA channels, more SBA device nodes can be created
 34 * in the Broadcom SoC specific DTS based on the number of hardware rings
 35 * supported by the Broadcom SoC ring manager.
36 */
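/*
 * For example (illustrative only: the exact mboxes specifier cells are
 * defined by the Broadcom SoC specific ring manager binding, and the node
 * and label names below are hypothetical), an additional SBA DMA channel
 * can be added with another device node such as:
 *
 *	raid1: raid {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 2 ...>, <&raid_mbox 3 ...>;
 *	};
 */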
37
38#include <linux/bitops.h>
39#include <linux/dma-mapping.h>
40#include <linux/dmaengine.h>
41#include <linux/list.h>
42#include <linux/mailbox_client.h>
43#include <linux/mailbox/brcm-message.h>
44#include <linux/module.h>
45#include <linux/of_device.h>
46#include <linux/slab.h>
47#include <linux/raid/pq.h>
48
49#include "dmaengine.h"
50
Anup Patele8970912017-08-22 15:26:50 +053051/* ====== Driver macros and defines ===== */
52
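/*
 * Rough layout of the 64-bit SBA command word as encoded by the shifts
 * and masks below (derived from this driver, not from a hardware
 * reference):
 *
 *   [49:48] TYPE        [47:32] USER_DEF   [31:24] R_MDATA
 *   [19:18] C_MDATA_MS  [17] INT           [16] RESP
 *   [15:8]  C_MDATA     [3:0] CMD
 */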
Anup Patel743e1c82017-05-15 10:34:54 +053053#define SBA_TYPE_SHIFT 48
54#define SBA_TYPE_MASK GENMASK(1, 0)
55#define SBA_TYPE_A 0x0
56#define SBA_TYPE_B 0x2
57#define SBA_TYPE_C 0x3
58#define SBA_USER_DEF_SHIFT 32
59#define SBA_USER_DEF_MASK GENMASK(15, 0)
60#define SBA_R_MDATA_SHIFT 24
61#define SBA_R_MDATA_MASK GENMASK(7, 0)
62#define SBA_C_MDATA_MS_SHIFT 18
63#define SBA_C_MDATA_MS_MASK GENMASK(1, 0)
64#define SBA_INT_SHIFT 17
65#define SBA_INT_MASK BIT(0)
66#define SBA_RESP_SHIFT 16
67#define SBA_RESP_MASK BIT(0)
68#define SBA_C_MDATA_SHIFT 8
69#define SBA_C_MDATA_MASK GENMASK(7, 0)
70#define SBA_C_MDATA_BNUMx_SHIFT(__bnum) (2 * (__bnum))
71#define SBA_C_MDATA_BNUMx_MASK GENMASK(1, 0)
72#define SBA_C_MDATA_DNUM_SHIFT 5
73#define SBA_C_MDATA_DNUM_MASK GENMASK(4, 0)
74#define SBA_C_MDATA_LS(__v) ((__v) & 0xff)
75#define SBA_C_MDATA_MS(__v) (((__v) >> 8) & 0x3)
76#define SBA_CMD_SHIFT 0
77#define SBA_CMD_MASK GENMASK(3, 0)
78#define SBA_CMD_ZERO_BUFFER 0x4
79#define SBA_CMD_ZERO_ALL_BUFFERS 0x8
80#define SBA_CMD_LOAD_BUFFER 0x9
81#define SBA_CMD_XOR 0xa
82#define SBA_CMD_GALOIS_XOR 0xb
83#define SBA_CMD_WRITE_BUFFER 0xc
84#define SBA_CMD_GALOIS 0xe
85
Anup Patel5346aaf2017-08-22 15:26:57 +053086#define SBA_MAX_REQ_PER_MBOX_CHANNEL 8192
87
Anup Patel743e1c82017-05-15 10:34:54 +053088/* Driver helper macros */
89#define to_sba_request(tx) \
90 container_of(tx, struct sba_request, tx)
91#define to_sba_device(dchan) \
92 container_of(dchan, struct sba_device, dma_chan)
93
Anup Patele8970912017-08-22 15:26:50 +053094/* ===== Driver data structures ===== */
95
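/*
 * Request life cycle (the low bits of sba_request.flags encode the state,
 * SBA_REQUEST_FENCE is an independent flag). A request typically moves:
 *
 *   FREE -> ALLOCED -> PENDING -> ACTIVE -> RECEIVED -> COMPLETED -> FREE
 *
 * Active requests are moved to ABORTED instead when the channel is
 * cleaned up while they are still in flight.
 */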
Anup Patel57a28502017-08-22 15:26:52 +053096enum sba_request_flags {
97 SBA_REQUEST_STATE_FREE = 0x001,
98 SBA_REQUEST_STATE_ALLOCED = 0x002,
99 SBA_REQUEST_STATE_PENDING = 0x004,
100 SBA_REQUEST_STATE_ACTIVE = 0x008,
101 SBA_REQUEST_STATE_RECEIVED = 0x010,
102 SBA_REQUEST_STATE_COMPLETED = 0x020,
103 SBA_REQUEST_STATE_ABORTED = 0x040,
104 SBA_REQUEST_STATE_MASK = 0x0ff,
105 SBA_REQUEST_FENCE = 0x100,
Anup Patel743e1c82017-05-15 10:34:54 +0530106};
107
108struct sba_request {
109 /* Global state */
110 struct list_head node;
111 struct sba_device *sba;
Anup Patel57a28502017-08-22 15:26:52 +0530112 u32 flags;
Anup Patel743e1c82017-05-15 10:34:54 +0530113 /* Chained requests management */
114 struct sba_request *first;
115 struct list_head next;
Anup Patel743e1c82017-05-15 10:34:54 +0530116 atomic_t next_pending_count;
117 /* BRCM message data */
Anup Patel743e1c82017-05-15 10:34:54 +0530118 struct brcm_message msg;
119 struct dma_async_tx_descriptor tx;
Anup Patel5655e002017-08-22 15:26:56 +0530120 /* SBA commands */
121 struct brcm_sba_command cmds[0];
Anup Patel743e1c82017-05-15 10:34:54 +0530122};
123
124enum sba_version {
125 SBA_VER_1 = 0,
126 SBA_VER_2
127};
128
129struct sba_device {
130 /* Underlying device */
131 struct device *dev;
132 /* DT configuration parameters */
133 enum sba_version ver;
134 /* Derived configuration parameters */
135 u32 max_req;
136 u32 hw_buf_size;
137 u32 hw_resp_size;
138 u32 max_pq_coefs;
139 u32 max_pq_srcs;
140 u32 max_cmd_per_req;
141 u32 max_xor_srcs;
142 u32 max_resp_pool_size;
143 u32 max_cmds_pool_size;
 144 /* Mailbox client and mailbox channels */
145 struct mbox_client client;
146 int mchans_count;
147 atomic_t mchans_current;
148 struct mbox_chan **mchans;
149 struct device *mbox_dev;
150 /* DMA device and DMA channel */
151 struct dma_device dma_dev;
152 struct dma_chan dma_chan;
153 /* DMA channel resources */
154 void *resp_base;
155 dma_addr_t resp_dma_base;
156 void *cmds_base;
157 dma_addr_t cmds_dma_base;
158 spinlock_t reqs_lock;
Anup Patel743e1c82017-05-15 10:34:54 +0530159 bool reqs_fence;
160 struct list_head reqs_alloc_list;
161 struct list_head reqs_pending_list;
162 struct list_head reqs_active_list;
163 struct list_head reqs_received_list;
164 struct list_head reqs_completed_list;
165 struct list_head reqs_aborted_list;
166 struct list_head reqs_free_list;
Anup Patel743e1c82017-05-15 10:34:54 +0530167};
168
Anup Patele8970912017-08-22 15:26:50 +0530169/* ====== Command helper routines ===== */
Anup Patel743e1c82017-05-15 10:34:54 +0530170
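/*
 * Replace the field described by @shift and @mask in the command word
 * @cmd with @val (masked before shifting) and return the new command.
 */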
171static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
172{
173 cmd &= ~((u64)mask << shift);
174 cmd |= ((u64)(val & mask) << shift);
175 return cmd;
176}
177
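/*
 * The C_MDATA field packs the SBA buffer numbers used by a command
 * (BNUMx, 2 bits each) and, for Galois commands, a coefficient index
 * (DNUM, 5 bits). The helpers below build C_MDATA for each command
 * type; any bits above bit 7 are programmed via the separate
 * C_MDATA_MS field.
 */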
178static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
179{
180 return b0 & SBA_C_MDATA_BNUMx_MASK;
181}
182
183static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
184{
185 return b0 & SBA_C_MDATA_BNUMx_MASK;
186}
187
188static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
189{
190 return (b0 & SBA_C_MDATA_BNUMx_MASK) |
191 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
192}
193
194static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
195{
196 return (b0 & SBA_C_MDATA_BNUMx_MASK) |
197 ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
198 ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
199}
200
Anup Patele8970912017-08-22 15:26:50 +0530201/* ====== General helper routines ===== */
Anup Patel743e1c82017-05-15 10:34:54 +0530202
Anup Patel6df8f912017-08-22 15:27:00 +0530203static void sba_peek_mchans(struct sba_device *sba)
204{
205 int mchan_idx;
206
207 for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
208 mbox_client_peek_data(sba->mchans[mchan_idx]);
209}
210
Anup Patel743e1c82017-05-15 10:34:54 +0530211static struct sba_request *sba_alloc_request(struct sba_device *sba)
212{
213 unsigned long flags;
214 struct sba_request *req = NULL;
215
216 spin_lock_irqsave(&sba->reqs_lock, flags);
Anup Patel743e1c82017-05-15 10:34:54 +0530217 req = list_first_entry_or_null(&sba->reqs_free_list,
218 struct sba_request, node);
Anup Patelabfa2512017-08-22 15:26:55 +0530219 if (req)
Anup Patel743e1c82017-05-15 10:34:54 +0530220 list_move_tail(&req->node, &sba->reqs_alloc_list);
Anup Patel743e1c82017-05-15 10:34:54 +0530221 spin_unlock_irqrestore(&sba->reqs_lock, flags);
Anup Patel6df8f912017-08-22 15:27:00 +0530222
223 if (!req) {
224 /*
 225 * We have no more free requests, so peek the
 226 * mailbox channels in the hope that a few active
 227 * requests have completed, which would create
 228 * room for new requests.
229 */
230 sba_peek_mchans(sba);
Anup Patele4274cf2017-08-22 15:26:51 +0530231 return NULL;
Anup Patel6df8f912017-08-22 15:27:00 +0530232 }
Anup Patele4274cf2017-08-22 15:26:51 +0530233
Anup Patel57a28502017-08-22 15:26:52 +0530234 req->flags = SBA_REQUEST_STATE_ALLOCED;
Anup Patele4274cf2017-08-22 15:26:51 +0530235 req->first = req;
236 INIT_LIST_HEAD(&req->next);
Anup Patele4274cf2017-08-22 15:26:51 +0530237 atomic_set(&req->next_pending_count, 1);
238
239 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
Anup Patel743e1c82017-05-15 10:34:54 +0530240
241 return req;
242}
243
244/* Note: Must be called with sba->reqs_lock held */
245static void _sba_pending_request(struct sba_device *sba,
246 struct sba_request *req)
247{
248 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530249 req->flags &= ~SBA_REQUEST_STATE_MASK;
250 req->flags |= SBA_REQUEST_STATE_PENDING;
Anup Patel743e1c82017-05-15 10:34:54 +0530251 list_move_tail(&req->node, &sba->reqs_pending_list);
252 if (list_empty(&sba->reqs_active_list))
253 sba->reqs_fence = false;
254}
255
256/* Note: Must be called with sba->reqs_lock held */
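/*
 * A request with SBA_REQUEST_FENCE set, once made active, sets
 * sba->reqs_fence and thereby blocks further pending requests from
 * becoming active until the active list drains.
 */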
257static bool _sba_active_request(struct sba_device *sba,
258 struct sba_request *req)
259{
260 lockdep_assert_held(&sba->reqs_lock);
261 if (list_empty(&sba->reqs_active_list))
262 sba->reqs_fence = false;
263 if (sba->reqs_fence)
264 return false;
Anup Patel57a28502017-08-22 15:26:52 +0530265 req->flags &= ~SBA_REQUEST_STATE_MASK;
266 req->flags |= SBA_REQUEST_STATE_ACTIVE;
Anup Patel743e1c82017-05-15 10:34:54 +0530267 list_move_tail(&req->node, &sba->reqs_active_list);
Anup Patel57a28502017-08-22 15:26:52 +0530268 if (req->flags & SBA_REQUEST_FENCE)
Anup Patel743e1c82017-05-15 10:34:54 +0530269 sba->reqs_fence = true;
270 return true;
271}
272
273/* Note: Must be called with sba->reqs_lock held */
274static void _sba_abort_request(struct sba_device *sba,
275 struct sba_request *req)
276{
277 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530278 req->flags &= ~SBA_REQUEST_STATE_MASK;
279 req->flags |= SBA_REQUEST_STATE_ABORTED;
Anup Patel743e1c82017-05-15 10:34:54 +0530280 list_move_tail(&req->node, &sba->reqs_aborted_list);
281 if (list_empty(&sba->reqs_active_list))
282 sba->reqs_fence = false;
283}
284
285/* Note: Must be called with sba->reqs_lock held */
286static void _sba_free_request(struct sba_device *sba,
287 struct sba_request *req)
288{
289 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530290 req->flags &= ~SBA_REQUEST_STATE_MASK;
291 req->flags |= SBA_REQUEST_STATE_FREE;
Anup Patel743e1c82017-05-15 10:34:54 +0530292 list_move_tail(&req->node, &sba->reqs_free_list);
293 if (list_empty(&sba->reqs_active_list))
294 sba->reqs_fence = false;
Anup Patel743e1c82017-05-15 10:34:54 +0530295}
296
Anup Patelf8338512017-08-22 15:26:58 +0530297/* Note: Must be called with sba->reqs_lock held */
298static void _sba_complete_request(struct sba_device *sba,
299 struct sba_request *req)
Anup Patel743e1c82017-05-15 10:34:54 +0530300{
Anup Patelf8338512017-08-22 15:26:58 +0530301 lockdep_assert_held(&sba->reqs_lock);
Anup Patel57a28502017-08-22 15:26:52 +0530302 req->flags &= ~SBA_REQUEST_STATE_MASK;
303 req->flags |= SBA_REQUEST_STATE_COMPLETED;
Anup Patel743e1c82017-05-15 10:34:54 +0530304 list_move_tail(&req->node, &sba->reqs_completed_list);
Anup Patel743e1c82017-05-15 10:34:54 +0530305 if (list_empty(&sba->reqs_active_list))
306 sba->reqs_fence = false;
Anup Patelf8338512017-08-22 15:26:58 +0530307}
Anup Patel743e1c82017-05-15 10:34:54 +0530308
Anup Patelf8338512017-08-22 15:26:58 +0530309/* Note: Must be called with sba->reqs_lock held */
310static void _sba_received_request(struct sba_device *sba,
311 struct sba_request *req)
312{
313 lockdep_assert_held(&sba->reqs_lock);
314 req->flags &= ~SBA_REQUEST_STATE_MASK;
315 req->flags |= SBA_REQUEST_STATE_RECEIVED;
316 list_move_tail(&req->node, &sba->reqs_received_list);
317 if (list_empty(&sba->reqs_active_list))
318 sba->reqs_fence = false;
Anup Patel743e1c82017-05-15 10:34:54 +0530319}
320
321static void sba_free_chained_requests(struct sba_request *req)
322{
323 unsigned long flags;
324 struct sba_request *nreq;
325 struct sba_device *sba = req->sba;
326
327 spin_lock_irqsave(&sba->reqs_lock, flags);
328
329 _sba_free_request(sba, req);
330 list_for_each_entry(nreq, &req->next, next)
331 _sba_free_request(sba, nreq);
332
333 spin_unlock_irqrestore(&sba->reqs_lock, flags);
334}
335
336static void sba_chain_request(struct sba_request *first,
337 struct sba_request *req)
338{
339 unsigned long flags;
340 struct sba_device *sba = req->sba;
341
342 spin_lock_irqsave(&sba->reqs_lock, flags);
343
344 list_add_tail(&req->next, &first->next);
345 req->first = first;
Anup Patel10f1a332017-08-22 15:26:53 +0530346 atomic_inc(&first->next_pending_count);
Anup Patel743e1c82017-05-15 10:34:54 +0530347
348 spin_unlock_irqrestore(&sba->reqs_lock, flags);
349}
350
351static void sba_cleanup_nonpending_requests(struct sba_device *sba)
352{
353 unsigned long flags;
354 struct sba_request *req, *req1;
355
356 spin_lock_irqsave(&sba->reqs_lock, flags);
357
 358 /* Free up all allocated requests */
359 list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
360 _sba_free_request(sba, req);
361
 362 /* Free up all received requests */
363 list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
364 _sba_free_request(sba, req);
365
 366 /* Free up all completed requests */
367 list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
368 _sba_free_request(sba, req);
369
370 /* Set all active requests as aborted */
371 list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
372 _sba_abort_request(sba, req);
373
374 /*
 375 * Note: We expect that aborted requests will eventually be
 376 * freed by sba_receive_message()
377 */
378
379 spin_unlock_irqrestore(&sba->reqs_lock, flags);
380}
381
382static void sba_cleanup_pending_requests(struct sba_device *sba)
383{
384 unsigned long flags;
385 struct sba_request *req, *req1;
386
387 spin_lock_irqsave(&sba->reqs_lock, flags);
388
 389 /* Free up all pending requests */
390 list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
391 _sba_free_request(sba, req);
392
393 spin_unlock_irqrestore(&sba->reqs_lock, flags);
394}
395
Anup Patel743e1c82017-05-15 10:34:54 +0530396static int sba_send_mbox_request(struct sba_device *sba,
397 struct sba_request *req)
398{
399 int mchans_idx, ret = 0;
400
401 /* Select mailbox channel in round-robin fashion */
402 mchans_idx = atomic_inc_return(&sba->mchans_current);
403 mchans_idx = mchans_idx % sba->mchans_count;
404
405 /* Send message for the request */
406 req->msg.error = 0;
407 ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
408 if (ret < 0) {
409 dev_err(sba->dev, "send message failed with error %d", ret);
410 return ret;
411 }
412 ret = req->msg.error;
413 if (ret < 0) {
414 dev_err(sba->dev, "message error %d", ret);
415 return ret;
416 }
417
418 return 0;
419}
420
Anup Patelf8338512017-08-22 15:26:58 +0530421static void sba_process_deferred_requests(struct sba_device *sba)
Anup Patel743e1c82017-05-15 10:34:54 +0530422{
423 int ret;
Anup Patelf8338512017-08-22 15:26:58 +0530424 u32 count;
Anup Patel743e1c82017-05-15 10:34:54 +0530425 unsigned long flags;
Anup Patelf8338512017-08-22 15:26:58 +0530426 struct sba_request *req;
427 struct dma_async_tx_descriptor *tx;
Anup Patel743e1c82017-05-15 10:34:54 +0530428
429 spin_lock_irqsave(&sba->reqs_lock, flags);
430
Anup Patelf8338512017-08-22 15:26:58 +0530431 /* Count pending requests */
432 count = 0;
433 list_for_each_entry(req, &sba->reqs_pending_list, node)
434 count++;
435
436 /* Process pending requests */
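/*
 * Only process requests counted above; reqs_lock is dropped while
 * sending to the mailbox, so new requests may be appended to the
 * pending list while this loop runs.
 */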
437 while (!list_empty(&sba->reqs_pending_list) && count) {
438 /* Get the first pending request */
439 req = list_first_entry(&sba->reqs_pending_list,
440 struct sba_request, node);
441
Anup Patel743e1c82017-05-15 10:34:54 +0530442 /* Try to make request active */
443 if (!_sba_active_request(sba, req))
444 break;
445
446 /* Send request to mailbox channel */
447 spin_unlock_irqrestore(&sba->reqs_lock, flags);
448 ret = sba_send_mbox_request(sba, req);
449 spin_lock_irqsave(&sba->reqs_lock, flags);
450
451 /* If something went wrong then keep request pending */
452 if (ret < 0) {
453 _sba_pending_request(sba, req);
454 break;
455 }
Anup Patelf8338512017-08-22 15:26:58 +0530456
457 count--;
Anup Patel743e1c82017-05-15 10:34:54 +0530458 }
459
Anup Patelf8338512017-08-22 15:26:58 +0530460 /* Count completed requests */
461 count = 0;
462 list_for_each_entry(req, &sba->reqs_completed_list, node)
463 count++;
464
465 /* Process completed requests */
466 while (!list_empty(&sba->reqs_completed_list) && count) {
467 req = list_first_entry(&sba->reqs_completed_list,
468 struct sba_request, node);
469 list_del_init(&req->node);
470 tx = &req->tx;
471
472 spin_unlock_irqrestore(&sba->reqs_lock, flags);
473
474 WARN_ON(tx->cookie < 0);
475 if (tx->cookie > 0) {
476 dma_cookie_complete(tx);
477 dmaengine_desc_get_callback_invoke(tx, NULL);
478 dma_descriptor_unmap(tx);
479 tx->callback = NULL;
480 tx->callback_result = NULL;
481 }
482
483 dma_run_dependencies(tx);
484
485 spin_lock_irqsave(&sba->reqs_lock, flags);
486
487 /* If waiting for 'ack' then move to completed list */
488 if (!async_tx_test_ack(&req->tx))
489 _sba_complete_request(sba, req);
490 else
491 _sba_free_request(sba, req);
492
493 count--;
494 }
495
496 /* Re-check pending and completed work */
497 count = 0;
498 if (!list_empty(&sba->reqs_pending_list) ||
499 !list_empty(&sba->reqs_completed_list))
500 count = 1;
501
Anup Patel743e1c82017-05-15 10:34:54 +0530502 spin_unlock_irqrestore(&sba->reqs_lock, flags);
503}
504
Anup Patelf8338512017-08-22 15:26:58 +0530505static void sba_process_received_request(struct sba_device *sba,
506 struct sba_request *req)
507{
508 unsigned long flags;
509
510 spin_lock_irqsave(&sba->reqs_lock, flags);
511
512 /* Mark request as received */
513 _sba_received_request(sba, req);
514
 515 /* Complete the parent request once all chained requests are received */
516 if (!atomic_dec_return(&req->first->next_pending_count))
517 _sba_complete_request(sba, req->first);
518 if (req->first != req)
519 _sba_free_request(sba, req);
520
521 spin_unlock_irqrestore(&sba->reqs_lock, flags);
522}
523
524/* ====== DMAENGINE callbacks ===== */
525
526static void sba_free_chan_resources(struct dma_chan *dchan)
527{
528 /*
 529 * Channel resources are pre-allocated, so we just free up
 530 * whatever we can so that the pre-allocated channel
 531 * resources can be re-used next time.
532 */
533 sba_cleanup_nonpending_requests(to_sba_device(dchan));
534}
535
536static int sba_device_terminate_all(struct dma_chan *dchan)
537{
538 /* Cleanup all pending requests */
539 sba_cleanup_pending_requests(to_sba_device(dchan));
540
541 return 0;
542}
543
544static void sba_issue_pending(struct dma_chan *dchan)
545{
546 struct sba_device *sba = to_sba_device(dchan);
547
548 /* Process deferred requests */
549 sba_process_deferred_requests(sba);
550}
551
Anup Patel743e1c82017-05-15 10:34:54 +0530552static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
553{
554 unsigned long flags;
555 dma_cookie_t cookie;
556 struct sba_device *sba;
557 struct sba_request *req, *nreq;
558
559 if (unlikely(!tx))
560 return -EINVAL;
561
562 sba = to_sba_device(tx->chan);
563 req = to_sba_request(tx);
564
565 /* Assign cookie and mark all chained requests pending */
566 spin_lock_irqsave(&sba->reqs_lock, flags);
567 cookie = dma_cookie_assign(tx);
568 _sba_pending_request(sba, req);
569 list_for_each_entry(nreq, &req->next, next)
570 _sba_pending_request(sba, nreq);
571 spin_unlock_irqrestore(&sba->reqs_lock, flags);
572
573 return cookie;
574}
575
576static enum dma_status sba_tx_status(struct dma_chan *dchan,
577 dma_cookie_t cookie,
578 struct dma_tx_state *txstate)
579{
Anup Patel743e1c82017-05-15 10:34:54 +0530580 enum dma_status ret;
581 struct sba_device *sba = to_sba_device(dchan);
582
Anup Patel743e1c82017-05-15 10:34:54 +0530583 ret = dma_cookie_status(dchan, cookie, txstate);
584 if (ret == DMA_COMPLETE)
585 return ret;
586
Anup Patel6df8f912017-08-22 15:27:00 +0530587 sba_peek_mchans(sba);
588
Anup Patel743e1c82017-05-15 10:34:54 +0530589 return dma_cookie_status(dchan, cookie, txstate);
590}
591
592static void sba_fillup_interrupt_msg(struct sba_request *req,
593 struct brcm_sba_command *cmds,
594 struct brcm_message *msg)
595{
596 u64 cmd;
597 u32 c_mdata;
Anup Patele7ae72a2017-08-22 15:26:54 +0530598 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530599 struct brcm_sba_command *cmdsp = cmds;
600
601 /* Type-B command to load dummy data into buf0 */
602 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
603 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
604 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
605 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
606 c_mdata = sba_cmd_load_c_mdata(0);
607 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
608 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
609 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
610 SBA_CMD_SHIFT, SBA_CMD_MASK);
611 cmdsp->cmd = cmd;
612 *cmdsp->cmd_dma = cpu_to_le64(cmd);
613 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
Anup Patele7ae72a2017-08-22 15:26:54 +0530614 cmdsp->data = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530615 cmdsp->data_len = req->sba->hw_resp_size;
616 cmdsp++;
617
618 /* Type-A command to write buf0 to dummy location */
619 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
620 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
621 cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
622 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
623 cmd = sba_cmd_enc(cmd, 0x1,
624 SBA_RESP_SHIFT, SBA_RESP_MASK);
625 c_mdata = sba_cmd_write_c_mdata(0);
626 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
627 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
628 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
629 SBA_CMD_SHIFT, SBA_CMD_MASK);
630 cmdsp->cmd = cmd;
631 *cmdsp->cmd_dma = cpu_to_le64(cmd);
632 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
633 if (req->sba->hw_resp_size) {
634 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530635 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530636 cmdsp->resp_len = req->sba->hw_resp_size;
637 }
638 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
Anup Patele7ae72a2017-08-22 15:26:54 +0530639 cmdsp->data = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530640 cmdsp->data_len = req->sba->hw_resp_size;
641 cmdsp++;
642
643 /* Fillup brcm_message */
644 msg->type = BRCM_MESSAGE_SBA;
645 msg->sba.cmds = cmds;
646 msg->sba.cmds_count = cmdsp - cmds;
647 msg->ctx = req;
648 msg->error = 0;
649}
650
651static struct dma_async_tx_descriptor *
652sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
653{
654 struct sba_request *req = NULL;
655 struct sba_device *sba = to_sba_device(dchan);
656
657 /* Alloc new request */
658 req = sba_alloc_request(sba);
659 if (!req)
660 return NULL;
661
662 /*
663 * Force fence so that no requests are submitted
664 * until DMA callback for this request is invoked.
665 */
Anup Patel57a28502017-08-22 15:26:52 +0530666 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +0530667
668 /* Fillup request message */
669 sba_fillup_interrupt_msg(req, req->cmds, &req->msg);
670
671 /* Init async_tx descriptor */
672 req->tx.flags = flags;
673 req->tx.cookie = -EBUSY;
674
Colin Ian King1fc63cb2017-05-17 22:58:50 +0100675 return &req->tx;
Anup Patel743e1c82017-05-15 10:34:54 +0530676}
677
678static void sba_fillup_memcpy_msg(struct sba_request *req,
679 struct brcm_sba_command *cmds,
680 struct brcm_message *msg,
681 dma_addr_t msg_offset, size_t msg_len,
682 dma_addr_t dst, dma_addr_t src)
683{
684 u64 cmd;
685 u32 c_mdata;
Anup Patele7ae72a2017-08-22 15:26:54 +0530686 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530687 struct brcm_sba_command *cmdsp = cmds;
688
689 /* Type-B command to load data into buf0 */
690 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
691 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
692 cmd = sba_cmd_enc(cmd, msg_len,
693 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
694 c_mdata = sba_cmd_load_c_mdata(0);
695 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
696 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
697 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
698 SBA_CMD_SHIFT, SBA_CMD_MASK);
699 cmdsp->cmd = cmd;
700 *cmdsp->cmd_dma = cpu_to_le64(cmd);
701 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
702 cmdsp->data = src + msg_offset;
703 cmdsp->data_len = msg_len;
704 cmdsp++;
705
706 /* Type-A command to write buf0 */
707 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
708 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
709 cmd = sba_cmd_enc(cmd, msg_len,
710 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
711 cmd = sba_cmd_enc(cmd, 0x1,
712 SBA_RESP_SHIFT, SBA_RESP_MASK);
713 c_mdata = sba_cmd_write_c_mdata(0);
714 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
715 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
716 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
717 SBA_CMD_SHIFT, SBA_CMD_MASK);
718 cmdsp->cmd = cmd;
719 *cmdsp->cmd_dma = cpu_to_le64(cmd);
720 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
721 if (req->sba->hw_resp_size) {
722 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530723 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530724 cmdsp->resp_len = req->sba->hw_resp_size;
725 }
726 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
727 cmdsp->data = dst + msg_offset;
728 cmdsp->data_len = msg_len;
729 cmdsp++;
730
731 /* Fillup brcm_message */
732 msg->type = BRCM_MESSAGE_SBA;
733 msg->sba.cmds = cmds;
734 msg->sba.cmds_count = cmdsp - cmds;
735 msg->ctx = req;
736 msg->error = 0;
737}
738
739static struct sba_request *
740sba_prep_dma_memcpy_req(struct sba_device *sba,
741 dma_addr_t off, dma_addr_t dst, dma_addr_t src,
742 size_t len, unsigned long flags)
743{
744 struct sba_request *req = NULL;
745
746 /* Alloc new request */
747 req = sba_alloc_request(sba);
748 if (!req)
749 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +0530750 if (flags & DMA_PREP_FENCE)
751 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +0530752
753 /* Fillup request message */
754 sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
755 off, len, dst, src);
756
757 /* Init async_tx descriptor */
758 req->tx.flags = flags;
759 req->tx.cookie = -EBUSY;
760
761 return req;
762}
763
764static struct dma_async_tx_descriptor *
765sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
766 size_t len, unsigned long flags)
767{
768 size_t req_len;
769 dma_addr_t off = 0;
770 struct sba_device *sba = to_sba_device(dchan);
771 struct sba_request *first = NULL, *req;
772
 773 /* Create chained requests where each request is up to hw_buf_size */
774 while (len) {
775 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
776
777 req = sba_prep_dma_memcpy_req(sba, off, dst, src,
778 req_len, flags);
779 if (!req) {
780 if (first)
781 sba_free_chained_requests(first);
782 return NULL;
783 }
784
785 if (first)
786 sba_chain_request(first, req);
787 else
788 first = req;
789
790 off += req_len;
791 len -= req_len;
792 }
793
794 return (first) ? &first->tx : NULL;
795}
796
797static void sba_fillup_xor_msg(struct sba_request *req,
798 struct brcm_sba_command *cmds,
799 struct brcm_message *msg,
800 dma_addr_t msg_offset, size_t msg_len,
801 dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
802{
803 u64 cmd;
804 u32 c_mdata;
805 unsigned int i;
Anup Patele7ae72a2017-08-22 15:26:54 +0530806 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530807 struct brcm_sba_command *cmdsp = cmds;
808
809 /* Type-B command to load data into buf0 */
810 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
811 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
812 cmd = sba_cmd_enc(cmd, msg_len,
813 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
814 c_mdata = sba_cmd_load_c_mdata(0);
815 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
816 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
817 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
818 SBA_CMD_SHIFT, SBA_CMD_MASK);
819 cmdsp->cmd = cmd;
820 *cmdsp->cmd_dma = cpu_to_le64(cmd);
821 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
822 cmdsp->data = src[0] + msg_offset;
823 cmdsp->data_len = msg_len;
824 cmdsp++;
825
826 /* Type-B commands to xor data with buf0 and put it back in buf0 */
827 for (i = 1; i < src_cnt; i++) {
828 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
829 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
830 cmd = sba_cmd_enc(cmd, msg_len,
831 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
832 c_mdata = sba_cmd_xor_c_mdata(0, 0);
833 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
834 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
835 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
836 SBA_CMD_SHIFT, SBA_CMD_MASK);
837 cmdsp->cmd = cmd;
838 *cmdsp->cmd_dma = cpu_to_le64(cmd);
839 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
840 cmdsp->data = src[i] + msg_offset;
841 cmdsp->data_len = msg_len;
842 cmdsp++;
843 }
844
845 /* Type-A command to write buf0 */
846 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
847 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
848 cmd = sba_cmd_enc(cmd, msg_len,
849 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
850 cmd = sba_cmd_enc(cmd, 0x1,
851 SBA_RESP_SHIFT, SBA_RESP_MASK);
852 c_mdata = sba_cmd_write_c_mdata(0);
853 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
854 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
855 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
856 SBA_CMD_SHIFT, SBA_CMD_MASK);
857 cmdsp->cmd = cmd;
858 *cmdsp->cmd_dma = cpu_to_le64(cmd);
859 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
860 if (req->sba->hw_resp_size) {
861 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +0530862 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +0530863 cmdsp->resp_len = req->sba->hw_resp_size;
864 }
865 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
866 cmdsp->data = dst + msg_offset;
867 cmdsp->data_len = msg_len;
868 cmdsp++;
869
870 /* Fillup brcm_message */
871 msg->type = BRCM_MESSAGE_SBA;
872 msg->sba.cmds = cmds;
873 msg->sba.cmds_count = cmdsp - cmds;
874 msg->ctx = req;
875 msg->error = 0;
876}
877
Vinod Kouldd2bceb2017-07-19 10:03:24 +0530878static struct sba_request *
Anup Patel743e1c82017-05-15 10:34:54 +0530879sba_prep_dma_xor_req(struct sba_device *sba,
880 dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
881 u32 src_cnt, size_t len, unsigned long flags)
882{
883 struct sba_request *req = NULL;
884
885 /* Alloc new request */
886 req = sba_alloc_request(sba);
887 if (!req)
888 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +0530889 if (flags & DMA_PREP_FENCE)
890 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +0530891
892 /* Fillup request message */
893 sba_fillup_xor_msg(req, req->cmds, &req->msg,
894 off, len, dst, src, src_cnt);
895
896 /* Init async_tx descriptor */
897 req->tx.flags = flags;
898 req->tx.cookie = -EBUSY;
899
900 return req;
901}
902
903static struct dma_async_tx_descriptor *
904sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
905 u32 src_cnt, size_t len, unsigned long flags)
906{
907 size_t req_len;
908 dma_addr_t off = 0;
909 struct sba_device *sba = to_sba_device(dchan);
910 struct sba_request *first = NULL, *req;
911
912 /* Sanity checks */
913 if (unlikely(src_cnt > sba->max_xor_srcs))
914 return NULL;
915
 916 /* Create chained requests where each request is up to hw_buf_size */
917 while (len) {
918 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
919
920 req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
921 req_len, flags);
922 if (!req) {
923 if (first)
924 sba_free_chained_requests(first);
925 return NULL;
926 }
927
928 if (first)
929 sba_chain_request(first, req);
930 else
931 first = req;
932
933 off += req_len;
934 len -= req_len;
935 }
936
937 return (first) ? &first->tx : NULL;
938}
939
940static void sba_fillup_pq_msg(struct sba_request *req,
941 bool pq_continue,
942 struct brcm_sba_command *cmds,
943 struct brcm_message *msg,
944 dma_addr_t msg_offset, size_t msg_len,
945 dma_addr_t *dst_p, dma_addr_t *dst_q,
946 const u8 *scf, dma_addr_t *src, u32 src_cnt)
947{
948 u64 cmd;
949 u32 c_mdata;
950 unsigned int i;
Anup Patele7ae72a2017-08-22 15:26:54 +0530951 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +0530952 struct brcm_sba_command *cmdsp = cmds;
953
954 if (pq_continue) {
955 /* Type-B command to load old P into buf0 */
956 if (dst_p) {
957 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
958 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
959 cmd = sba_cmd_enc(cmd, msg_len,
960 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
961 c_mdata = sba_cmd_load_c_mdata(0);
962 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
963 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
964 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
965 SBA_CMD_SHIFT, SBA_CMD_MASK);
966 cmdsp->cmd = cmd;
967 *cmdsp->cmd_dma = cpu_to_le64(cmd);
968 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
969 cmdsp->data = *dst_p + msg_offset;
970 cmdsp->data_len = msg_len;
971 cmdsp++;
972 }
973
974 /* Type-B command to load old Q into buf1 */
975 if (dst_q) {
976 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
977 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
978 cmd = sba_cmd_enc(cmd, msg_len,
979 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
980 c_mdata = sba_cmd_load_c_mdata(1);
981 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
982 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
983 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
984 SBA_CMD_SHIFT, SBA_CMD_MASK);
985 cmdsp->cmd = cmd;
986 *cmdsp->cmd_dma = cpu_to_le64(cmd);
987 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
988 cmdsp->data = *dst_q + msg_offset;
989 cmdsp->data_len = msg_len;
990 cmdsp++;
991 }
992 } else {
993 /* Type-A command to zero all buffers */
994 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
995 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
996 cmd = sba_cmd_enc(cmd, msg_len,
997 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
998 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
999 SBA_CMD_SHIFT, SBA_CMD_MASK);
1000 cmdsp->cmd = cmd;
1001 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1002 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1003 cmdsp++;
1004 }
1005
 1006 /* Type-B commands to generate P into buf0 and Q into buf1 */
1007 for (i = 0; i < src_cnt; i++) {
1008 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1009 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1010 cmd = sba_cmd_enc(cmd, msg_len,
1011 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1012 c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
1013 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1014 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1015 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1016 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1017 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
1018 SBA_CMD_SHIFT, SBA_CMD_MASK);
1019 cmdsp->cmd = cmd;
1020 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1021 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1022 cmdsp->data = src[i] + msg_offset;
1023 cmdsp->data_len = msg_len;
1024 cmdsp++;
1025 }
1026
1027 /* Type-A command to write buf0 */
1028 if (dst_p) {
1029 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1030 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1031 cmd = sba_cmd_enc(cmd, msg_len,
1032 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1033 cmd = sba_cmd_enc(cmd, 0x1,
1034 SBA_RESP_SHIFT, SBA_RESP_MASK);
1035 c_mdata = sba_cmd_write_c_mdata(0);
1036 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1037 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1038 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1039 SBA_CMD_SHIFT, SBA_CMD_MASK);
1040 cmdsp->cmd = cmd;
1041 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1042 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1043 if (req->sba->hw_resp_size) {
1044 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +05301045 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +05301046 cmdsp->resp_len = req->sba->hw_resp_size;
1047 }
1048 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1049 cmdsp->data = *dst_p + msg_offset;
1050 cmdsp->data_len = msg_len;
1051 cmdsp++;
1052 }
1053
1054 /* Type-A command to write buf1 */
1055 if (dst_q) {
1056 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1057 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1058 cmd = sba_cmd_enc(cmd, msg_len,
1059 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1060 cmd = sba_cmd_enc(cmd, 0x1,
1061 SBA_RESP_SHIFT, SBA_RESP_MASK);
1062 c_mdata = sba_cmd_write_c_mdata(1);
1063 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1064 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1065 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1066 SBA_CMD_SHIFT, SBA_CMD_MASK);
1067 cmdsp->cmd = cmd;
1068 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1069 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1070 if (req->sba->hw_resp_size) {
1071 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +05301072 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +05301073 cmdsp->resp_len = req->sba->hw_resp_size;
1074 }
1075 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1076 cmdsp->data = *dst_q + msg_offset;
1077 cmdsp->data_len = msg_len;
1078 cmdsp++;
1079 }
1080
1081 /* Fillup brcm_message */
1082 msg->type = BRCM_MESSAGE_SBA;
1083 msg->sba.cmds = cmds;
1084 msg->sba.cmds_count = cmdsp - cmds;
1085 msg->ctx = req;
1086 msg->error = 0;
1087}
1088
Vinod Kouldd2bceb2017-07-19 10:03:24 +05301089static struct sba_request *
Anup Patel743e1c82017-05-15 10:34:54 +05301090sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
1091 dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
1092 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1093{
1094 struct sba_request *req = NULL;
1095
1096 /* Alloc new request */
1097 req = sba_alloc_request(sba);
1098 if (!req)
1099 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +05301100 if (flags & DMA_PREP_FENCE)
1101 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +05301102
1103 /* Fillup request messages */
1104 sba_fillup_pq_msg(req, dmaf_continue(flags),
1105 req->cmds, &req->msg,
1106 off, len, dst_p, dst_q, scf, src, src_cnt);
1107
1108 /* Init async_tx descriptor */
1109 req->tx.flags = flags;
1110 req->tx.cookie = -EBUSY;
1111
1112 return req;
1113}
1114
1115static void sba_fillup_pq_single_msg(struct sba_request *req,
1116 bool pq_continue,
1117 struct brcm_sba_command *cmds,
1118 struct brcm_message *msg,
1119 dma_addr_t msg_offset, size_t msg_len,
1120 dma_addr_t *dst_p, dma_addr_t *dst_q,
1121 dma_addr_t src, u8 scf)
1122{
1123 u64 cmd;
1124 u32 c_mdata;
1125 u8 pos, dpos = raid6_gflog[scf];
Anup Patele7ae72a2017-08-22 15:26:54 +05301126 dma_addr_t resp_dma = req->tx.phys;
Anup Patel743e1c82017-05-15 10:34:54 +05301127 struct brcm_sba_command *cmdsp = cmds;
1128
1129 if (!dst_p)
1130 goto skip_p;
1131
1132 if (pq_continue) {
1133 /* Type-B command to load old P into buf0 */
1134 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1135 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1136 cmd = sba_cmd_enc(cmd, msg_len,
1137 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1138 c_mdata = sba_cmd_load_c_mdata(0);
1139 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1140 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1141 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1142 SBA_CMD_SHIFT, SBA_CMD_MASK);
1143 cmdsp->cmd = cmd;
1144 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1145 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1146 cmdsp->data = *dst_p + msg_offset;
1147 cmdsp->data_len = msg_len;
1148 cmdsp++;
1149
1150 /*
1151 * Type-B commands to xor data with buf0 and put it
1152 * back in buf0
1153 */
1154 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1155 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1156 cmd = sba_cmd_enc(cmd, msg_len,
1157 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1158 c_mdata = sba_cmd_xor_c_mdata(0, 0);
1159 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1160 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1161 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1162 SBA_CMD_SHIFT, SBA_CMD_MASK);
1163 cmdsp->cmd = cmd;
1164 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1165 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1166 cmdsp->data = src + msg_offset;
1167 cmdsp->data_len = msg_len;
1168 cmdsp++;
1169 } else {
1170 /* Type-B command to load old P into buf0 */
1171 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1172 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1173 cmd = sba_cmd_enc(cmd, msg_len,
1174 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1175 c_mdata = sba_cmd_load_c_mdata(0);
1176 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1177 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1178 cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
1179 SBA_CMD_SHIFT, SBA_CMD_MASK);
1180 cmdsp->cmd = cmd;
1181 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1182 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1183 cmdsp->data = src + msg_offset;
1184 cmdsp->data_len = msg_len;
1185 cmdsp++;
1186 }
1187
1188 /* Type-A command to write buf0 */
1189 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1190 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1191 cmd = sba_cmd_enc(cmd, msg_len,
1192 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1193 cmd = sba_cmd_enc(cmd, 0x1,
1194 SBA_RESP_SHIFT, SBA_RESP_MASK);
1195 c_mdata = sba_cmd_write_c_mdata(0);
1196 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1197 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1198 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1199 SBA_CMD_SHIFT, SBA_CMD_MASK);
1200 cmdsp->cmd = cmd;
1201 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1202 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1203 if (req->sba->hw_resp_size) {
1204 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +05301205 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +05301206 cmdsp->resp_len = req->sba->hw_resp_size;
1207 }
1208 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1209 cmdsp->data = *dst_p + msg_offset;
1210 cmdsp->data_len = msg_len;
1211 cmdsp++;
1212
1213skip_p:
1214 if (!dst_q)
1215 goto skip_q;
1216
1217 /* Type-A command to zero all buffers */
1218 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1219 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1220 cmd = sba_cmd_enc(cmd, msg_len,
1221 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1222 cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
1223 SBA_CMD_SHIFT, SBA_CMD_MASK);
1224 cmdsp->cmd = cmd;
1225 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1226 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1227 cmdsp++;
1228
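/*
 * raid6_gflog[0] is 255, so dpos == 255 means a zero coefficient and
 * the Q contribution of this source can be skipped. For a non-zero
 * coefficient, a single Galois command multiplies by at most
 * g^(max_pq_coefs - 1), so the exponent dpos is consumed in chunks:
 * an initial Type-B Galois command on the source data followed by
 * Type-A Galois commands on buf0/buf1 until dpos reaches zero.
 */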
1229 if (dpos == 255)
1230 goto skip_q_computation;
1231 pos = (dpos < req->sba->max_pq_coefs) ?
1232 dpos : (req->sba->max_pq_coefs - 1);
1233
1234 /*
1235 * Type-B command to generate initial Q from data
1236 * and store output into buf0
1237 */
1238 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1239 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1240 cmd = sba_cmd_enc(cmd, msg_len,
1241 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1242 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0);
1243 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1244 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1245 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1246 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1247 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1248 SBA_CMD_SHIFT, SBA_CMD_MASK);
1249 cmdsp->cmd = cmd;
1250 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1251 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1252 cmdsp->data = src + msg_offset;
1253 cmdsp->data_len = msg_len;
1254 cmdsp++;
1255
1256 dpos -= pos;
1257
 1258 /* Multiple Type-A commands to generate the final Q */
1259 while (dpos) {
1260 pos = (dpos < req->sba->max_pq_coefs) ?
1261 dpos : (req->sba->max_pq_coefs - 1);
1262
1263 /*
 1264 * Type-A command to generate Q from buf0 and
 1265 * buf1 and store the result in buf0
1266 */
1267 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1268 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1269 cmd = sba_cmd_enc(cmd, msg_len,
1270 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1271 c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1);
1272 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1273 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1274 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
1275 SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
1276 cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS,
1277 SBA_CMD_SHIFT, SBA_CMD_MASK);
1278 cmdsp->cmd = cmd;
1279 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1280 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1281 cmdsp++;
1282
1283 dpos -= pos;
1284 }
1285
1286skip_q_computation:
1287 if (pq_continue) {
1288 /*
1289 * Type-B command to XOR previous output with
1290 * buf0 and write it into buf0
1291 */
1292 cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
1293 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1294 cmd = sba_cmd_enc(cmd, msg_len,
1295 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1296 c_mdata = sba_cmd_xor_c_mdata(0, 0);
1297 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1298 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1299 cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
1300 SBA_CMD_SHIFT, SBA_CMD_MASK);
1301 cmdsp->cmd = cmd;
1302 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1303 cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
1304 cmdsp->data = *dst_q + msg_offset;
1305 cmdsp->data_len = msg_len;
1306 cmdsp++;
1307 }
1308
1309 /* Type-A command to write buf0 */
1310 cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
1311 SBA_TYPE_SHIFT, SBA_TYPE_MASK);
1312 cmd = sba_cmd_enc(cmd, msg_len,
1313 SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
1314 cmd = sba_cmd_enc(cmd, 0x1,
1315 SBA_RESP_SHIFT, SBA_RESP_MASK);
1316 c_mdata = sba_cmd_write_c_mdata(0);
1317 cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
1318 SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
1319 cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
1320 SBA_CMD_SHIFT, SBA_CMD_MASK);
1321 cmdsp->cmd = cmd;
1322 *cmdsp->cmd_dma = cpu_to_le64(cmd);
1323 cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
1324 if (req->sba->hw_resp_size) {
1325 cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
Anup Patele7ae72a2017-08-22 15:26:54 +05301326 cmdsp->resp = resp_dma;
Anup Patel743e1c82017-05-15 10:34:54 +05301327 cmdsp->resp_len = req->sba->hw_resp_size;
1328 }
1329 cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
1330 cmdsp->data = *dst_q + msg_offset;
1331 cmdsp->data_len = msg_len;
1332 cmdsp++;
1333
1334skip_q:
1335 /* Fillup brcm_message */
1336 msg->type = BRCM_MESSAGE_SBA;
1337 msg->sba.cmds = cmds;
1338 msg->sba.cmds_count = cmdsp - cmds;
1339 msg->ctx = req;
1340 msg->error = 0;
1341}
1342
Vinod Kouldd2bceb2017-07-19 10:03:24 +05301343static struct sba_request *
Anup Patel743e1c82017-05-15 10:34:54 +05301344sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
1345 dma_addr_t *dst_p, dma_addr_t *dst_q,
1346 dma_addr_t src, u8 scf, size_t len,
1347 unsigned long flags)
1348{
1349 struct sba_request *req = NULL;
1350
1351 /* Alloc new request */
1352 req = sba_alloc_request(sba);
1353 if (!req)
1354 return NULL;
Anup Patel57a28502017-08-22 15:26:52 +05301355 if (flags & DMA_PREP_FENCE)
1356 req->flags |= SBA_REQUEST_FENCE;
Anup Patel743e1c82017-05-15 10:34:54 +05301357
1358 /* Fillup request messages */
1359 sba_fillup_pq_single_msg(req, dmaf_continue(flags),
1360 req->cmds, &req->msg, off, len,
1361 dst_p, dst_q, src, scf);
1362
1363 /* Init async_tx descriptor */
1364 req->tx.flags = flags;
1365 req->tx.cookie = -EBUSY;
1366
1367 return req;
1368}
1369
1370static struct dma_async_tx_descriptor *
1371sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
1372 u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
1373{
1374 u32 i, dst_q_index;
1375 size_t req_len;
1376 bool slow = false;
1377 dma_addr_t off = 0;
1378 dma_addr_t *dst_p = NULL, *dst_q = NULL;
1379 struct sba_device *sba = to_sba_device(dchan);
1380 struct sba_request *first = NULL, *req;
1381
1382 /* Sanity checks */
1383 if (unlikely(src_cnt > sba->max_pq_srcs))
1384 return NULL;
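/*
 * If any coefficient's log is beyond what a single hardware Galois
 * command can apply, fall back to the slow path below where every
 * source is handled by its own single-source request.
 */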
1385 for (i = 0; i < src_cnt; i++)
1386 if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
1387 slow = true;
1388
 1389 /* Figure out P and Q destination addresses */
1390 if (!(flags & DMA_PREP_PQ_DISABLE_P))
1391 dst_p = &dst[0];
1392 if (!(flags & DMA_PREP_PQ_DISABLE_Q))
1393 dst_q = &dst[1];
1394
 1395 /* Create chained requests where each request is up to hw_buf_size */
1396 while (len) {
1397 req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;
1398
1399 if (slow) {
1400 dst_q_index = src_cnt;
1401
1402 if (dst_q) {
1403 for (i = 0; i < src_cnt; i++) {
1404 if (*dst_q == src[i]) {
1405 dst_q_index = i;
1406 break;
1407 }
1408 }
1409 }
1410
1411 if (dst_q_index < src_cnt) {
1412 i = dst_q_index;
1413 req = sba_prep_dma_pq_single_req(sba,
1414 off, dst_p, dst_q, src[i], scf[i],
1415 req_len, flags | DMA_PREP_FENCE);
1416 if (!req)
1417 goto fail;
1418
1419 if (first)
1420 sba_chain_request(first, req);
1421 else
1422 first = req;
1423
1424 flags |= DMA_PREP_CONTINUE;
1425 }
1426
1427 for (i = 0; i < src_cnt; i++) {
1428 if (dst_q_index == i)
1429 continue;
1430
1431 req = sba_prep_dma_pq_single_req(sba,
1432 off, dst_p, dst_q, src[i], scf[i],
1433 req_len, flags | DMA_PREP_FENCE);
1434 if (!req)
1435 goto fail;
1436
1437 if (first)
1438 sba_chain_request(first, req);
1439 else
1440 first = req;
1441
1442 flags |= DMA_PREP_CONTINUE;
1443 }
1444 } else {
1445 req = sba_prep_dma_pq_req(sba, off,
1446 dst_p, dst_q, src, src_cnt,
1447 scf, req_len, flags);
1448 if (!req)
1449 goto fail;
1450
1451 if (first)
1452 sba_chain_request(first, req);
1453 else
1454 first = req;
1455 }
1456
1457 off += req_len;
1458 len -= req_len;
1459 }
1460
1461 return (first) ? &first->tx : NULL;
1462
1463fail:
1464 if (first)
1465 sba_free_chained_requests(first);
1466 return NULL;
1467}
1468
1469/* ====== Mailbox callbacks ===== */
1470
Anup Patel743e1c82017-05-15 10:34:54 +05301471static void sba_receive_message(struct mbox_client *cl, void *msg)
1472{
Anup Patel743e1c82017-05-15 10:34:54 +05301473 struct brcm_message *m = msg;
Anup Patelf8338512017-08-22 15:26:58 +05301474 struct sba_request *req = m->ctx;
Anup Patel743e1c82017-05-15 10:34:54 +05301475 struct sba_device *sba = req->sba;
1476
 1477 /* Report an error if the message has an error */
1478 if (m->error < 0)
1479 dev_err(sba->dev, "%s got message with error %d",
1480 dma_chan_name(&sba->dma_chan), m->error);
1481
Anup Patelf8338512017-08-22 15:26:58 +05301482 /* Process received request */
1483 sba_process_received_request(sba, req);
Anup Patel743e1c82017-05-15 10:34:54 +05301484
Anup Patelf8338512017-08-22 15:26:58 +05301485 /* Process deferred requests */
1486 sba_process_deferred_requests(sba);
Anup Patel743e1c82017-05-15 10:34:54 +05301487}
1488
1489/* ====== Platform driver routines ===== */
1490
1491static int sba_prealloc_channel_resources(struct sba_device *sba)
1492{
Anup Patele7ae72a2017-08-22 15:26:54 +05301493 int i, j, ret = 0;
Anup Patel743e1c82017-05-15 10:34:54 +05301494 struct sba_request *req = NULL;
1495
Anup Pateleb677442017-08-22 15:26:59 +05301496 sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
Anup Patel743e1c82017-05-15 10:34:54 +05301497 sba->max_resp_pool_size,
1498 &sba->resp_dma_base, GFP_KERNEL);
1499 if (!sba->resp_base)
1500 return -ENOMEM;
1501
Anup Pateleb677442017-08-22 15:26:59 +05301502 sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
Anup Patel743e1c82017-05-15 10:34:54 +05301503 sba->max_cmds_pool_size,
1504 &sba->cmds_dma_base, GFP_KERNEL);
1505 if (!sba->cmds_base) {
1506 ret = -ENOMEM;
1507 goto fail_free_resp_pool;
1508 }
1509
1510 spin_lock_init(&sba->reqs_lock);
1511 sba->reqs_fence = false;
1512 INIT_LIST_HEAD(&sba->reqs_alloc_list);
1513 INIT_LIST_HEAD(&sba->reqs_pending_list);
1514 INIT_LIST_HEAD(&sba->reqs_active_list);
1515 INIT_LIST_HEAD(&sba->reqs_received_list);
1516 INIT_LIST_HEAD(&sba->reqs_completed_list);
1517 INIT_LIST_HEAD(&sba->reqs_aborted_list);
1518 INIT_LIST_HEAD(&sba->reqs_free_list);
1519
Anup Patele7ae72a2017-08-22 15:26:54 +05301520 for (i = 0; i < sba->max_req; i++) {
Anup Patel5655e002017-08-22 15:26:56 +05301521 req = devm_kzalloc(sba->dev,
1522 sizeof(*req) +
1523 sba->max_cmd_per_req * sizeof(req->cmds[0]),
1524 GFP_KERNEL);
1525 if (!req) {
1526 ret = -ENOMEM;
1527 goto fail_free_cmds_pool;
1528 }
Anup Patel743e1c82017-05-15 10:34:54 +05301529 INIT_LIST_HEAD(&req->node);
1530 req->sba = sba;
Anup Patel57a28502017-08-22 15:26:52 +05301531 req->flags = SBA_REQUEST_STATE_FREE;
Anup Patel743e1c82017-05-15 10:34:54 +05301532 INIT_LIST_HEAD(&req->next);
Anup Patel743e1c82017-05-15 10:34:54 +05301533 atomic_set(&req->next_pending_count, 0);
Anup Patel743e1c82017-05-15 10:34:54 +05301534 for (j = 0; j < sba->max_cmd_per_req; j++) {
1535 req->cmds[j].cmd = 0;
1536 req->cmds[j].cmd_dma = sba->cmds_base +
1537 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1538 req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
1539 (i * sba->max_cmd_per_req + j) * sizeof(u64);
1540 req->cmds[j].flags = 0;
1541 }
1542 memset(&req->msg, 0, sizeof(req->msg));
1543 dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
1544 req->tx.tx_submit = sba_tx_submit;
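/*
 * Reuse tx.phys to carry the DMA address of this request's
 * hardware response buffer; the sba_fillup_*() routines read
 * it back as resp_dma.
 */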
Anup Patele7ae72a2017-08-22 15:26:54 +05301545 req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
Anup Patel743e1c82017-05-15 10:34:54 +05301546 list_add_tail(&req->node, &sba->reqs_free_list);
1547 }
1548
Anup Patel743e1c82017-05-15 10:34:54 +05301549 return 0;
1550
1551fail_free_cmds_pool:
Anup Pateleb677442017-08-22 15:26:59 +05301552 dma_free_coherent(sba->mbox_dev,
Anup Patel743e1c82017-05-15 10:34:54 +05301553 sba->max_cmds_pool_size,
1554 sba->cmds_base, sba->cmds_dma_base);
1555fail_free_resp_pool:
Anup Pateleb677442017-08-22 15:26:59 +05301556 dma_free_coherent(sba->mbox_dev,
Anup Patel743e1c82017-05-15 10:34:54 +05301557 sba->max_resp_pool_size,
1558 sba->resp_base, sba->resp_dma_base);
1559 return ret;
1560}
1561
1562static void sba_freeup_channel_resources(struct sba_device *sba)
1563{
1564 dmaengine_terminate_all(&sba->dma_chan);
Anup Pateleb677442017-08-22 15:26:59 +05301565 dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
Anup Patel743e1c82017-05-15 10:34:54 +05301566 sba->cmds_base, sba->cmds_dma_base);
Anup Pateleb677442017-08-22 15:26:59 +05301567 dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
Anup Patel743e1c82017-05-15 10:34:54 +05301568 sba->resp_base, sba->resp_dma_base);
1569 sba->resp_base = NULL;
1570 sba->resp_dma_base = 0;
1571}
1572
1573static int sba_async_register(struct sba_device *sba)
1574{
1575 int ret;
1576 struct dma_device *dma_dev = &sba->dma_dev;
1577
1578 /* Initialize DMA channel cookie */
1579 sba->dma_chan.device = dma_dev;
1580 dma_cookie_init(&sba->dma_chan);
1581
1582 /* Initialize DMA device capability mask */
1583 dma_cap_zero(dma_dev->cap_mask);
1584 dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
1585 dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
1586 dma_cap_set(DMA_XOR, dma_dev->cap_mask);
1587 dma_cap_set(DMA_PQ, dma_dev->cap_mask);
1588
1589 /*
1590 * Set mailbox channel device as the base device of
1591 * our dma_device because the actual memory accesses
1592 * will be done by mailbox controller
1593 */
1594 dma_dev->dev = sba->mbox_dev;
1595
1596 /* Set base prep routines */
1597 dma_dev->device_free_chan_resources = sba_free_chan_resources;
1598 dma_dev->device_terminate_all = sba_device_terminate_all;
1599 dma_dev->device_issue_pending = sba_issue_pending;
1600 dma_dev->device_tx_status = sba_tx_status;
1601
1602 /* Set interrupt routine */
1603 if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
1604 dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;
1605
1606 /* Set memcpy routine */
1607 if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
1608 dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;
1609
1610 /* Set xor routine and capability */
1611 if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
1612 dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
1613 dma_dev->max_xor = sba->max_xor_srcs;
1614 }
1615
1616 /* Set pq routine and capability */
1617 if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
1618 dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
1619 dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
1620 }
1621
1622 /* Initialize DMA device channel list */
1623 INIT_LIST_HEAD(&dma_dev->channels);
1624 list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);
1625
 1626 /* Register with Linux async DMA framework */
1627 ret = dma_async_device_register(dma_dev);
1628 if (ret) {
1629 dev_err(sba->dev, "async device register error %d", ret);
1630 return ret;
1631 }
1632
1633 dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
1634 dma_chan_name(&sba->dma_chan),
1635 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
1636 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
1637 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
1638 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");
1639
1640 return 0;
1641}
1642
1643static int sba_probe(struct platform_device *pdev)
1644{
1645 int i, ret = 0, mchans_count;
1646 struct sba_device *sba;
1647 struct platform_device *mbox_pdev;
1648 struct of_phandle_args args;
1649
1650 /* Allocate main SBA struct */
1651 sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
1652 if (!sba)
1653 return -ENOMEM;
1654
1655 sba->dev = &pdev->dev;
1656 platform_set_drvdata(pdev, sba);
1657
Anup Patel5346aaf2017-08-22 15:26:57 +05301658 /* The number of mailbox channels equals the number of "mboxes" DT entries */
1659 ret = of_count_phandle_with_args(pdev->dev.of_node,
1660 "mboxes", "#mbox-cells");
1661 if (ret <= 0)
1662 return -ENODEV;
1663 mchans_count = ret;
1664
Anup Patel743e1c82017-05-15 10:34:54 +05301665 /* Determine SBA version from DT compatible string */
1666 if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
1667 sba->ver = SBA_VER_1;
1668 else if (of_device_is_compatible(sba->dev->of_node,
1669 "brcm,iproc-sba-v2"))
1670 sba->ver = SBA_VER_2;
1671 else
1672 return -ENODEV;
1673
1674 /* Derived Configuration parameters */
1675 switch (sba->ver) {
1676 case SBA_VER_1:
Anup Patel743e1c82017-05-15 10:34:54 +05301677 sba->hw_buf_size = 4096;
1678 sba->hw_resp_size = 8;
1679 sba->max_pq_coefs = 6;
1680 sba->max_pq_srcs = 6;
1681 break;
1682 case SBA_VER_2:
Anup Patel743e1c82017-05-15 10:34:54 +05301683 sba->hw_buf_size = 4096;
1684 sba->hw_resp_size = 8;
1685 sba->max_pq_coefs = 30;
1686 /*
 1687 * We could support max_pq_srcs == max_pq_coefs, but we
 1688 * are limited by the number of SBA commands that we can
 1689 * fit in one message for the underlying ring manager HW.
1690 */
1691 sba->max_pq_srcs = 12;
1692 break;
1693 default:
1694 return -EINVAL;
1695 }
Anup Patel5346aaf2017-08-22 15:26:57 +05301696 sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
Anup Patel743e1c82017-05-15 10:34:54 +05301697 sba->max_cmd_per_req = sba->max_pq_srcs + 3;
1698 sba->max_xor_srcs = sba->max_cmd_per_req - 1;
1699 sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
1700 sba->max_cmds_pool_size = sba->max_req *
1701 sba->max_cmd_per_req * sizeof(u64);
1702
1703 /* Setup mailbox client */
1704 sba->client.dev = &pdev->dev;
1705 sba->client.rx_callback = sba_receive_message;
1706 sba->client.tx_block = false;
1707 sba->client.knows_txdone = false;
1708 sba->client.tx_tout = 0;
1709
Anup Patel743e1c82017-05-15 10:34:54 +05301710 /* Allocate mailbox channel array */
Anup Patel5346aaf2017-08-22 15:26:57 +05301711 sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
Anup Patel743e1c82017-05-15 10:34:54 +05301712 sizeof(*sba->mchans), GFP_KERNEL);
1713 if (!sba->mchans)
1714 return -ENOMEM;
1715
1716 /* Request mailbox channels */
Anup Patel5346aaf2017-08-22 15:26:57 +05301717 sba->mchans_count = 0;
Anup Patel743e1c82017-05-15 10:34:54 +05301718 for (i = 0; i < mchans_count; i++) {
1719 sba->mchans[i] = mbox_request_channel(&sba->client, i);
1720 if (IS_ERR(sba->mchans[i])) {
1721 ret = PTR_ERR(sba->mchans[i]);
1722 goto fail_free_mchans;
1723 }
1724 sba->mchans_count++;
1725 }
Anup Patel5346aaf2017-08-22 15:26:57 +05301726 atomic_set(&sba->mchans_current, 0);
Anup Patel743e1c82017-05-15 10:34:54 +05301727
 1728 /* Find out the underlying mailbox device */
1729 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1730 "mboxes", "#mbox-cells", 0, &args);
1731 if (ret)
1732 goto fail_free_mchans;
1733 mbox_pdev = of_find_device_by_node(args.np);
1734 of_node_put(args.np);
1735 if (!mbox_pdev) {
1736 ret = -ENODEV;
1737 goto fail_free_mchans;
1738 }
1739 sba->mbox_dev = &mbox_pdev->dev;
1740
 1741 /* All mailbox channels should belong to the same ring manager device */
1742 for (i = 1; i < mchans_count; i++) {
1743 ret = of_parse_phandle_with_args(pdev->dev.of_node,
1744 "mboxes", "#mbox-cells", i, &args);
1745 if (ret)
1746 goto fail_free_mchans;
1747 mbox_pdev = of_find_device_by_node(args.np);
1748 of_node_put(args.np);
1749 if (sba->mbox_dev != &mbox_pdev->dev) {
1750 ret = -EINVAL;
1751 goto fail_free_mchans;
1752 }
1753 }
1754
Anup Patel743e1c82017-05-15 10:34:54 +05301755 /* Prealloc channel resource */
1756 ret = sba_prealloc_channel_resources(sba);
1757 if (ret)
Anup Pateleb677442017-08-22 15:26:59 +05301758 goto fail_free_mchans;
1759
1760 /* Register DMA device with Linux async framework */
1761 ret = sba_async_register(sba);
1762 if (ret)
1763 goto fail_free_resources;
Anup Patel743e1c82017-05-15 10:34:54 +05301764
1765 /* Print device info */
1766 dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels",
1767 dma_chan_name(&sba->dma_chan), sba->ver+1,
1768 sba->mchans_count);
1769
1770 return 0;
1771
Anup Pateleb677442017-08-22 15:26:59 +05301772fail_free_resources:
1773 sba_freeup_channel_resources(sba);
Anup Patel743e1c82017-05-15 10:34:54 +05301774fail_free_mchans:
1775 for (i = 0; i < sba->mchans_count; i++)
1776 mbox_free_channel(sba->mchans[i]);
1777 return ret;
1778}
1779
1780static int sba_remove(struct platform_device *pdev)
1781{
1782 int i;
1783 struct sba_device *sba = platform_get_drvdata(pdev);
1784
Anup Patel743e1c82017-05-15 10:34:54 +05301785 dma_async_device_unregister(&sba->dma_dev);
1786
Anup Pateleb677442017-08-22 15:26:59 +05301787 sba_freeup_channel_resources(sba);
1788
Anup Patel743e1c82017-05-15 10:34:54 +05301789 for (i = 0; i < sba->mchans_count; i++)
1790 mbox_free_channel(sba->mchans[i]);
1791
1792 return 0;
1793}
1794
1795static const struct of_device_id sba_of_match[] = {
1796 { .compatible = "brcm,iproc-sba", },
1797 { .compatible = "brcm,iproc-sba-v2", },
1798 {},
1799};
1800MODULE_DEVICE_TABLE(of, sba_of_match);
1801
1802static struct platform_driver sba_driver = {
1803 .probe = sba_probe,
1804 .remove = sba_remove,
1805 .driver = {
1806 .name = "bcm-sba-raid",
1807 .of_match_table = sba_of_match,
1808 },
1809};
1810module_platform_driver(sba_driver);
1811
1812MODULE_DESCRIPTION("Broadcom SBA RAID driver");
1813MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
1814MODULE_LICENSE("GPL v2");