/*
 * Copyright (C) 2017 Broadcom
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

/*
 * Broadcom SBA RAID Driver
 *
 * The Broadcom stream buffer accelerator (SBA) provides offloading
 * capabilities for RAID operations. The SBA offload engine is accessible
 * via the Broadcom SoC-specific ring manager. Two or more offload engines
 * can share the same ring manager, so the ring manager driver is
 * implemented as a mailbox controller driver and the offload engine
 * drivers are implemented as mailbox clients.
 *
 * Typically, the Broadcom SoC-specific ring manager will implement a
 * large number of hardware rings over one or more SBA hardware devices.
 * By design, the internal buffer size of an SBA hardware device is
 * limited, but all offload operations supported by SBA can be broken
 * down into multiple small-size requests and executed in parallel on
 * multiple SBA hardware devices to achieve high throughput.
 *
 * The Broadcom SBA RAID driver does not require any register programming
 * except submitting requests to SBA hardware devices via mailbox channels.
 * This driver implements a DMA device with one DMA channel using a set
 * of mailbox channels provided by the Broadcom SoC-specific ring manager
 * driver. To exploit parallelism (as described above), all DMA requests
 * coming to the SBA RAID DMA channel are broken down into smaller
 * requests and submitted to multiple mailbox channels in round-robin
 * fashion. For more SBA DMA channels, we can create more SBA device
 * nodes in the Broadcom SoC-specific DTS, based on the number of
 * hardware rings supported by the Broadcom SoC ring manager.
 */
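
/*
 * As an illustrative sketch, one such DTS node using two ring-manager
 * mailbox channels might look like the following (the node name, label,
 * and mailbox specifier cells are hypothetical, not taken from any
 * particular board file):
 *
 *	raid0: raid@0 {
 *		compatible = "brcm,iproc-sba-v2";
 *		mboxes = <&raid_mbox 0 0x1 0xffff>,
 *			 <&raid_mbox 1 0x1 0xffff>;
 *	};
 */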

#include <linux/bitops.h>
#include <linux/dma-mapping.h>
#include <linux/dmaengine.h>
#include <linux/list.h>
#include <linux/mailbox_client.h>
#include <linux/mailbox/brcm-message.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/raid/pq.h>

#include "dmaengine.h"

/* ====== Driver macros and defines ====== */

#define SBA_TYPE_SHIFT                          48
#define SBA_TYPE_MASK                           GENMASK(1, 0)
#define SBA_TYPE_A                              0x0
#define SBA_TYPE_B                              0x2
#define SBA_TYPE_C                              0x3
#define SBA_USER_DEF_SHIFT                      32
#define SBA_USER_DEF_MASK                       GENMASK(15, 0)
#define SBA_R_MDATA_SHIFT                       24
#define SBA_R_MDATA_MASK                        GENMASK(7, 0)
#define SBA_C_MDATA_MS_SHIFT                    18
#define SBA_C_MDATA_MS_MASK                     GENMASK(1, 0)
#define SBA_INT_SHIFT                           17
#define SBA_INT_MASK                            BIT(0)
#define SBA_RESP_SHIFT                          16
#define SBA_RESP_MASK                           BIT(0)
#define SBA_C_MDATA_SHIFT                       8
#define SBA_C_MDATA_MASK                        GENMASK(7, 0)
#define SBA_C_MDATA_BNUMx_SHIFT(__bnum)         (2 * (__bnum))
#define SBA_C_MDATA_BNUMx_MASK                  GENMASK(1, 0)
#define SBA_C_MDATA_DNUM_SHIFT                  5
#define SBA_C_MDATA_DNUM_MASK                   GENMASK(4, 0)
#define SBA_C_MDATA_LS(__v)                     ((__v) & 0xff)
#define SBA_C_MDATA_MS(__v)                     (((__v) >> 8) & 0x3)
#define SBA_CMD_SHIFT                           0
#define SBA_CMD_MASK                            GENMASK(3, 0)
#define SBA_CMD_ZERO_BUFFER                     0x4
#define SBA_CMD_ZERO_ALL_BUFFERS                0x8
#define SBA_CMD_LOAD_BUFFER                     0x9
#define SBA_CMD_XOR                             0xa
#define SBA_CMD_GALOIS_XOR                      0xb
#define SBA_CMD_WRITE_BUFFER                    0xc
#define SBA_CMD_GALOIS                          0xe

#define SBA_MAX_REQ_PER_MBOX_CHANNEL            8192

/* Driver helper macros */
#define to_sba_request(tx)              \
        container_of(tx, struct sba_request, tx)
#define to_sba_device(dchan)            \
        container_of(dchan, struct sba_device, dma_chan)

/* ====== Driver data structures ====== */

enum sba_request_flags {
        SBA_REQUEST_STATE_FREE          = 0x001,
        SBA_REQUEST_STATE_ALLOCED       = 0x002,
        SBA_REQUEST_STATE_PENDING       = 0x004,
        SBA_REQUEST_STATE_ACTIVE        = 0x008,
        SBA_REQUEST_STATE_RECEIVED      = 0x010,
        SBA_REQUEST_STATE_COMPLETED     = 0x020,
        SBA_REQUEST_STATE_ABORTED       = 0x040,
        SBA_REQUEST_STATE_MASK          = 0x0ff,
        SBA_REQUEST_FENCE               = 0x100,
};

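/*
 * Request lifecycle sketch (derived from the _sba_*_request() helpers
 * below):
 *
 *   FREE -> ALLOCED -> PENDING -> ACTIVE -> RECEIVED -> COMPLETED -> FREE
 *
 * Active requests torn down during cleanup move to ABORTED instead and
 * are freed once their mailbox response arrives. SBA_REQUEST_FENCE is
 * not a state: once a fenced request becomes active, no further pending
 * requests are made active until the active list drains.
 */
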
struct sba_request {
        /* Global state */
        struct list_head node;
        struct sba_device *sba;
        u32 flags;
        /* Chained requests management */
        struct sba_request *first;
        struct list_head next;
        atomic_t next_pending_count;
        /* BRCM message data */
        struct brcm_message msg;
        struct dma_async_tx_descriptor tx;
        /* SBA commands */
        struct brcm_sba_command cmds[0];
};

enum sba_version {
        SBA_VER_1 = 0,
        SBA_VER_2
};

struct sba_device {
        /* Underlying device */
        struct device *dev;
        /* DT configuration parameters */
        enum sba_version ver;
        /* Derived configuration parameters */
        u32 max_req;
        u32 hw_buf_size;
        u32 hw_resp_size;
        u32 max_pq_coefs;
        u32 max_pq_srcs;
        u32 max_cmd_per_req;
        u32 max_xor_srcs;
        u32 max_resp_pool_size;
        u32 max_cmds_pool_size;
        /* Mailbox client and mailbox channels */
        struct mbox_client client;
        int mchans_count;
        atomic_t mchans_current;
        struct mbox_chan **mchans;
        struct device *mbox_dev;
        /* DMA device and DMA channel */
        struct dma_device dma_dev;
        struct dma_chan dma_chan;
        /* DMA channel resources */
        void *resp_base;
        dma_addr_t resp_dma_base;
        void *cmds_base;
        dma_addr_t cmds_dma_base;
        spinlock_t reqs_lock;
        bool reqs_fence;
        struct list_head reqs_alloc_list;
        struct list_head reqs_pending_list;
        struct list_head reqs_active_list;
        struct list_head reqs_received_list;
        struct list_head reqs_completed_list;
        struct list_head reqs_aborted_list;
        struct list_head reqs_free_list;
};

/* ====== Command helper routines ====== */

static inline u64 __pure sba_cmd_enc(u64 cmd, u32 val, u32 shift, u32 mask)
{
        cmd &= ~((u64)mask << shift);
        cmd |= ((u64)(val & mask) << shift);
        return cmd;
}

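/*
 * Example (mirroring the sba_fillup_*_msg() helpers below): a Type-B
 * LOAD_BUFFER command is built up by successive sba_cmd_enc() calls,
 * each packing one masked field at its shift position into the 64-bit
 * command word:
 *
 *	cmd = sba_cmd_enc(0x0, SBA_TYPE_B, SBA_TYPE_SHIFT, SBA_TYPE_MASK);
 *	cmd = sba_cmd_enc(cmd, len, SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
 *	cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, SBA_CMD_SHIFT, SBA_CMD_MASK);
 */
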
static inline u32 __pure sba_cmd_load_c_mdata(u32 b0)
{
        return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_write_c_mdata(u32 b0)
{
        return b0 & SBA_C_MDATA_BNUMx_MASK;
}

static inline u32 __pure sba_cmd_xor_c_mdata(u32 b1, u32 b0)
{
        return (b0 & SBA_C_MDATA_BNUMx_MASK) |
               ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1));
}

static inline u32 __pure sba_cmd_pq_c_mdata(u32 d, u32 b1, u32 b0)
{
        return (b0 & SBA_C_MDATA_BNUMx_MASK) |
               ((b1 & SBA_C_MDATA_BNUMx_MASK) << SBA_C_MDATA_BNUMx_SHIFT(1)) |
               ((d & SBA_C_MDATA_DNUM_MASK) << SBA_C_MDATA_DNUM_SHIFT);
}

/* ====== General helper routines ====== */

static void sba_peek_mchans(struct sba_device *sba)
{
        int mchan_idx;

        for (mchan_idx = 0; mchan_idx < sba->mchans_count; mchan_idx++)
                mbox_client_peek_data(sba->mchans[mchan_idx]);
}

static struct sba_request *sba_alloc_request(struct sba_device *sba)
{
        unsigned long flags;
        struct sba_request *req = NULL;

        spin_lock_irqsave(&sba->reqs_lock, flags);
        req = list_first_entry_or_null(&sba->reqs_free_list,
                                       struct sba_request, node);
        if (req)
                list_move_tail(&req->node, &sba->reqs_alloc_list);
        spin_unlock_irqrestore(&sba->reqs_lock, flags);

        if (!req) {
                /*
                 * We have no more free requests, so we peek the
                 * mailbox channels hoping that a few active requests
                 * have completed, which would create room for new
                 * requests.
                 */
                sba_peek_mchans(sba);
                return NULL;
        }

        req->flags = SBA_REQUEST_STATE_ALLOCED;
        req->first = req;
        INIT_LIST_HEAD(&req->next);
        atomic_set(&req->next_pending_count, 1);

        dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
        async_tx_ack(&req->tx);

        return req;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_pending_request(struct sba_device *sba,
                                 struct sba_request *req)
{
        lockdep_assert_held(&sba->reqs_lock);
        req->flags &= ~SBA_REQUEST_STATE_MASK;
        req->flags |= SBA_REQUEST_STATE_PENDING;
        list_move_tail(&req->node, &sba->reqs_pending_list);
        if (list_empty(&sba->reqs_active_list))
                sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static bool _sba_active_request(struct sba_device *sba,
                                struct sba_request *req)
{
        lockdep_assert_held(&sba->reqs_lock);
        if (list_empty(&sba->reqs_active_list))
                sba->reqs_fence = false;
        if (sba->reqs_fence)
                return false;
        req->flags &= ~SBA_REQUEST_STATE_MASK;
        req->flags |= SBA_REQUEST_STATE_ACTIVE;
        list_move_tail(&req->node, &sba->reqs_active_list);
        if (req->flags & SBA_REQUEST_FENCE)
                sba->reqs_fence = true;
        return true;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_abort_request(struct sba_device *sba,
                               struct sba_request *req)
{
        lockdep_assert_held(&sba->reqs_lock);
        req->flags &= ~SBA_REQUEST_STATE_MASK;
        req->flags |= SBA_REQUEST_STATE_ABORTED;
        list_move_tail(&req->node, &sba->reqs_aborted_list);
        if (list_empty(&sba->reqs_active_list))
                sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_free_request(struct sba_device *sba,
                              struct sba_request *req)
{
        lockdep_assert_held(&sba->reqs_lock);
        req->flags &= ~SBA_REQUEST_STATE_MASK;
        req->flags |= SBA_REQUEST_STATE_FREE;
        list_move_tail(&req->node, &sba->reqs_free_list);
        if (list_empty(&sba->reqs_active_list))
                sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_complete_request(struct sba_device *sba,
                                  struct sba_request *req)
{
        lockdep_assert_held(&sba->reqs_lock);
        req->flags &= ~SBA_REQUEST_STATE_MASK;
        req->flags |= SBA_REQUEST_STATE_COMPLETED;
        list_move_tail(&req->node, &sba->reqs_completed_list);
        if (list_empty(&sba->reqs_active_list))
                sba->reqs_fence = false;
}

/* Note: Must be called with sba->reqs_lock held */
static void _sba_received_request(struct sba_device *sba,
                                  struct sba_request *req)
{
        lockdep_assert_held(&sba->reqs_lock);
        req->flags &= ~SBA_REQUEST_STATE_MASK;
        req->flags |= SBA_REQUEST_STATE_RECEIVED;
        list_move_tail(&req->node, &sba->reqs_received_list);
        if (list_empty(&sba->reqs_active_list))
                sba->reqs_fence = false;
}

static void sba_free_chained_requests(struct sba_request *req)
{
        unsigned long flags;
        struct sba_request *nreq;
        struct sba_device *sba = req->sba;

        spin_lock_irqsave(&sba->reqs_lock, flags);

        _sba_free_request(sba, req);
        list_for_each_entry(nreq, &req->next, next)
                _sba_free_request(sba, nreq);

        spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_chain_request(struct sba_request *first,
                              struct sba_request *req)
{
        unsigned long flags;
        struct sba_device *sba = req->sba;

        spin_lock_irqsave(&sba->reqs_lock, flags);

        list_add_tail(&req->next, &first->next);
        req->first = first;
        atomic_inc(&first->next_pending_count);

        spin_unlock_irqrestore(&sba->reqs_lock, flags);
}
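
/*
 * Chain accounting sketch: 'first' is both the chain head and the unit
 * of completion. Its next_pending_count starts at 1 (for 'first' itself,
 * set in sba_alloc_request()) and is bumped once per chained request,
 * so the chain only completes after sba_process_received_request() has
 * accounted for every member.
 */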

static void sba_cleanup_nonpending_requests(struct sba_device *sba)
{
        unsigned long flags;
        struct sba_request *req, *req1;

        spin_lock_irqsave(&sba->reqs_lock, flags);

        /* Free up all allocated requests */
        list_for_each_entry_safe(req, req1, &sba->reqs_alloc_list, node)
                _sba_free_request(sba, req);

        /* Free up all received requests */
        list_for_each_entry_safe(req, req1, &sba->reqs_received_list, node)
                _sba_free_request(sba, req);

        /* Free up all completed requests */
        list_for_each_entry_safe(req, req1, &sba->reqs_completed_list, node)
                _sba_free_request(sba, req);

        /* Set all active requests as aborted */
        list_for_each_entry_safe(req, req1, &sba->reqs_active_list, node)
                _sba_abort_request(sba, req);

        /*
         * Note: We expect that aborted requests will eventually be
         * freed by sba_receive_message()
         */

        spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_cleanup_pending_requests(struct sba_device *sba)
{
        unsigned long flags;
        struct sba_request *req, *req1;

        spin_lock_irqsave(&sba->reqs_lock, flags);

        /* Free up all pending requests */
        list_for_each_entry_safe(req, req1, &sba->reqs_pending_list, node)
                _sba_free_request(sba, req);

        spin_unlock_irqrestore(&sba->reqs_lock, flags);
}


static int sba_send_mbox_request(struct sba_device *sba,
                                 struct sba_request *req)
{
        int mchans_idx, ret = 0;

        /* Select a mailbox channel in round-robin fashion */
        mchans_idx = atomic_inc_return(&sba->mchans_current);
        mchans_idx = mchans_idx % sba->mchans_count;

        /* Send message for the request */
        req->msg.error = 0;
        ret = mbox_send_message(sba->mchans[mchans_idx], &req->msg);
        if (ret < 0) {
                dev_err(sba->dev, "send message failed with error %d", ret);
                return ret;
        }
        ret = req->msg.error;
        if (ret < 0) {
                dev_err(sba->dev, "message error %d", ret);
                return ret;
        }

        return 0;
}

static void sba_process_deferred_requests(struct sba_device *sba)
{
        int ret;
        u32 count;
        unsigned long flags;
        struct sba_request *req;
        struct dma_async_tx_descriptor *tx;

        spin_lock_irqsave(&sba->reqs_lock, flags);

        /* Count pending requests */
        count = 0;
        list_for_each_entry(req, &sba->reqs_pending_list, node)
                count++;

        /* Process pending requests */
        while (!list_empty(&sba->reqs_pending_list) && count) {
                /* Get the first pending request */
                req = list_first_entry(&sba->reqs_pending_list,
                                       struct sba_request, node);

                /* Try to make request active */
                if (!_sba_active_request(sba, req))
                        break;

                /* Send request to mailbox channel */
                spin_unlock_irqrestore(&sba->reqs_lock, flags);
                ret = sba_send_mbox_request(sba, req);
                spin_lock_irqsave(&sba->reqs_lock, flags);

                /* If something went wrong then keep request pending */
                if (ret < 0) {
                        _sba_pending_request(sba, req);
                        break;
                }

                count--;
        }

        /* Count completed requests */
        count = 0;
        list_for_each_entry(req, &sba->reqs_completed_list, node)
                count++;

        /* Process completed requests */
        while (!list_empty(&sba->reqs_completed_list) && count) {
                req = list_first_entry(&sba->reqs_completed_list,
                                       struct sba_request, node);
                list_del_init(&req->node);
                tx = &req->tx;

                spin_unlock_irqrestore(&sba->reqs_lock, flags);

                WARN_ON(tx->cookie < 0);
                if (tx->cookie > 0) {
                        dma_cookie_complete(tx);
                        dmaengine_desc_get_callback_invoke(tx, NULL);
                        dma_descriptor_unmap(tx);
                        tx->callback = NULL;
                        tx->callback_result = NULL;
                }

                dma_run_dependencies(tx);

                spin_lock_irqsave(&sba->reqs_lock, flags);

                /* If waiting for 'ack' then move to completed list */
                if (!async_tx_test_ack(&req->tx))
                        _sba_complete_request(sba, req);
                else
                        _sba_free_request(sba, req);

                count--;
        }

        /* Re-check pending and completed work */
        count = 0;
        if (!list_empty(&sba->reqs_pending_list) ||
            !list_empty(&sba->reqs_completed_list))
                count = 1;

        spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

static void sba_process_received_request(struct sba_device *sba,
                                         struct sba_request *req)
{
        unsigned long flags;

        spin_lock_irqsave(&sba->reqs_lock, flags);

        /* Mark request as received */
        _sba_received_request(sba, req);

        /* Update request */
        if (!atomic_dec_return(&req->first->next_pending_count))
                _sba_complete_request(sba, req->first);
        if (req->first != req)
                _sba_free_request(sba, req);

        spin_unlock_irqrestore(&sba->reqs_lock, flags);
}

/* ====== DMAENGINE callbacks ====== */

static void sba_free_chan_resources(struct dma_chan *dchan)
{
        /*
         * Channel resources are pre-allocated, so we just free up
         * whatever we can so that the pre-allocated channel resources
         * can be re-used next time.
         */
        sba_cleanup_nonpending_requests(to_sba_device(dchan));
}

static int sba_device_terminate_all(struct dma_chan *dchan)
{
        /* Cleanup all pending requests */
        sba_cleanup_pending_requests(to_sba_device(dchan));

        return 0;
}

static void sba_issue_pending(struct dma_chan *dchan)
{
        struct sba_device *sba = to_sba_device(dchan);

        /* Process deferred requests */
        sba_process_deferred_requests(sba);
}

static dma_cookie_t sba_tx_submit(struct dma_async_tx_descriptor *tx)
{
        unsigned long flags;
        dma_cookie_t cookie;
        struct sba_device *sba;
        struct sba_request *req, *nreq;

        if (unlikely(!tx))
                return -EINVAL;

        sba = to_sba_device(tx->chan);
        req = to_sba_request(tx);

        /* Assign cookie and mark all chained requests pending */
        spin_lock_irqsave(&sba->reqs_lock, flags);
        cookie = dma_cookie_assign(tx);
        _sba_pending_request(sba, req);
        list_for_each_entry(nreq, &req->next, next)
                _sba_pending_request(sba, nreq);
        spin_unlock_irqrestore(&sba->reqs_lock, flags);

        return cookie;
}

static enum dma_status sba_tx_status(struct dma_chan *dchan,
                                     dma_cookie_t cookie,
                                     struct dma_tx_state *txstate)
{
        enum dma_status ret;
        struct sba_device *sba = to_sba_device(dchan);

        ret = dma_cookie_status(dchan, cookie, txstate);
        if (ret == DMA_COMPLETE)
                return ret;

        sba_peek_mchans(sba);

        return dma_cookie_status(dchan, cookie, txstate);
}

static void sba_fillup_interrupt_msg(struct sba_request *req,
                                     struct brcm_sba_command *cmds,
                                     struct brcm_message *msg)
{
        u64 cmd;
        u32 c_mdata;
        dma_addr_t resp_dma = req->tx.phys;
        struct brcm_sba_command *cmdsp = cmds;

        /* Type-B command to load dummy data into buf0 */
        cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
        cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
        c_mdata = sba_cmd_load_c_mdata(0);
        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
        cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
                          SBA_CMD_SHIFT, SBA_CMD_MASK);
        cmdsp->cmd = cmd;
        *cmdsp->cmd_dma = cpu_to_le64(cmd);
        cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
        cmdsp->data = resp_dma;
        cmdsp->data_len = req->sba->hw_resp_size;
        cmdsp++;

        /* Type-A command to write buf0 to dummy location */
        cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
        cmd = sba_cmd_enc(cmd, req->sba->hw_resp_size,
                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
        cmd = sba_cmd_enc(cmd, 0x1,
                          SBA_RESP_SHIFT, SBA_RESP_MASK);
        c_mdata = sba_cmd_write_c_mdata(0);
        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
        cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
                          SBA_CMD_SHIFT, SBA_CMD_MASK);
        cmdsp->cmd = cmd;
        *cmdsp->cmd_dma = cpu_to_le64(cmd);
        cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
        if (req->sba->hw_resp_size) {
                cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
                cmdsp->resp = resp_dma;
                cmdsp->resp_len = req->sba->hw_resp_size;
        }
        cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
        cmdsp->data = resp_dma;
        cmdsp->data_len = req->sba->hw_resp_size;
        cmdsp++;

        /* Fillup brcm_message */
        msg->type = BRCM_MESSAGE_SBA;
        msg->sba.cmds = cmds;
        msg->sba.cmds_count = cmdsp - cmds;
        msg->ctx = req;
        msg->error = 0;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_interrupt(struct dma_chan *dchan, unsigned long flags)
{
        struct sba_request *req = NULL;
        struct sba_device *sba = to_sba_device(dchan);

        /* Alloc new request */
        req = sba_alloc_request(sba);
        if (!req)
                return NULL;

        /*
         * Force fence so that no requests are submitted
         * until the DMA callback for this request is invoked.
         */
        req->flags |= SBA_REQUEST_FENCE;

        /* Fillup request message */
        sba_fillup_interrupt_msg(req, req->cmds, &req->msg);

        /* Init async_tx descriptor */
        req->tx.flags = flags;
        req->tx.cookie = -EBUSY;

        return &req->tx;
}

static void sba_fillup_memcpy_msg(struct sba_request *req,
                                  struct brcm_sba_command *cmds,
                                  struct brcm_message *msg,
                                  dma_addr_t msg_offset, size_t msg_len,
                                  dma_addr_t dst, dma_addr_t src)
{
        u64 cmd;
        u32 c_mdata;
        dma_addr_t resp_dma = req->tx.phys;
        struct brcm_sba_command *cmdsp = cmds;

        /* Type-B command to load data into buf0 */
        cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
        cmd = sba_cmd_enc(cmd, msg_len,
                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
        c_mdata = sba_cmd_load_c_mdata(0);
        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
        cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
                          SBA_CMD_SHIFT, SBA_CMD_MASK);
        cmdsp->cmd = cmd;
        *cmdsp->cmd_dma = cpu_to_le64(cmd);
        cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
        cmdsp->data = src + msg_offset;
        cmdsp->data_len = msg_len;
        cmdsp++;

        /* Type-A command to write buf0 */
        cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
        cmd = sba_cmd_enc(cmd, msg_len,
                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
        cmd = sba_cmd_enc(cmd, 0x1,
                          SBA_RESP_SHIFT, SBA_RESP_MASK);
        c_mdata = sba_cmd_write_c_mdata(0);
        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
        cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
                          SBA_CMD_SHIFT, SBA_CMD_MASK);
        cmdsp->cmd = cmd;
        *cmdsp->cmd_dma = cpu_to_le64(cmd);
        cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
        if (req->sba->hw_resp_size) {
                cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
                cmdsp->resp = resp_dma;
                cmdsp->resp_len = req->sba->hw_resp_size;
        }
        cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
        cmdsp->data = dst + msg_offset;
        cmdsp->data_len = msg_len;
        cmdsp++;

        /* Fillup brcm_message */
        msg->type = BRCM_MESSAGE_SBA;
        msg->sba.cmds = cmds;
        msg->sba.cmds_count = cmdsp - cmds;
        msg->ctx = req;
        msg->error = 0;
}

static struct sba_request *
sba_prep_dma_memcpy_req(struct sba_device *sba,
                        dma_addr_t off, dma_addr_t dst, dma_addr_t src,
                        size_t len, unsigned long flags)
{
        struct sba_request *req = NULL;

        /* Alloc new request */
        req = sba_alloc_request(sba);
        if (!req)
                return NULL;
        if (flags & DMA_PREP_FENCE)
                req->flags |= SBA_REQUEST_FENCE;

        /* Fillup request message */
        sba_fillup_memcpy_msg(req, req->cmds, &req->msg,
                              off, len, dst, src);

        /* Init async_tx descriptor */
        req->tx.flags = flags;
        req->tx.cookie = -EBUSY;

        return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_memcpy(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t src,
                    size_t len, unsigned long flags)
{
        size_t req_len;
        dma_addr_t off = 0;
        struct sba_device *sba = to_sba_device(dchan);
        struct sba_request *first = NULL, *req;

        /* Create chained requests, where each request is up to hw_buf_size */
        while (len) {
                req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

                req = sba_prep_dma_memcpy_req(sba, off, dst, src,
                                              req_len, flags);
                if (!req) {
                        if (first)
                                sba_free_chained_requests(first);
                        return NULL;
                }

                if (first)
                        sba_chain_request(first, req);
                else
                        first = req;

                off += req_len;
                len -= req_len;
        }

        return (first) ? &first->tx : NULL;
}
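
/*
 * Illustrative client-side view (standard dmaengine API, not specific
 * to this driver): the chained sub-requests built above appear to the
 * client as a single descriptor.
 *
 *	desc = dmaengine_prep_dma_memcpy(chan, dst, src, len,
 *					 DMA_PREP_INTERRUPT);
 *	cookie = dmaengine_submit(desc);
 *	dma_async_issue_pending(chan);
 */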

static void sba_fillup_xor_msg(struct sba_request *req,
                               struct brcm_sba_command *cmds,
                               struct brcm_message *msg,
                               dma_addr_t msg_offset, size_t msg_len,
                               dma_addr_t dst, dma_addr_t *src, u32 src_cnt)
{
        u64 cmd;
        u32 c_mdata;
        unsigned int i;
        dma_addr_t resp_dma = req->tx.phys;
        struct brcm_sba_command *cmdsp = cmds;

        /* Type-B command to load data into buf0 */
        cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
        cmd = sba_cmd_enc(cmd, msg_len,
                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
        c_mdata = sba_cmd_load_c_mdata(0);
        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
        cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
                          SBA_CMD_SHIFT, SBA_CMD_MASK);
        cmdsp->cmd = cmd;
        *cmdsp->cmd_dma = cpu_to_le64(cmd);
        cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
        cmdsp->data = src[0] + msg_offset;
        cmdsp->data_len = msg_len;
        cmdsp++;

        /* Type-B commands to xor data with buf0 and put it back in buf0 */
        for (i = 1; i < src_cnt; i++) {
                cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                                  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                cmd = sba_cmd_enc(cmd, msg_len,
                                  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                c_mdata = sba_cmd_xor_c_mdata(0, 0);
                cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                                  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
                cmd = sba_cmd_enc(cmd, SBA_CMD_XOR,
                                  SBA_CMD_SHIFT, SBA_CMD_MASK);
                cmdsp->cmd = cmd;
                *cmdsp->cmd_dma = cpu_to_le64(cmd);
                cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
                cmdsp->data = src[i] + msg_offset;
                cmdsp->data_len = msg_len;
                cmdsp++;
        }

        /* Type-A command to write buf0 */
        cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
        cmd = sba_cmd_enc(cmd, msg_len,
                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
        cmd = sba_cmd_enc(cmd, 0x1,
                          SBA_RESP_SHIFT, SBA_RESP_MASK);
        c_mdata = sba_cmd_write_c_mdata(0);
        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
        cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
                          SBA_CMD_SHIFT, SBA_CMD_MASK);
        cmdsp->cmd = cmd;
        *cmdsp->cmd_dma = cpu_to_le64(cmd);
        cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
        if (req->sba->hw_resp_size) {
                cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
                cmdsp->resp = resp_dma;
                cmdsp->resp_len = req->sba->hw_resp_size;
        }
        cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
        cmdsp->data = dst + msg_offset;
        cmdsp->data_len = msg_len;
        cmdsp++;

        /* Fillup brcm_message */
        msg->type = BRCM_MESSAGE_SBA;
        msg->sba.cmds = cmds;
        msg->sba.cmds_count = cmdsp - cmds;
        msg->ctx = req;
        msg->error = 0;
}

static struct sba_request *
sba_prep_dma_xor_req(struct sba_device *sba,
                     dma_addr_t off, dma_addr_t dst, dma_addr_t *src,
                     u32 src_cnt, size_t len, unsigned long flags)
{
        struct sba_request *req = NULL;

        /* Alloc new request */
        req = sba_alloc_request(sba);
        if (!req)
                return NULL;
        if (flags & DMA_PREP_FENCE)
                req->flags |= SBA_REQUEST_FENCE;

        /* Fillup request message */
        sba_fillup_xor_msg(req, req->cmds, &req->msg,
                           off, len, dst, src, src_cnt);

        /* Init async_tx descriptor */
        req->tx.flags = flags;
        req->tx.cookie = -EBUSY;

        return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_xor(struct dma_chan *dchan, dma_addr_t dst, dma_addr_t *src,
                 u32 src_cnt, size_t len, unsigned long flags)
{
        size_t req_len;
        dma_addr_t off = 0;
        struct sba_device *sba = to_sba_device(dchan);
        struct sba_request *first = NULL, *req;

        /* Sanity checks */
        if (unlikely(src_cnt > sba->max_xor_srcs))
                return NULL;

        /* Create chained requests, where each request is up to hw_buf_size */
        while (len) {
                req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

                req = sba_prep_dma_xor_req(sba, off, dst, src, src_cnt,
                                           req_len, flags);
                if (!req) {
                        if (first)
                                sba_free_chained_requests(first);
                        return NULL;
                }

                if (first)
                        sba_chain_request(first, req);
                else
                        first = req;

                off += req_len;
                len -= req_len;
        }

        return (first) ? &first->tx : NULL;
}

static void sba_fillup_pq_msg(struct sba_request *req,
                              bool pq_continue,
                              struct brcm_sba_command *cmds,
                              struct brcm_message *msg,
                              dma_addr_t msg_offset, size_t msg_len,
                              dma_addr_t *dst_p, dma_addr_t *dst_q,
                              const u8 *scf, dma_addr_t *src, u32 src_cnt)
{
        u64 cmd;
        u32 c_mdata;
        unsigned int i;
        dma_addr_t resp_dma = req->tx.phys;
        struct brcm_sba_command *cmdsp = cmds;

        if (pq_continue) {
                /* Type-B command to load old P into buf0 */
                if (dst_p) {
                        cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                        cmd = sba_cmd_enc(cmd, msg_len,
                                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                        c_mdata = sba_cmd_load_c_mdata(0);
                        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
                        cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
                                          SBA_CMD_SHIFT, SBA_CMD_MASK);
                        cmdsp->cmd = cmd;
                        *cmdsp->cmd_dma = cpu_to_le64(cmd);
                        cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
                        cmdsp->data = *dst_p + msg_offset;
                        cmdsp->data_len = msg_len;
                        cmdsp++;
                }

                /* Type-B command to load old Q into buf1 */
                if (dst_q) {
                        cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                                          SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                        cmd = sba_cmd_enc(cmd, msg_len,
                                          SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                        c_mdata = sba_cmd_load_c_mdata(1);
                        cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                                          SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
                        cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER,
                                          SBA_CMD_SHIFT, SBA_CMD_MASK);
                        cmdsp->cmd = cmd;
                        *cmdsp->cmd_dma = cpu_to_le64(cmd);
                        cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
                        cmdsp->data = *dst_q + msg_offset;
                        cmdsp->data_len = msg_len;
                        cmdsp++;
                }
        } else {
                /* Type-A command to zero all buffers */
                cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
                                  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                cmd = sba_cmd_enc(cmd, msg_len,
                                  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS,
                                  SBA_CMD_SHIFT, SBA_CMD_MASK);
                cmdsp->cmd = cmd;
                *cmdsp->cmd_dma = cpu_to_le64(cmd);
                cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
                cmdsp++;
        }

        /* Type-B commands to generate P into buf0 and Q into buf1 */
        for (i = 0; i < src_cnt; i++) {
                cmd = sba_cmd_enc(0x0, SBA_TYPE_B,
                                  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                cmd = sba_cmd_enc(cmd, msg_len,
                                  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                c_mdata = sba_cmd_pq_c_mdata(raid6_gflog[scf[i]], 1, 0);
                cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                                  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
                cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata),
                                  SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK);
                cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS_XOR,
                                  SBA_CMD_SHIFT, SBA_CMD_MASK);
                cmdsp->cmd = cmd;
                *cmdsp->cmd_dma = cpu_to_le64(cmd);
                cmdsp->flags = BRCM_SBA_CMD_TYPE_B;
                cmdsp->data = src[i] + msg_offset;
                cmdsp->data_len = msg_len;
                cmdsp++;
        }

        /* Type-A command to write buf0 */
        if (dst_p) {
                cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
                                  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                cmd = sba_cmd_enc(cmd, msg_len,
                                  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                cmd = sba_cmd_enc(cmd, 0x1,
                                  SBA_RESP_SHIFT, SBA_RESP_MASK);
                c_mdata = sba_cmd_write_c_mdata(0);
                cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                                  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
                cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
                                  SBA_CMD_SHIFT, SBA_CMD_MASK);
                cmdsp->cmd = cmd;
                *cmdsp->cmd_dma = cpu_to_le64(cmd);
                cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
                if (req->sba->hw_resp_size) {
                        cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
                        cmdsp->resp = resp_dma;
                        cmdsp->resp_len = req->sba->hw_resp_size;
                }
                cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
                cmdsp->data = *dst_p + msg_offset;
                cmdsp->data_len = msg_len;
                cmdsp++;
        }

        /* Type-A command to write buf1 */
        if (dst_q) {
                cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
                                  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
                cmd = sba_cmd_enc(cmd, msg_len,
                                  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
                cmd = sba_cmd_enc(cmd, 0x1,
                                  SBA_RESP_SHIFT, SBA_RESP_MASK);
                c_mdata = sba_cmd_write_c_mdata(1);
                cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
                                  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
                cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
                                  SBA_CMD_SHIFT, SBA_CMD_MASK);
                cmdsp->cmd = cmd;
                *cmdsp->cmd_dma = cpu_to_le64(cmd);
                cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
                if (req->sba->hw_resp_size) {
                        cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
                        cmdsp->resp = resp_dma;
                        cmdsp->resp_len = req->sba->hw_resp_size;
                }
                cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
                cmdsp->data = *dst_q + msg_offset;
                cmdsp->data_len = msg_len;
                cmdsp++;
        }

        /* Fillup brcm_message */
        msg->type = BRCM_MESSAGE_SBA;
        msg->sba.cmds = cmds;
        msg->sba.cmds_count = cmdsp - cmds;
        msg->ctx = req;
        msg->error = 0;
}

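/*
 * Note on coefficients: the dmaengine PQ API hands over each source's
 * RAID-6 coefficient scf[i] as a GF(2^8) element (a power of the
 * generator), while the GALOIS_XOR command above is given
 * raid6_gflog[scf[i]], i.e. the discrete-log (exponent) form of that
 * same coefficient.
 */
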
static struct sba_request *
sba_prep_dma_pq_req(struct sba_device *sba, dma_addr_t off,
                    dma_addr_t *dst_p, dma_addr_t *dst_q, dma_addr_t *src,
                    u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
        struct sba_request *req = NULL;

        /* Alloc new request */
        req = sba_alloc_request(sba);
        if (!req)
                return NULL;
        if (flags & DMA_PREP_FENCE)
                req->flags |= SBA_REQUEST_FENCE;

        /* Fillup request messages */
        sba_fillup_pq_msg(req, dmaf_continue(flags),
                          req->cmds, &req->msg,
                          off, len, dst_p, dst_q, scf, src, src_cnt);

        /* Init async_tx descriptor */
        req->tx.flags = flags;
        req->tx.cookie = -EBUSY;

        return req;
}

| 1116 | static void sba_fillup_pq_single_msg(struct sba_request *req, |
| 1117 | bool pq_continue, |
| 1118 | struct brcm_sba_command *cmds, |
| 1119 | struct brcm_message *msg, |
| 1120 | dma_addr_t msg_offset, size_t msg_len, |
| 1121 | dma_addr_t *dst_p, dma_addr_t *dst_q, |
| 1122 | dma_addr_t src, u8 scf) |
| 1123 | { |
| 1124 | u64 cmd; |
| 1125 | u32 c_mdata; |
| 1126 | u8 pos, dpos = raid6_gflog[scf]; |
Anup Patel | e7ae72a | 2017-08-22 15:26:54 +0530 | [diff] [blame] | 1127 | dma_addr_t resp_dma = req->tx.phys; |
Anup Patel | 743e1c8 | 2017-05-15 10:34:54 +0530 | [diff] [blame] | 1128 | struct brcm_sba_command *cmdsp = cmds; |
| 1129 | |
| 1130 | if (!dst_p) |
| 1131 | goto skip_p; |
| 1132 | |
| 1133 | if (pq_continue) { |
| 1134 | /* Type-B command to load old P into buf0 */ |
| 1135 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, |
| 1136 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1137 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1138 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1139 | c_mdata = sba_cmd_load_c_mdata(0); |
| 1140 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1141 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1142 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, |
| 1143 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1144 | cmdsp->cmd = cmd; |
| 1145 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1146 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; |
| 1147 | cmdsp->data = *dst_p + msg_offset; |
| 1148 | cmdsp->data_len = msg_len; |
| 1149 | cmdsp++; |
| 1150 | |
| 1151 | /* |
| 1152 | * Type-B commands to xor data with buf0 and put it |
| 1153 | * back in buf0 |
| 1154 | */ |
| 1155 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, |
| 1156 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1157 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1158 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1159 | c_mdata = sba_cmd_xor_c_mdata(0, 0); |
| 1160 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1161 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1162 | cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, |
| 1163 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1164 | cmdsp->cmd = cmd; |
| 1165 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1166 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; |
| 1167 | cmdsp->data = src + msg_offset; |
| 1168 | cmdsp->data_len = msg_len; |
| 1169 | cmdsp++; |
| 1170 | } else { |
| 1171 | /* Type-B command to load old P into buf0 */ |
| 1172 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, |
| 1173 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1174 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1175 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1176 | c_mdata = sba_cmd_load_c_mdata(0); |
| 1177 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1178 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1179 | cmd = sba_cmd_enc(cmd, SBA_CMD_LOAD_BUFFER, |
| 1180 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1181 | cmdsp->cmd = cmd; |
| 1182 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1183 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; |
| 1184 | cmdsp->data = src + msg_offset; |
| 1185 | cmdsp->data_len = msg_len; |
| 1186 | cmdsp++; |
| 1187 | } |
| 1188 | |
| 1189 | /* Type-A command to write buf0 */ |
| 1190 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, |
| 1191 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1192 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1193 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1194 | cmd = sba_cmd_enc(cmd, 0x1, |
| 1195 | SBA_RESP_SHIFT, SBA_RESP_MASK); |
| 1196 | c_mdata = sba_cmd_write_c_mdata(0); |
| 1197 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1198 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1199 | cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER, |
| 1200 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1201 | cmdsp->cmd = cmd; |
| 1202 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1203 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
| 1204 | if (req->sba->hw_resp_size) { |
| 1205 | cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP; |
Anup Patel | e7ae72a | 2017-08-22 15:26:54 +0530 | [diff] [blame] | 1206 | cmdsp->resp = resp_dma; |
Anup Patel | 743e1c8 | 2017-05-15 10:34:54 +0530 | [diff] [blame] | 1207 | cmdsp->resp_len = req->sba->hw_resp_size; |
| 1208 | } |
| 1209 | cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT; |
| 1210 | cmdsp->data = *dst_p + msg_offset; |
| 1211 | cmdsp->data_len = msg_len; |
| 1212 | cmdsp++; |
| 1213 | |
| 1214 | skip_p: |
| 1215 | if (!dst_q) |
| 1216 | goto skip_q; |
| 1217 | |
| 1218 | /* Type-A command to zero all buffers */ |
| 1219 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, |
| 1220 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1221 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1222 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1223 | cmd = sba_cmd_enc(cmd, SBA_CMD_ZERO_ALL_BUFFERS, |
| 1224 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1225 | cmdsp->cmd = cmd; |
| 1226 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1227 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
| 1228 | cmdsp++; |
| 1229 | |
| 1230 | if (dpos == 255) |
| 1231 | goto skip_q_computation; |
| 1232 | pos = (dpos < req->sba->max_pq_coefs) ? |
| 1233 | dpos : (req->sba->max_pq_coefs - 1); |
| 1234 | |
| 1235 | /* |
| 1236 | * Type-B command to generate initial Q from data |
| 1237 | * and store output into buf0 |
| 1238 | */ |
| 1239 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, |
| 1240 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1241 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1242 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1243 | c_mdata = sba_cmd_pq_c_mdata(pos, 0, 0); |
| 1244 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1245 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1246 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), |
| 1247 | SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); |
| 1248 | cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, |
| 1249 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1250 | cmdsp->cmd = cmd; |
| 1251 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1252 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; |
| 1253 | cmdsp->data = src + msg_offset; |
| 1254 | cmdsp->data_len = msg_len; |
| 1255 | cmdsp++; |
| 1256 | |
| 1257 | dpos -= pos; |
| 1258 | |
| 1259 | /* Multiple Type-A command to generate final Q */ |
| 1260 | while (dpos) { |
| 1261 | pos = (dpos < req->sba->max_pq_coefs) ? |
| 1262 | dpos : (req->sba->max_pq_coefs - 1); |
| 1263 | |
| 1264 | /* |
| 1265 | * Type-A command to generate Q with buf0 and |
| 1266 | * buf1 store result in buf0 |
| 1267 | */ |
| 1268 | cmd = sba_cmd_enc(0x0, SBA_TYPE_A, |
| 1269 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1270 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1271 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1272 | c_mdata = sba_cmd_pq_c_mdata(pos, 0, 1); |
| 1273 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1274 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1275 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_MS(c_mdata), |
| 1276 | SBA_C_MDATA_MS_SHIFT, SBA_C_MDATA_MS_MASK); |
| 1277 | cmd = sba_cmd_enc(cmd, SBA_CMD_GALOIS, |
| 1278 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1279 | cmdsp->cmd = cmd; |
| 1280 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1281 | cmdsp->flags = BRCM_SBA_CMD_TYPE_A; |
| 1282 | cmdsp++; |
| 1283 | |
| 1284 | dpos -= pos; |
| 1285 | } |
| 1286 | |
| 1287 | skip_q_computation: |
| 1288 | if (pq_continue) { |
| 1289 | /* |
| 1290 | * Type-B command to XOR previous output with |
| 1291 | * buf0 and write it into buf0 |
| 1292 | */ |
| 1293 | cmd = sba_cmd_enc(0x0, SBA_TYPE_B, |
| 1294 | SBA_TYPE_SHIFT, SBA_TYPE_MASK); |
| 1295 | cmd = sba_cmd_enc(cmd, msg_len, |
| 1296 | SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK); |
| 1297 | c_mdata = sba_cmd_xor_c_mdata(0, 0); |
| 1298 | cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata), |
| 1299 | SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK); |
| 1300 | cmd = sba_cmd_enc(cmd, SBA_CMD_XOR, |
| 1301 | SBA_CMD_SHIFT, SBA_CMD_MASK); |
| 1302 | cmdsp->cmd = cmd; |
| 1303 | *cmdsp->cmd_dma = cpu_to_le64(cmd); |
| 1304 | cmdsp->flags = BRCM_SBA_CMD_TYPE_B; |
| 1305 | cmdsp->data = *dst_q + msg_offset; |
| 1306 | cmdsp->data_len = msg_len; |
| 1307 | cmdsp++; |
| 1308 | } |
| 1309 | |
	/* Type-A command to write buf0 */
	cmd = sba_cmd_enc(0x0, SBA_TYPE_A,
			  SBA_TYPE_SHIFT, SBA_TYPE_MASK);
	cmd = sba_cmd_enc(cmd, msg_len,
			  SBA_USER_DEF_SHIFT, SBA_USER_DEF_MASK);
	cmd = sba_cmd_enc(cmd, 0x1,
			  SBA_RESP_SHIFT, SBA_RESP_MASK);
	c_mdata = sba_cmd_write_c_mdata(0);
	cmd = sba_cmd_enc(cmd, SBA_C_MDATA_LS(c_mdata),
			  SBA_C_MDATA_SHIFT, SBA_C_MDATA_MASK);
	cmd = sba_cmd_enc(cmd, SBA_CMD_WRITE_BUFFER,
			  SBA_CMD_SHIFT, SBA_CMD_MASK);
	cmdsp->cmd = cmd;
	*cmdsp->cmd_dma = cpu_to_le64(cmd);
	cmdsp->flags = BRCM_SBA_CMD_TYPE_A;
	if (req->sba->hw_resp_size) {
		cmdsp->flags |= BRCM_SBA_CMD_HAS_RESP;
		cmdsp->resp = resp_dma;
		cmdsp->resp_len = req->sba->hw_resp_size;
	}
	cmdsp->flags |= BRCM_SBA_CMD_HAS_OUTPUT;
	cmdsp->data = *dst_q + msg_offset;
	cmdsp->data_len = msg_len;
	cmdsp++;

skip_q:
	/* Fill up the brcm_message */
	msg->type = BRCM_MESSAGE_SBA;
	msg->sba.cmds = cmds;
	msg->sba.cmds_count = cmdsp - cmds;
	msg->ctx = req;
	msg->error = 0;
}

static struct sba_request *
sba_prep_dma_pq_single_req(struct sba_device *sba, dma_addr_t off,
			   dma_addr_t *dst_p, dma_addr_t *dst_q,
			   dma_addr_t src, u8 scf, size_t len,
			   unsigned long flags)
{
	struct sba_request *req = NULL;

	/* Alloc new request */
	req = sba_alloc_request(sba);
	if (!req)
		return NULL;
	if (flags & DMA_PREP_FENCE)
		req->flags |= SBA_REQUEST_FENCE;

	/* Fill up the request message */
	sba_fillup_pq_single_msg(req, dmaf_continue(flags),
				 req->cmds, &req->msg, off, len,
				 dst_p, dst_q, src, scf);

	/* Init async_tx descriptor */
	req->tx.flags = flags;
	req->tx.cookie = -EBUSY;

	return req;
}

static struct dma_async_tx_descriptor *
sba_prep_dma_pq(struct dma_chan *dchan, dma_addr_t *dst, dma_addr_t *src,
		u32 src_cnt, const u8 *scf, size_t len, unsigned long flags)
{
	u32 i, dst_q_index;
	size_t req_len;
	bool slow = false;
	dma_addr_t off = 0;
	dma_addr_t *dst_p = NULL, *dst_q = NULL;
	struct sba_device *sba = to_sba_device(dchan);
	struct sba_request *first = NULL, *req;

	/* Sanity checks */
	if (unlikely(src_cnt > sba->max_pq_srcs))
		return NULL;
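	/*
	 * raid6_gflog[] gives the exponent of each coefficient scf[i]
	 * with respect to the RAID-6 generator. If any exponent is too
	 * large for the hardware to apply in one request (that is, it
	 * is >= max_pq_coefs), fall back to the slow path below, which
	 * issues one single-source request per input rather than a
	 * single multi-source request.
	 */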
	for (i = 0; i < src_cnt; i++)
		if (sba->max_pq_coefs <= raid6_gflog[scf[i]])
			slow = true;

	/*
	 * Figure out the P and Q destination addresses; per the
	 * dmaengine PQ convention, dst[0] is P and dst[1] is Q.
	 */
	if (!(flags & DMA_PREP_PQ_DISABLE_P))
		dst_p = &dst[0];
	if (!(flags & DMA_PREP_PQ_DISABLE_Q))
		dst_q = &dst[1];

	/* Create chained requests where each request is up to hw_buf_size */
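	/*
	 * For example, with hw_buf_size = 4096 a 10000-byte operation is
	 * split into three chained requests covering 4096, 4096 and 1808
	 * bytes at offsets 0, 4096 and 8192 respectively.
	 */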
	while (len) {
		req_len = (len < sba->hw_buf_size) ? len : sba->hw_buf_size;

		if (slow) {
			dst_q_index = src_cnt;

			if (dst_q) {
				for (i = 0; i < src_cnt; i++) {
					if (*dst_q == src[i]) {
						dst_q_index = i;
						break;
					}
				}
			}

			if (dst_q_index < src_cnt) {
				i = dst_q_index;
				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}

			for (i = 0; i < src_cnt; i++) {
				if (dst_q_index == i)
					continue;

				req = sba_prep_dma_pq_single_req(sba,
					off, dst_p, dst_q, src[i], scf[i],
					req_len, flags | DMA_PREP_FENCE);
				if (!req)
					goto fail;

				if (first)
					sba_chain_request(first, req);
				else
					first = req;

				flags |= DMA_PREP_CONTINUE;
			}
		} else {
			req = sba_prep_dma_pq_req(sba, off,
						  dst_p, dst_q, src, src_cnt,
						  scf, req_len, flags);
			if (!req)
				goto fail;

			if (first)
				sba_chain_request(first, req);
			else
				first = req;
		}

		off += req_len;
		len -= req_len;
	}

	return (first) ? &first->tx : NULL;

fail:
	if (first)
		sba_free_chained_requests(first);
	return NULL;
}

/* ====== Mailbox callbacks ===== */

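/*
 * Mailbox rx_callback, registered as sba->client.rx_callback in
 * sba_probe() below; the originating request travels back in m->ctx,
 * which the message fill-up helpers above store when building each
 * brcm_message.
 */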
static void sba_receive_message(struct mbox_client *cl, void *msg)
{
	struct brcm_message *m = msg;
	struct sba_request *req = m->ctx;
	struct sba_device *sba = req->sba;

	/* Log an error if the message carries one */
	if (m->error < 0)
		dev_err(sba->dev, "%s got message with error %d\n",
			dma_chan_name(&sba->dma_chan), m->error);

	/* Process received request */
	sba_process_received_request(sba, req);

	/* Process deferred requests */
	sba_process_deferred_requests(sba);
}

/* ====== Platform driver routines ===== */

static int sba_prealloc_channel_resources(struct sba_device *sba)
{
	int i, j, ret = 0;
	struct sba_request *req = NULL;

	sba->resp_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_resp_pool_size,
					    &sba->resp_dma_base, GFP_KERNEL);
	if (!sba->resp_base)
		return -ENOMEM;

	sba->cmds_base = dma_alloc_coherent(sba->mbox_dev,
					    sba->max_cmds_pool_size,
					    &sba->cmds_dma_base, GFP_KERNEL);
	if (!sba->cmds_base) {
		ret = -ENOMEM;
		goto fail_free_resp_pool;
	}

	spin_lock_init(&sba->reqs_lock);
	sba->reqs_fence = false;
	INIT_LIST_HEAD(&sba->reqs_alloc_list);
	INIT_LIST_HEAD(&sba->reqs_pending_list);
	INIT_LIST_HEAD(&sba->reqs_active_list);
	INIT_LIST_HEAD(&sba->reqs_received_list);
	INIT_LIST_HEAD(&sba->reqs_completed_list);
	INIT_LIST_HEAD(&sba->reqs_aborted_list);
	INIT_LIST_HEAD(&sba->reqs_free_list);

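	/*
	 * Carve the coherent pools into fixed per-request slices: request
	 * i owns max_cmd_per_req u64 command slots starting at byte offset
	 * i * max_cmd_per_req * sizeof(u64) of the cmds pool, plus one
	 * hw_resp_size response slot in the resp pool. For example, with
	 * max_cmd_per_req = 15 (SBA_VER_2), command j = 3 of request i = 2
	 * lives at byte offset (2 * 15 + 3) * 8 = 264.
	 */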
	for (i = 0; i < sba->max_req; i++) {
		req = devm_kzalloc(sba->dev,
				   sizeof(*req) +
				   sba->max_cmd_per_req * sizeof(req->cmds[0]),
				   GFP_KERNEL);
		if (!req) {
			ret = -ENOMEM;
			goto fail_free_cmds_pool;
		}
		INIT_LIST_HEAD(&req->node);
		req->sba = sba;
		req->flags = SBA_REQUEST_STATE_FREE;
		INIT_LIST_HEAD(&req->next);
		atomic_set(&req->next_pending_count, 0);
		for (j = 0; j < sba->max_cmd_per_req; j++) {
			req->cmds[j].cmd = 0;
			req->cmds[j].cmd_dma = sba->cmds_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].cmd_dma_addr = sba->cmds_dma_base +
				(i * sba->max_cmd_per_req + j) * sizeof(u64);
			req->cmds[j].flags = 0;
		}
		memset(&req->msg, 0, sizeof(req->msg));
		dma_async_tx_descriptor_init(&req->tx, &sba->dma_chan);
		req->tx.tx_submit = sba_tx_submit;
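		/*
		 * Park the per-request response buffer address in
		 * tx.phys; the message fill-up helpers use it as the
		 * response slot (resp_dma) for commands that request
		 * a hardware response.
		 */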
		req->tx.phys = sba->resp_dma_base + i * sba->hw_resp_size;
		list_add_tail(&req->node, &sba->reqs_free_list);
	}

	return 0;

fail_free_cmds_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
fail_free_resp_pool:
	dma_free_coherent(sba->mbox_dev,
			  sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	return ret;
}

static void sba_freeup_channel_resources(struct sba_device *sba)
{
	dmaengine_terminate_all(&sba->dma_chan);
	dma_free_coherent(sba->mbox_dev, sba->max_cmds_pool_size,
			  sba->cmds_base, sba->cmds_dma_base);
	dma_free_coherent(sba->mbox_dev, sba->max_resp_pool_size,
			  sba->resp_base, sba->resp_dma_base);
	sba->resp_base = NULL;
	sba->resp_dma_base = 0;
}

static int sba_async_register(struct sba_device *sba)
{
	int ret;
	struct dma_device *dma_dev = &sba->dma_dev;

	/* Initialize DMA channel cookie */
	sba->dma_chan.device = dma_dev;
	dma_cookie_init(&sba->dma_chan);

	/* Initialize DMA device capability mask */
	dma_cap_zero(dma_dev->cap_mask);
	dma_cap_set(DMA_INTERRUPT, dma_dev->cap_mask);
	dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
	dma_cap_set(DMA_XOR, dma_dev->cap_mask);
	dma_cap_set(DMA_PQ, dma_dev->cap_mask);

	/*
	 * Set the mailbox channel device as the base device of our
	 * dma_device because the actual memory accesses will be done
	 * by the mailbox controller.
	 */
	dma_dev->dev = sba->mbox_dev;

	/* Set base routines */
	dma_dev->device_free_chan_resources = sba_free_chan_resources;
	dma_dev->device_terminate_all = sba_device_terminate_all;
	dma_dev->device_issue_pending = sba_issue_pending;
	dma_dev->device_tx_status = sba_tx_status;

	/* Set interrupt routine */
	if (dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask))
		dma_dev->device_prep_dma_interrupt = sba_prep_dma_interrupt;

	/* Set memcpy routine */
	if (dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask))
		dma_dev->device_prep_dma_memcpy = sba_prep_dma_memcpy;

	/* Set xor routine and capability */
	if (dma_has_cap(DMA_XOR, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_xor = sba_prep_dma_xor;
		dma_dev->max_xor = sba->max_xor_srcs;
	}

	/* Set pq routine and capability */
	if (dma_has_cap(DMA_PQ, dma_dev->cap_mask)) {
		dma_dev->device_prep_dma_pq = sba_prep_dma_pq;
		dma_set_maxpq(dma_dev, sba->max_pq_srcs, 0);
	}

	/* Initialize DMA device channel list */
	INIT_LIST_HEAD(&dma_dev->channels);
	list_add_tail(&sba->dma_chan.device_node, &dma_dev->channels);

	/* Register with the Linux async DMA framework */
	ret = dma_async_device_register(dma_dev);
	if (ret) {
		dev_err(sba->dev, "async device register error %d\n", ret);
		return ret;
	}

	dev_info(sba->dev, "%s capabilities: %s%s%s%s\n",
		 dma_chan_name(&sba->dma_chan),
		 dma_has_cap(DMA_INTERRUPT, dma_dev->cap_mask) ? "interrupt " : "",
		 dma_has_cap(DMA_MEMCPY, dma_dev->cap_mask) ? "memcpy " : "",
		 dma_has_cap(DMA_XOR, dma_dev->cap_mask) ? "xor " : "",
		 dma_has_cap(DMA_PQ, dma_dev->cap_mask) ? "pq " : "");

	return 0;
}

static int sba_probe(struct platform_device *pdev)
{
	int i, ret = 0, mchans_count;
	struct sba_device *sba;
	struct platform_device *mbox_pdev;
	struct of_phandle_args args;

	/* Allocate main SBA struct */
	sba = devm_kzalloc(&pdev->dev, sizeof(*sba), GFP_KERNEL);
	if (!sba)
		return -ENOMEM;

	sba->dev = &pdev->dev;
	platform_set_drvdata(pdev, sba);

	/* Number of channels equals number of mailbox channels */
	ret = of_count_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells");
	if (ret <= 0)
		return -ENODEV;
	mchans_count = ret;
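
	/*
	 * A hypothetical device-tree node wired to four ring manager
	 * channels, which would yield mchans_count = 4 here (the node
	 * name, phandle and cell values are illustrative only, not
	 * taken from a real DTS):
	 *
	 *	raid@0 {
	 *		compatible = "brcm,iproc-sba-v2";
	 *		mboxes = <&raid_mbox 0 0x1 0xffff>,
	 *			 <&raid_mbox 1 0x1 0xffff>,
	 *			 <&raid_mbox 2 0x1 0xffff>,
	 *			 <&raid_mbox 3 0x1 0xffff>;
	 *	};
	 */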

	/* Determine SBA version from DT compatible string */
	if (of_device_is_compatible(sba->dev->of_node, "brcm,iproc-sba"))
		sba->ver = SBA_VER_1;
	else if (of_device_is_compatible(sba->dev->of_node,
					 "brcm,iproc-sba-v2"))
		sba->ver = SBA_VER_2;
	else
		return -ENODEV;

	/* Derived configuration parameters */
	switch (sba->ver) {
	case SBA_VER_1:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 6;
		sba->max_pq_srcs = 6;
		break;
	case SBA_VER_2:
		sba->hw_buf_size = 4096;
		sba->hw_resp_size = 8;
		sba->max_pq_coefs = 30;
		/*
		 * We could support max_pq_srcs == max_pq_coefs, but
		 * we are limited by the number of SBA commands that
		 * we can fit in one message for the underlying ring
		 * manager hardware.
		 */
		sba->max_pq_srcs = 12;
		break;
	default:
		return -EINVAL;
	}
	sba->max_req = SBA_MAX_REQ_PER_MBOX_CHANNEL * mchans_count;
	sba->max_cmd_per_req = sba->max_pq_srcs + 3;
	sba->max_xor_srcs = sba->max_cmd_per_req - 1;
	sba->max_resp_pool_size = sba->max_req * sba->hw_resp_size;
	sba->max_cmds_pool_size = sba->max_req *
		sba->max_cmd_per_req * sizeof(u64);
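	/*
	 * Worked example for SBA_VER_2: max_cmd_per_req = 12 + 3 = 15 and
	 * max_xor_srcs = 14, so each request needs 15 * 8 = 120 bytes of
	 * command pool and 8 bytes of response pool; the total pool sizes
	 * then scale linearly with max_req (and hence with the number of
	 * mailbox channels).
	 */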

	/* Setup mailbox client */
	sba->client.dev = &pdev->dev;
	sba->client.rx_callback = sba_receive_message;
	sba->client.tx_block = false;
	sba->client.knows_txdone = false;
	sba->client.tx_tout = 0;

	/* Allocate mailbox channel array */
	sba->mchans = devm_kcalloc(&pdev->dev, mchans_count,
				   sizeof(*sba->mchans), GFP_KERNEL);
	if (!sba->mchans)
		return -ENOMEM;

	/* Request mailbox channels */
	sba->mchans_count = 0;
	for (i = 0; i < mchans_count; i++) {
		sba->mchans[i] = mbox_request_channel(&sba->client, i);
		if (IS_ERR(sba->mchans[i])) {
			ret = PTR_ERR(sba->mchans[i]);
			goto fail_free_mchans;
		}
		sba->mchans_count++;
	}
	atomic_set(&sba->mchans_current, 0);

	/* Find out the underlying mailbox device */
	ret = of_parse_phandle_with_args(pdev->dev.of_node,
					 "mboxes", "#mbox-cells", 0, &args);
	if (ret)
		goto fail_free_mchans;
	mbox_pdev = of_find_device_by_node(args.np);
	of_node_put(args.np);
	if (!mbox_pdev) {
		ret = -ENODEV;
		goto fail_free_mchans;
	}
	sba->mbox_dev = &mbox_pdev->dev;

	/* All mailbox channels should belong to the same ring manager device */
	for (i = 1; i < mchans_count; i++) {
		ret = of_parse_phandle_with_args(pdev->dev.of_node,
						 "mboxes", "#mbox-cells", i, &args);
		if (ret)
			goto fail_free_mchans;
		mbox_pdev = of_find_device_by_node(args.np);
		of_node_put(args.np);
		if (sba->mbox_dev != &mbox_pdev->dev) {
			ret = -EINVAL;
			goto fail_free_mchans;
		}
	}

	/* Prealloc channel resources */
	ret = sba_prealloc_channel_resources(sba);
	if (ret)
		goto fail_free_mchans;

	/* Register DMA device with the Linux async framework */
	ret = sba_async_register(sba);
	if (ret)
		goto fail_free_resources;

	/* Print device info */
	dev_info(sba->dev, "%s using SBAv%d and %d mailbox channels\n",
		 dma_chan_name(&sba->dma_chan), sba->ver + 1,
		 sba->mchans_count);

	return 0;

fail_free_resources:
	sba_freeup_channel_resources(sba);
fail_free_mchans:
	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);
	return ret;
}

static int sba_remove(struct platform_device *pdev)
{
	int i;
	struct sba_device *sba = platform_get_drvdata(pdev);

	dma_async_device_unregister(&sba->dma_dev);

	sba_freeup_channel_resources(sba);

	for (i = 0; i < sba->mchans_count; i++)
		mbox_free_channel(sba->mchans[i]);

	return 0;
}

static const struct of_device_id sba_of_match[] = {
	{ .compatible = "brcm,iproc-sba", },
	{ .compatible = "brcm,iproc-sba-v2", },
	{},
};
MODULE_DEVICE_TABLE(of, sba_of_match);

static struct platform_driver sba_driver = {
	.probe = sba_probe,
	.remove = sba_remove,
	.driver = {
		.name = "bcm-sba-raid",
		.of_match_table = sba_of_match,
	},
};
module_platform_driver(sba_driver);

MODULE_DESCRIPTION("Broadcom SBA RAID driver");
MODULE_AUTHOR("Anup Patel <anup.patel@broadcom.com>");
MODULE_LICENSE("GPL v2");