| 1 | /* |
| 2 | * Intel MIC Platform Software Stack (MPSS) |
| 3 | * |
| 4 | * Copyright(c) 2015 Intel Corporation. |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License, version 2, as |
| 8 | * published by the Free Software Foundation. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, but |
| 11 | * WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 13 | * General Public License for more details. |
| 14 | * |
| 15 | * Intel SCIF driver. |
| 16 | * |
| 17 | */ |
| 18 | #include "scif_main.h" |
| 19 | #include "scif_map.h" |
| 20 | |
| 21 | /* |
| 22 | * struct scif_dma_comp_cb - SCIF DMA completion callback |
| 23 | * |
| 24 | * @dma_completion_func: DMA completion callback |
| 25 | * @cb_cookie: DMA completion callback cookie |
| 26 | * @temp_buf: Temporary buffer |
| 27 | * @temp_buf_to_free: Temporary buffer to be freed |
| 28 | * @is_cache: true if the buffer was allocated from the kmem_cache |
| 29 | * @dst_offset: Destination registration offset |
| 30 | * @dst_window: Destination registration window |
| 31 | * @len: Length of the temp buffer |
| 32 | * @temp_phys: DMA address of the temp buffer |
| 33 | * @sdev: The SCIF device |
| 34 | * @header_padding: padding for cache line alignment |
| 35 | */ |
| 36 | struct scif_dma_comp_cb { |
| 37 | void (*dma_completion_func)(void *cookie); |
| 38 | void *cb_cookie; |
| 39 | u8 *temp_buf; |
| 40 | u8 *temp_buf_to_free; |
| 41 | bool is_cache; |
| 42 | s64 dst_offset; |
| 43 | struct scif_window *dst_window; |
| 44 | size_t len; |
| 45 | dma_addr_t temp_phys; |
| 46 | struct scif_dev *sdev; |
| 47 | int header_padding; |
| 48 | }; |
| 49 | |
| 50 | /** |
| 51 | * struct scif_copy_work - Work for DMA copy |
| 52 | * |
| 53 | * @src_offset: Starting source offset |
| 54 | * @dst_offset: Starting destination offset |
| 55 | * @src_window: Starting src registered window |
| 56 | * @dst_window: Starting dst registered window |
| 57 | * @loopback: true if this is a loopback DMA transfer |
| 58 | * @len: Length of the transfer |
| 59 | * @comp_cb: DMA copy completion callback |
| 60 | * @remote_dev: The remote SCIF peer device |
| 61 | * @fence_type: polling or interrupt based DMA fence |
| 62 | * @ordered: true if this is a tail-byte-ordered DMA transfer |
| 63 | */ |
| 64 | struct scif_copy_work { |
| 65 | s64 src_offset; |
| 66 | s64 dst_offset; |
| 67 | struct scif_window *src_window; |
| 68 | struct scif_window *dst_window; |
| 69 | int loopback; |
| 70 | size_t len; |
| 71 | struct scif_dma_comp_cb *comp_cb; |
| 72 | struct scif_dev *remote_dev; |
| 73 | int fence_type; |
| 74 | bool ordered; |
| 75 | }; |
| 76 | |
| 77 | /** |
| 78 | * scif_reserve_dma_chan: |
| 79 | * @ep: Endpoint Descriptor. |
| 80 | * |
| 81 | * This routine reserves a DMA channel for a particular |
| 82 | * endpoint. All DMA transfers for an endpoint are always |
| 83 | * programmed on the same DMA channel. |
| 84 | */ |
| 85 | int scif_reserve_dma_chan(struct scif_endpt *ep) |
| 86 | { |
| 87 | int err = 0; |
| 88 | struct scif_dev *scifdev; |
| 89 | struct scif_hw_dev *sdev; |
| 90 | struct dma_chan *chan; |
| 91 | |
| 92 | /* Loopback DMAs are not supported on the management node */ |
| 93 | if (!scif_info.nodeid && scifdev_self(ep->remote_dev)) |
| 94 | return 0; |
| 95 | if (scif_info.nodeid) |
| 96 | scifdev = &scif_dev[0]; |
| 97 | else |
| 98 | scifdev = ep->remote_dev; |
| 99 | sdev = scifdev->sdev; |
| 100 | if (!sdev->num_dma_ch) |
| 101 | return -ENODEV; |
| 102 | chan = sdev->dma_ch[scifdev->dma_ch_idx]; |
| 103 | scifdev->dma_ch_idx = (scifdev->dma_ch_idx + 1) % sdev->num_dma_ch; |
| 104 | mutex_lock(&ep->rma_info.rma_lock); |
| 105 | ep->rma_info.dma_chan = chan; |
| 106 | mutex_unlock(&ep->rma_info.rma_lock); |
| 107 | return err; |
| 108 | } |
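/*
 * Channel selection above: on a card node (scif_info.nodeid != 0) the
 * endpoint always draws its channel from the management node's device
 * (scif_dev[0]); on the management node the remote device supplies it.
 * dma_ch_idx is advanced round-robin so endpoints spread across the
 * available engines, while each endpoint keeps all of its transfers on
 * the single channel cached in rma_info.dma_chan. The early return skips
 * reservation entirely for loopback on the management node, presumably
 * because such transfers are serviced by CPU copies instead.
 */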
| 109 | |
| 110 | #ifdef CONFIG_MMU_NOTIFIER |
| 111 | /** |
| 112 | * __scif_rma_destroy_tcw: |
| 113 | * |
| 114 | * This routine destroys temporary cached windows overlapping [start, start + len) |
| 115 | */ |
| 116 | static |
| 117 | void __scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, |
| 118 | u64 start, u64 len) |
| 119 | { |
| 120 | struct list_head *item, *tmp; |
| 121 | struct scif_window *window; |
| 122 | u64 start_va, end_va; |
| 123 | u64 end = start + len; |
| 124 | |
| 125 | if (end <= start) |
| 126 | return; |
| 127 | |
| 128 | list_for_each_safe(item, tmp, &mmn->tc_reg_list) { |
| 129 | window = list_entry(item, struct scif_window, list); |
| 130 | if (!len) |
| 131 | break; |
| 132 | start_va = window->va_for_temp; |
| 133 | end_va = start_va + (window->nr_pages << PAGE_SHIFT); |
| 134 | if (start < start_va && end <= start_va) |
| 135 | break; |
| 136 | if (start >= end_va) |
| 137 | continue; |
| 138 | __scif_rma_destroy_tcw_helper(window); |
| 139 | } |
| 140 | } |
| 141 | |
| 142 | static void scif_rma_destroy_tcw(struct scif_mmu_notif *mmn, u64 start, u64 len) |
| 143 | { |
| 144 | struct scif_endpt *ep = mmn->ep; |
| 145 | |
| 146 | spin_lock(&ep->rma_info.tc_lock); |
| 147 | __scif_rma_destroy_tcw(mmn, start, len); |
| 148 | spin_unlock(&ep->rma_info.tc_lock); |
| 149 | } |
| 150 | |
| 151 | static void scif_rma_destroy_tcw_ep(struct scif_endpt *ep) |
| 152 | { |
| 153 | struct list_head *item, *tmp; |
| 154 | struct scif_mmu_notif *mmn; |
| 155 | |
| 156 | list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) { |
| 157 | mmn = list_entry(item, struct scif_mmu_notif, list); |
| 158 | scif_rma_destroy_tcw(mmn, 0, ULONG_MAX); |
| 159 | } |
| 160 | } |
| 161 | |
| 162 | static void __scif_rma_destroy_tcw_ep(struct scif_endpt *ep) |
| 163 | { |
| 164 | struct list_head *item, *tmp; |
| 165 | struct scif_mmu_notif *mmn; |
| 166 | |
| 167 | spin_lock(&ep->rma_info.tc_lock); |
| 168 | list_for_each_safe(item, tmp, &ep->rma_info.mmn_list) { |
| 169 | mmn = list_entry(item, struct scif_mmu_notif, list); |
| 170 | __scif_rma_destroy_tcw(mmn, 0, ULONG_MAX); |
| 171 | } |
| 172 | spin_unlock(&ep->rma_info.tc_lock); |
| 173 | } |
| 174 | |
| 175 | static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes) |
| 176 | { |
| 177 | if ((cur_bytes >> PAGE_SHIFT) > scif_info.rma_tc_limit) |
| 178 | return false; |
| 179 | if ((atomic_read(&ep->rma_info.tcw_total_pages) |
| 180 | + (cur_bytes >> PAGE_SHIFT)) > |
| 181 | scif_info.rma_tc_limit) { |
| 182 | dev_info(scif_info.mdev.this_device, |
| 183 | "%s %d total=%d, current=%zu reached max\n", |
| 184 | __func__, __LINE__, |
| 185 | atomic_read(&ep->rma_info.tcw_total_pages), |
| 186 | (1 + (cur_bytes >> PAGE_SHIFT))); |
| 187 | scif_rma_destroy_tcw_invalid(); |
| 188 | __scif_rma_destroy_tcw_ep(ep); |
| 189 | } |
| 190 | return true; |
| 191 | } |
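/*
 * Caching policy sketch for temporary cached windows: a single transfer
 * larger than scif_info.rma_tc_limit pages is never cached. If adding
 * this transfer would push the endpoint's cumulative cached page count
 * over the limit, the invalid cached windows are reclaimed and this
 * endpoint's temporary cached windows are torn down first, after which
 * the transfer is still allowed to be cached.
 */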
| 192 | |
| 193 | static void scif_mmu_notifier_release(struct mmu_notifier *mn, |
| 194 | struct mm_struct *mm) |
| 195 | { |
| 196 | struct scif_mmu_notif *mmn; |
| 197 | |
| 198 | mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); |
| 199 | scif_rma_destroy_tcw(mmn, 0, ULONG_MAX); |
| 200 | schedule_work(&scif_info.misc_work); |
| 201 | } |
| 202 | |
| 203 | static void scif_mmu_notifier_invalidate_page(struct mmu_notifier *mn, |
| 204 | struct mm_struct *mm, |
| 205 | unsigned long address) |
| 206 | { |
| 207 | struct scif_mmu_notif *mmn; |
| 208 | |
| 209 | mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); |
| 210 | scif_rma_destroy_tcw(mmn, address, PAGE_SIZE); |
| 211 | } |
| 212 | |
| 213 | static void scif_mmu_notifier_invalidate_range_start(struct mmu_notifier *mn, |
| 214 | struct mm_struct *mm, |
| 215 | unsigned long start, |
| 216 | unsigned long end) |
| 217 | { |
| 218 | struct scif_mmu_notif *mmn; |
| 219 | |
| 220 | mmn = container_of(mn, struct scif_mmu_notif, ep_mmu_notifier); |
| 221 | scif_rma_destroy_tcw(mmn, start, end - start); |
| 222 | } |
| 223 | |
| 224 | static void scif_mmu_notifier_invalidate_range_end(struct mmu_notifier *mn, |
| 225 | struct mm_struct *mm, |
| 226 | unsigned long start, |
| 227 | unsigned long end) |
| 228 | { |
| 229 | /* |
| 230 | * Nothing to do here, everything needed was done in |
| 231 | * invalidate_range_start. |
| 232 | */ |
| 233 | } |
| 234 | |
| 235 | static const struct mmu_notifier_ops scif_mmu_notifier_ops = { |
| 236 | .release = scif_mmu_notifier_release, |
| 237 | .clear_flush_young = NULL, |
| 238 | .invalidate_page = scif_mmu_notifier_invalidate_page, |
| 239 | .invalidate_range_start = scif_mmu_notifier_invalidate_range_start, |
| 240 | .invalidate_range_end = scif_mmu_notifier_invalidate_range_end}; |
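/*
 * MMU notifier flow: whenever the VA range backing a temporary cached
 * window is invalidated, or the whole mm is released, the overlapping
 * cached windows are destroyed under tc_lock. The release hook also
 * schedules scif_info.misc_work so that scif_mmu_notif_handler() can
 * finish the cleanup in process context.
 */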
| 241 | |
| 242 | static void scif_ep_unregister_mmu_notifier(struct scif_endpt *ep) |
| 243 | { |
| 244 | struct scif_endpt_rma_info *rma = &ep->rma_info; |
| 245 | struct scif_mmu_notif *mmn = NULL; |
| 246 | struct list_head *item, *tmp; |
| 247 | |
| 248 | mutex_lock(&ep->rma_info.mmn_lock); |
| 249 | list_for_each_safe(item, tmp, &rma->mmn_list) { |
| 250 | mmn = list_entry(item, struct scif_mmu_notif, list); |
| 251 | mmu_notifier_unregister(&mmn->ep_mmu_notifier, mmn->mm); |
| 252 | list_del(item); |
| 253 | kfree(mmn); |
| 254 | } |
| 255 | mutex_unlock(&ep->rma_info.mmn_lock); |
| 256 | } |
| 257 | |
| 258 | static void scif_init_mmu_notifier(struct scif_mmu_notif *mmn, |
| 259 | struct mm_struct *mm, struct scif_endpt *ep) |
| 260 | { |
| 261 | mmn->ep = ep; |
| 262 | mmn->mm = mm; |
| 263 | mmn->ep_mmu_notifier.ops = &scif_mmu_notifier_ops; |
| 264 | INIT_LIST_HEAD(&mmn->list); |
| 265 | INIT_LIST_HEAD(&mmn->tc_reg_list); |
| 266 | } |
| 267 | |
| 268 | static struct scif_mmu_notif * |
| 269 | scif_find_mmu_notifier(struct mm_struct *mm, struct scif_endpt_rma_info *rma) |
| 270 | { |
| 271 | struct scif_mmu_notif *mmn; |
| 272 | |
| 273 | list_for_each_entry(mmn, &rma->mmn_list, list) |
| 274 | if (mmn->mm == mm) |
| 275 | return mmn; |
| 276 | return NULL; |
| 277 | } |
| 278 | |
| 279 | static struct scif_mmu_notif * |
| 280 | scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep) |
| 281 | { |
| 282 | struct scif_mmu_notif *mmn |
| 283 | = kzalloc(sizeof(*mmn), GFP_KERNEL); |
| 284 | |
| 285 | if (!mmn) |
| 286 | return ERR_PTR(-ENOMEM); |
| 287 | |
| 288 | scif_init_mmu_notifier(mmn, current->mm, ep); |
| 289 | if (mmu_notifier_register(&mmn->ep_mmu_notifier, current->mm)) { |
| 290 | kfree(mmn); |
| 291 | return ERR_PTR(-EBUSY); |
| 292 | } |
| 293 | list_add(&mmn->list, &ep->rma_info.mmn_list); |
| 294 | return mmn; |
| 295 | } |
| 296 | |
| 297 | /* |
| 298 | * Called from the misc thread to destroy temporary cached windows and |
| 299 | * unregister the MMU notifier for the SCIF endpoint. |
| 300 | */ |
| 301 | void scif_mmu_notif_handler(struct work_struct *work) |
| 302 | { |
| 303 | struct list_head *pos, *tmpq; |
| 304 | struct scif_endpt *ep; |
| 305 | restart: |
| 306 | scif_rma_destroy_tcw_invalid(); |
| 307 | spin_lock(&scif_info.rmalock); |
| 308 | list_for_each_safe(pos, tmpq, &scif_info.mmu_notif_cleanup) { |
| 309 | ep = list_entry(pos, struct scif_endpt, mmu_list); |
| 310 | list_del(&ep->mmu_list); |
| 311 | spin_unlock(&scif_info.rmalock); |
| 312 | scif_rma_destroy_tcw_ep(ep); |
| 313 | scif_ep_unregister_mmu_notifier(ep); |
| 314 | goto restart; |
| 315 | } |
| 316 | spin_unlock(&scif_info.rmalock); |
| 317 | } |
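/*
 * The restart label above implements a common cleanup pattern: the list
 * is walked under rmalock, but destroying windows and unregistering
 * notifiers can sleep, so the lock is dropped after detaching one entry
 * and the walk restarts from the head of the list.
 */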
| 318 | |
| 319 | static bool scif_is_set_reg_cache(int flags) |
| 320 | { |
| 321 | return !!(flags & SCIF_RMA_USECACHE); |
| 322 | } |
| 323 | #else |
| 324 | static struct scif_mmu_notif * |
| 325 | scif_find_mmu_notifier(struct mm_struct *mm, |
| 326 | struct scif_endpt_rma_info *rma) |
| 327 | { |
| 328 | return NULL; |
| 329 | } |
| 330 | |
| 331 | static struct scif_mmu_notif * |
| 332 | scif_add_mmu_notifier(struct mm_struct *mm, struct scif_endpt *ep) |
| 333 | { |
| 334 | return NULL; |
| 335 | } |
| 336 | |
| 337 | void scif_mmu_notif_handler(struct work_struct *work) |
| 338 | { |
| 339 | } |
| 340 | |
| 341 | static bool scif_is_set_reg_cache(int flags) |
| 342 | { |
| 343 | return false; |
| 344 | } |
| 345 | |
| 346 | static bool scif_rma_tc_can_cache(struct scif_endpt *ep, size_t cur_bytes) |
| 347 | { |
| 348 | return false; |
| 349 | } |
| 350 | #endif |
| 351 | |
| 352 | /** |
| 353 | * scif_register_temp: |
| 354 | * @epd: Endpoint Descriptor. |
| 355 | * @addr: virtual address to/from which to copy |
| 356 | * @len: length of range to copy |
| 357 | * @out_offset: computed offset returned by reference. |
| 358 | * @out_window: allocated registered window returned by reference. |
| 359 | * |
| 360 | * Create a temporary registered window. The peer will not know about this |
| 361 | * window. This API is used for the scif_vreadfrom()/scif_vwriteto() APIs. |
| 362 | */ |
| 363 | static int |
| 364 | scif_register_temp(scif_epd_t epd, unsigned long addr, size_t len, int prot, |
| 365 | off_t *out_offset, struct scif_window **out_window) |
| 366 | { |
| 367 | struct scif_endpt *ep = (struct scif_endpt *)epd; |
| 368 | int err; |
| 369 | scif_pinned_pages_t pinned_pages; |
| 370 | size_t aligned_len; |
| 371 | |
| 372 | aligned_len = ALIGN(len, PAGE_SIZE); |
| 373 | |
| 374 | err = __scif_pin_pages((void *)(addr & PAGE_MASK), |
| 375 | aligned_len, &prot, 0, &pinned_pages); |
| 376 | if (err) |
| 377 | return err; |
| 378 | |
| 379 | pinned_pages->prot = prot; |
| 380 | |
| 381 | /* Compute the offset for this registration */ |
| 382 | err = scif_get_window_offset(ep, 0, 0, |
| 383 | aligned_len >> PAGE_SHIFT, |
| 384 | (s64 *)out_offset); |
| 385 | if (err) |
| 386 | goto error_unpin; |
| 387 | |
| 388 | /* Allocate and prepare self registration window */ |
| 389 | *out_window = scif_create_window(ep, aligned_len >> PAGE_SHIFT, |
| 390 | *out_offset, true); |
| 391 | if (!*out_window) { |
| 392 | scif_free_window_offset(ep, NULL, *out_offset); |
| 393 | err = -ENOMEM; |
| 394 | goto error_unpin; |
| 395 | } |
| 396 | |
| 397 | (*out_window)->pinned_pages = pinned_pages; |
| 398 | (*out_window)->nr_pages = pinned_pages->nr_pages; |
| 399 | (*out_window)->prot = pinned_pages->prot; |
| 400 | |
| 401 | (*out_window)->va_for_temp = addr & PAGE_MASK; |
| 402 | err = scif_map_window(ep->remote_dev, *out_window); |
| 403 | if (err) { |
| 404 | /* Something went wrong! Rollback */ |
| 405 | scif_destroy_window(ep, *out_window); |
| 406 | *out_window = NULL; |
| 407 | } else { |
| 408 | *out_offset |= (addr - (*out_window)->va_for_temp); |
| 409 | } |
| 410 | return err; |
| 411 | error_unpin: |
| 412 | if (err) |
| 413 | dev_err(&ep->remote_dev->sdev->dev, |
| 414 | "%s %d err %d\n", __func__, __LINE__, err); |
| 415 | scif_unpin_pages(pinned_pages); |
| 416 | return err; |
| 417 | } |
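/*
 * Note that the temporary registration above is page granular: the
 * pinning starts at addr & PAGE_MASK and covers ALIGN(len, PAGE_SIZE)
 * bytes, and on success the sub-page offset of addr is folded into
 * *out_offset so the returned offset points at the exact byte rather
 * than at the start of the page.
 */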
| 418 | |
| 419 | #define SCIF_DMA_TO (3 * HZ) |
| 420 | |
| 421 | /* |
| 422 | * scif_sync_dma - Program a DMA without an interrupt descriptor |
| 423 | * |
| 424 | * @sdev: The SCIF hardware device instance used |
| 425 | * for DMA registration. |
| 426 | * @chan: DMA channel to be used. |
| 427 | * @sync_wait: Wait for DMA to complete? |
| 428 | * |
| 429 | * Return 0 on success and -errno on error. |
| 430 | */ |
| 431 | static int scif_sync_dma(struct scif_hw_dev *sdev, struct dma_chan *chan, |
| 432 | bool sync_wait) |
| 433 | { |
| 434 | int err = 0; |
| 435 | struct dma_async_tx_descriptor *tx = NULL; |
| 436 | enum dma_ctrl_flags flags = DMA_PREP_FENCE; |
| 437 | dma_cookie_t cookie; |
| 438 | struct dma_device *ddev; |
| 439 | |
| 440 | if (!chan) { |
| 441 | err = -EIO; |
| 442 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 443 | __func__, __LINE__, err); |
| 444 | return err; |
| 445 | } |
| 446 | ddev = chan->device; |
| 447 | |
| 448 | tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags); |
| 449 | if (!tx) { |
| 450 | err = -ENOMEM; |
| 451 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 452 | __func__, __LINE__, err); |
| 453 | goto release; |
| 454 | } |
| 455 | cookie = tx->tx_submit(tx); |
| 456 | |
| 457 | if (dma_submit_error(cookie)) { |
| 458 | err = -ENOMEM; |
| 459 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 460 | __func__, __LINE__, err); |
| 461 | goto release; |
| 462 | } |
| 463 | if (!sync_wait) { |
| 464 | dma_async_issue_pending(chan); |
| 465 | } else { |
| 466 | if (dma_sync_wait(chan, cookie) == DMA_COMPLETE) { |
| 467 | err = 0; |
| 468 | } else { |
| 469 | err = -EIO; |
| 470 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 471 | __func__, __LINE__, err); |
| 472 | } |
| 473 | } |
| 474 | release: |
| 475 | return err; |
| 476 | } |
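/*
 * scif_sync_dma() never moves data: the zero-length memcpy descriptor
 * prepared with DMA_PREP_FENCE acts purely as a marker behind all
 * previously submitted work on the channel. With sync_wait the routine
 * polls via dma_sync_wait() until that marker completes, which drains
 * the channel; otherwise it only kicks the engine with
 * dma_async_issue_pending().
 */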
| 477 | |
| 478 | static void scif_dma_callback(void *arg) |
| 479 | { |
| 480 | struct completion *done = (struct completion *)arg; |
| 481 | |
| 482 | complete(done); |
| 483 | } |
| 484 | |
| 485 | #define SCIF_DMA_SYNC_WAIT true |
| 486 | #define SCIF_DMA_POLL BIT(0) |
| 487 | #define SCIF_DMA_INTR BIT(1) |
| 488 | |
| 489 | /* |
| 490 | * scif_async_dma - Program a DMA with an interrupt descriptor |
| 491 | * |
| 492 | * @sdev: The SCIF hardware device instance used |
| 493 | * for DMA registration. |
| 494 | * @chan: DMA channel to be used. |
| 495 | * Return 0 on success and -errno on error. |
| 496 | */ |
| 497 | static int scif_async_dma(struct scif_hw_dev *sdev, struct dma_chan *chan) |
| 498 | { |
| 499 | int err = 0; |
| 500 | struct dma_device *ddev; |
| 501 | struct dma_async_tx_descriptor *tx = NULL; |
| 502 | enum dma_ctrl_flags flags = DMA_PREP_INTERRUPT | DMA_PREP_FENCE; |
| 503 | DECLARE_COMPLETION_ONSTACK(done_wait); |
| 504 | dma_cookie_t cookie; |
| 505 | enum dma_status status; |
| 506 | |
| 507 | if (!chan) { |
| 508 | err = -EIO; |
| 509 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 510 | __func__, __LINE__, err); |
| 511 | return err; |
| 512 | } |
| 513 | ddev = chan->device; |
| 514 | |
| 515 | tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, flags); |
| 516 | if (!tx) { |
| 517 | err = -ENOMEM; |
| 518 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 519 | __func__, __LINE__, err); |
| 520 | goto release; |
| 521 | } |
| 522 | reinit_completion(&done_wait); |
| 523 | tx->callback = scif_dma_callback; |
| 524 | tx->callback_param = &done_wait; |
| 525 | cookie = tx->tx_submit(tx); |
| 526 | |
| 527 | if (dma_submit_error(cookie)) { |
| 528 | err = -ENOMEM; |
| 529 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 530 | __func__, __LINE__, err); |
| 531 | goto release; |
| 532 | } |
| 533 | dma_async_issue_pending(chan); |
| 534 | |
| 535 | err = wait_for_completion_timeout(&done_wait, SCIF_DMA_TO); |
| 536 | if (!err) { |
| 537 | err = -EIO; |
| 538 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 539 | __func__, __LINE__, err); |
| 540 | goto release; |
| 541 | } |
| 542 | err = 0; |
| 543 | status = dma_async_is_tx_complete(chan, cookie, NULL, NULL); |
| 544 | if (status != DMA_COMPLETE) { |
| 545 | err = -EIO; |
| 546 | dev_err(&sdev->dev, "%s %d err %d\n", |
| 547 | __func__, __LINE__, err); |
| 548 | goto release; |
| 549 | } |
| 550 | release: |
| 551 | return err; |
| 552 | } |
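/*
 * scif_async_dma() is the interrupt-driven counterpart: the zero-length
 * descriptor carries DMA_PREP_INTERRUPT and an on-stack completion, the
 * routine waits up to SCIF_DMA_TO for the callback and then double
 * checks the cookie with dma_async_is_tx_complete() before declaring
 * the channel drained.
 */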
| 553 | |
| 554 | /* |
| 555 | * scif_drain_dma_poll - Drain all outstanding DMA operations for a particular |
| 556 | * DMA channel via polling. |
| 557 | * |
| 558 | * @sdev - The SCIF device |
| 559 | * @chan - DMA channel |
| 560 | * Return 0 on success and -errno on error. |
| 561 | */ |
| 562 | static int scif_drain_dma_poll(struct scif_hw_dev *sdev, struct dma_chan *chan) |
| 563 | { |
| 564 | if (!chan) |
| 565 | return -EINVAL; |
| 566 | return scif_sync_dma(sdev, chan, SCIF_DMA_SYNC_WAIT); |
| 567 | } |
| 568 | |
| 569 | /* |
| 570 | * scif_drain_dma_intr - Drain all outstanding DMA operations for a particular |
| 571 | * DMA channel via interrupt based blocking wait. |
| 572 | * |
| 573 | * @sdev - The SCIF device |
| 574 | * @chan - DMA channel |
| 575 | * Return 0 on success and -errno on error. |
| 576 | */ |
| 577 | int scif_drain_dma_intr(struct scif_hw_dev *sdev, struct dma_chan *chan) |
| 578 | { |
| 579 | if (!chan) |
| 580 | return -EINVAL; |
| 581 | return scif_async_dma(sdev, chan); |
| 582 | } |
| 583 | |
| 584 | /** |
| 585 | * scif_rma_destroy_windows: |
| 586 | * |
| 587 | * This routine destroys all windows queued for cleanup |
| 588 | */ |
| 589 | void scif_rma_destroy_windows(void) |
| 590 | { |
| 591 | struct list_head *item, *tmp; |
| 592 | struct scif_window *window; |
| 593 | struct scif_endpt *ep; |
| 594 | struct dma_chan *chan; |
| 595 | |
| 596 | might_sleep(); |
| 597 | restart: |
| 598 | spin_lock(&scif_info.rmalock); |
| 599 | list_for_each_safe(item, tmp, &scif_info.rma) { |
| 600 | window = list_entry(item, struct scif_window, |
| 601 | list); |
| 602 | ep = (struct scif_endpt *)window->ep; |
| 603 | chan = ep->rma_info.dma_chan; |
| 604 | |
| 605 | list_del_init(&window->list); |
| 606 | spin_unlock(&scif_info.rmalock); |
| 607 | if (!chan || !scifdev_alive(ep) || |
| 608 | !scif_drain_dma_intr(ep->remote_dev->sdev, |
| 609 | ep->rma_info.dma_chan)) |
| 610 | /* Remove window from global list */ |
| 611 | window->unreg_state = OP_COMPLETED; |
| 612 | else |
| 613 | dev_warn(&ep->remote_dev->sdev->dev, |
| 614 | "DMA engine hung?\n"); |
| 615 | if (window->unreg_state == OP_COMPLETED) { |
| 616 | if (window->type == SCIF_WINDOW_SELF) |
| 617 | scif_destroy_window(ep, window); |
| 618 | else |
| 619 | scif_destroy_remote_window(window); |
| 620 | atomic_dec(&ep->rma_info.tw_refcount); |
| 621 | } |
| 622 | goto restart; |
| 623 | } |
| 624 | spin_unlock(&scif_info.rmalock); |
| 625 | } |
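/*
 * Windows queued here are only torn down once the endpoint's DMA
 * channel has been drained (or the channel/peer device is already
 * gone), which ensures no in-flight transfer still references the
 * window's pages; if the drain fails the window is left alone and the
 * DMA engine is reported as possibly hung.
 */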
| 626 | |
| 627 | /** |
| 628 | * scif_rma_destroy_tcw_invalid: |
| 629 | * |
| 630 | * This routine destroys temporary cached registered windows |
| 631 | * which have been queued for cleanup. |
| 632 | */ |
| 633 | void scif_rma_destroy_tcw_invalid(void) |
| 634 | { |
| 635 | struct list_head *item, *tmp; |
| 636 | struct scif_window *window; |
| 637 | struct scif_endpt *ep; |
| 638 | struct dma_chan *chan; |
| 639 | |
| 640 | might_sleep(); |
| 641 | restart: |
| 642 | spin_lock(&scif_info.rmalock); |
| 643 | list_for_each_safe(item, tmp, &scif_info.rma_tc) { |
| 644 | window = list_entry(item, struct scif_window, list); |
| 645 | ep = (struct scif_endpt *)window->ep; |
| 646 | chan = ep->rma_info.dma_chan; |
| 647 | list_del_init(&window->list); |
| 648 | spin_unlock(&scif_info.rmalock); |
| 649 | mutex_lock(&ep->rma_info.rma_lock); |
| 650 | if (!chan || !scifdev_alive(ep) || |
| 651 | !scif_drain_dma_intr(ep->remote_dev->sdev, |
| 652 | ep->rma_info.dma_chan)) { |
| 653 | atomic_sub(window->nr_pages, |
| 654 | &ep->rma_info.tcw_total_pages); |
| 655 | scif_destroy_window(ep, window); |
| 656 | atomic_dec(&ep->rma_info.tcw_refcount); |
| 657 | } else { |
| 658 | dev_warn(&ep->remote_dev->sdev->dev, |
| 659 | "DMA engine hung?\n"); |
| 660 | } |
| 661 | mutex_unlock(&ep->rma_info.rma_lock); |
| 662 | goto restart; |
| 663 | } |
| 664 | spin_unlock(&scif_info.rmalock); |
| 665 | } |
| 666 | |
| 667 | static inline |
| 668 | void *_get_local_va(off_t off, struct scif_window *window, size_t len) |
| 669 | { |
| 670 | int page_nr = (off - window->offset) >> PAGE_SHIFT; |
| 671 | off_t page_off = off & ~PAGE_MASK; |
| 672 | void *va = NULL; |
| 673 | |
| 674 | if (window->type == SCIF_WINDOW_SELF) { |
| 675 | struct page **pages = window->pinned_pages->pages; |
| 676 | |
| 677 | va = page_address(pages[page_nr]) + page_off; |
| 678 | } |
| 679 | return va; |
| 680 | } |
| 681 | |
| 682 | static inline |
| 683 | void *ioremap_remote(off_t off, struct scif_window *window, |
| 684 | size_t len, struct scif_dev *dev, |
| 685 | struct scif_window_iter *iter) |
| 686 | { |
| 687 | dma_addr_t phys = scif_off_to_dma_addr(window, off, NULL, iter); |
| 688 | |
| 689 | /* |
| 690 | * If the DMA address is not card relative then we need the DMA |
| 691 | * address to be an offset into the BAR. The aperture base was already |
| 692 | * added, so subtract it here since scif_ioremap() is going to add it again. |
| 693 | */ |
| 694 | if (!scifdev_self(dev) && window->type == SCIF_WINDOW_PEER && |
| 695 | dev->sdev->aper && !dev->sdev->card_rel_da) |
| 696 | phys = phys - dev->sdev->aper->pa; |
| 697 | return scif_ioremap(phys, len, dev); |
| 698 | } |
| 699 | |
| 700 | static inline void |
| 701 | iounmap_remote(void *virt, size_t size, struct scif_copy_work *work) |
| 702 | { |
| 703 | scif_iounmap(virt, size, work->remote_dev); |
| 704 | } |
| 705 | |
| 706 | /* |
| 707 | * Takes care of ordering issues caused by: |
| 708 | * 1. Hardware: Only in the case of cpu copy from mgmt node to card |
| 709 | * because of WC memory. |
| 710 | * 2. Software: If memcpy reorders copy instructions for optimization. |
| 711 | * This could happen at both mgmt node and card. |
| 712 | */ |
| 713 | static inline void |
| 714 | scif_ordered_memcpy_toio(char *dst, const char *src, size_t count) |
| 715 | { |
| 716 | if (!count) |
| 717 | return; |
| 718 | |
| 719 | memcpy_toio((void __iomem __force *)dst, src, --count); |
| 720 | /* Order the last byte with the previous stores */ |
| 721 | wmb(); |
| 722 | *(dst + count) = *(src + count); |
| 723 | } |
| 724 | |
| 725 | static inline void scif_unaligned_cpy_toio(char *dst, const char *src, |
| 726 | size_t count, bool ordered) |
| 727 | { |
| 728 | if (ordered) |
| 729 | scif_ordered_memcpy_toio(dst, src, count); |
| 730 | else |
| 731 | memcpy_toio((void __iomem __force *)dst, src, count); |
| 732 | } |
| 733 | |
| 734 | static inline |
| 735 | void scif_ordered_memcpy_fromio(char *dst, const char *src, size_t count) |
| 736 | { |
| 737 | if (!count) |
| 738 | return; |
| 739 | |
| 740 | memcpy_fromio(dst, (void __iomem __force *)src, --count); |
| 741 | /* Order the last byte with the previous loads */ |
| 742 | rmb(); |
| 743 | *(dst + count) = *(src + count); |
| 744 | } |
| 745 | |
| 746 | static inline void scif_unaligned_cpy_fromio(char *dst, const char *src, |
| 747 | size_t count, bool ordered) |
| 748 | { |
| 749 | if (ordered) |
| 750 | scif_ordered_memcpy_fromio(dst, src, count); |
| 751 | else |
| 752 | memcpy_fromio(dst, (void __iomem __force *)src, count); |
| 753 | } |
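/*
 * The ordered toio/fromio variants above copy count - 1 bytes first,
 * issue a memory barrier, and only then move the final byte, so the
 * last byte of an "ordered" transfer cannot become visible before the
 * rest of the payload.
 */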
| 754 | |
| 755 | #define SCIF_RMA_ERROR_CODE (~(dma_addr_t)0x0) |
| 756 | |
| 757 | /* |
| 758 | * scif_off_to_dma_addr: |
| 759 | * Obtain the dma_addr given the window and the offset. |
| 760 | * @window: Registered window. |
| 761 | * @off: Window offset. |
| 762 | * @nr_bytes: Return the number of contiguous bytes until the next DMA address boundary. |
| 763 | * @iter: Optional window iterator; its index and start offset are |
| 764 | * updated to record the contiguous chunk found by this lookup. |
| 765 | * The nr_bytes value gives the caller an estimate of the maximum possible |
| 766 | * DMA transfer, while the iterator provides a faster starting point |
| 767 | * for the next iteration. |
| 768 | */ |
| 769 | dma_addr_t scif_off_to_dma_addr(struct scif_window *window, s64 off, |
| 770 | size_t *nr_bytes, struct scif_window_iter *iter) |
| 771 | { |
| 772 | int i, page_nr; |
| 773 | s64 start, end; |
| 774 | off_t page_off; |
| 775 | |
| 776 | if (window->nr_pages == window->nr_contig_chunks) { |
| 777 | page_nr = (off - window->offset) >> PAGE_SHIFT; |
| 778 | page_off = off & ~PAGE_MASK; |
| 779 | |
| 780 | if (nr_bytes) |
| 781 | *nr_bytes = PAGE_SIZE - page_off; |
| 782 | return window->dma_addr[page_nr] | page_off; |
| 783 | } |
| 784 | if (iter) { |
| 785 | i = iter->index; |
| 786 | start = iter->offset; |
| 787 | } else { |
| 788 | i = 0; |
| 789 | start = window->offset; |
| 790 | } |
| 791 | for (; i < window->nr_contig_chunks; i++) { |
| 792 | end = start + (window->num_pages[i] << PAGE_SHIFT); |
| 793 | if (off >= start && off < end) { |
| 794 | if (iter) { |
| 795 | iter->index = i; |
| 796 | iter->offset = start; |
| 797 | } |
| 798 | if (nr_bytes) |
| 799 | *nr_bytes = end - off; |
| 800 | return (window->dma_addr[i] + (off - start)); |
| 801 | } |
| 802 | start += (window->num_pages[i] << PAGE_SHIFT); |
| 803 | } |
| 804 | dev_err(scif_info.mdev.this_device, |
| 805 | "%s %d BUG. Addr not found? window %p off 0x%llx\n", |
| 806 | __func__, __LINE__, window, off); |
| 807 | return SCIF_RMA_ERROR_CODE; |
| 808 | } |
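/*
 * Lookup strategy: when every page of the window is its own contiguous
 * chunk (nr_pages == nr_contig_chunks) the dma_addr array can be
 * indexed directly. Otherwise the contiguous chunks are walked, and a
 * caller-supplied scif_window_iter lets the next lookup resume from the
 * chunk found here instead of rescanning from the start of the window.
 */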
| 809 | |
| 810 | /* |
| 811 | * Copy between rma window and temporary buffer |
| 812 | */ |
| 813 | static void scif_rma_local_cpu_copy(s64 offset, struct scif_window *window, |
| 814 | u8 *temp, size_t rem_len, bool to_temp) |
| 815 | { |
| 816 | void *window_virt; |
| 817 | size_t loop_len; |
| 818 | int offset_in_page; |
| 819 | s64 end_offset; |
| 820 | |
| 821 | offset_in_page = offset & ~PAGE_MASK; |
| 822 | loop_len = PAGE_SIZE - offset_in_page; |
| 823 | |
| 824 | if (rem_len < loop_len) |
| 825 | loop_len = rem_len; |
| 826 | |
| 827 | window_virt = _get_local_va(offset, window, loop_len); |
| 828 | if (!window_virt) |
| 829 | return; |
| 830 | if (to_temp) |
| 831 | memcpy(temp, window_virt, loop_len); |
| 832 | else |
| 833 | memcpy(window_virt, temp, loop_len); |
| 834 | |
| 835 | offset += loop_len; |
| 836 | temp += loop_len; |
| 837 | rem_len -= loop_len; |
| 838 | |
| 839 | end_offset = window->offset + |
| 840 | (window->nr_pages << PAGE_SHIFT); |
| 841 | while (rem_len) { |
| 842 | if (offset == end_offset) { |
| 843 | window = list_next_entry(window, list); |
| 844 | end_offset = window->offset + |
| 845 | (window->nr_pages << PAGE_SHIFT); |
| 846 | } |
| 847 | loop_len = min(PAGE_SIZE, rem_len); |
| 848 | window_virt = _get_local_va(offset, window, loop_len); |
| 849 | if (!window_virt) |
| 850 | return; |
| 851 | if (to_temp) |
| 852 | memcpy(temp, window_virt, loop_len); |
| 853 | else |
| 854 | memcpy(window_virt, temp, loop_len); |
| 855 | offset += loop_len; |
| 856 | temp += loop_len; |
| 857 | rem_len -= loop_len; |
| 858 | } |
| 859 | } |
| 860 | |
| 861 | /** |
| 862 | * scif_rma_completion_cb: |
| 863 | * @data: RMA cookie |
| 864 | * |
| 865 | * RMA interrupt completion callback. |
| 866 | */ |
| 867 | static void scif_rma_completion_cb(void *data) |
| 868 | { |
| 869 | struct scif_dma_comp_cb *comp_cb = data; |
| 870 | |
| 871 | /* Free DMA Completion CB. */ |
| 872 | if (comp_cb->dst_window) |
| 873 | scif_rma_local_cpu_copy(comp_cb->dst_offset, |
| 874 | comp_cb->dst_window, |
| 875 | comp_cb->temp_buf + |
| 876 | comp_cb->header_padding, |
| 877 | comp_cb->len, false); |
| 878 | scif_unmap_single(comp_cb->temp_phys, comp_cb->sdev, |
| 879 | SCIF_KMEM_UNALIGNED_BUF_SIZE); |
| 880 | if (comp_cb->is_cache) |
| 881 | kmem_cache_free(unaligned_cache, |
| 882 | comp_cb->temp_buf_to_free); |
| 883 | else |
| 884 | kfree(comp_cb->temp_buf_to_free); |
| 885 | } |
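/*
 * This callback runs when the DMA engine completes the final interrupt
 * descriptor of an unaligned transfer. If a destination window was
 * recorded (the remote-to-local case), the bounce buffer contents past
 * the header padding are copied into it by the CPU, then the buffer is
 * unmapped and returned to unaligned_cache or freed with kfree().
 */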
| 886 | |
| 887 | /* Copies between temporary buffer and offsets provided in work */ |
| 888 | static int |
| 889 | scif_rma_list_dma_copy_unaligned(struct scif_copy_work *work, |
| 890 | u8 *temp, struct dma_chan *chan, |
| 891 | bool src_local) |
| 892 | { |
| 893 | struct scif_dma_comp_cb *comp_cb = work->comp_cb; |
| 894 | dma_addr_t window_dma_addr, temp_dma_addr; |
| 895 | dma_addr_t temp_phys = comp_cb->temp_phys; |
| 896 | size_t loop_len, nr_contig_bytes = 0, remaining_len = work->len; |
| 897 | int offset_in_ca, ret = 0; |
| 898 | s64 end_offset, offset; |
| 899 | struct scif_window *window; |
| 900 | void *window_virt_addr; |
| 901 | size_t tail_len; |
| 902 | struct dma_async_tx_descriptor *tx; |
| 903 | struct dma_device *dev = chan->device; |
| 904 | dma_cookie_t cookie; |
| 905 | |
| 906 | if (src_local) { |
| 907 | offset = work->dst_offset; |
| 908 | window = work->dst_window; |
| 909 | } else { |
| 910 | offset = work->src_offset; |
| 911 | window = work->src_window; |
| 912 | } |
| 913 | |
| 914 | offset_in_ca = offset & (L1_CACHE_BYTES - 1); |
| 915 | if (offset_in_ca) { |
| 916 | loop_len = L1_CACHE_BYTES - offset_in_ca; |
| 917 | loop_len = min(loop_len, remaining_len); |
| 918 | window_virt_addr = ioremap_remote(offset, window, |
| 919 | loop_len, |
| 920 | work->remote_dev, |
| 921 | NULL); |
| 922 | if (!window_virt_addr) |
| 923 | return -ENOMEM; |
| 924 | if (src_local) |
| 925 | scif_unaligned_cpy_toio(window_virt_addr, temp, |
| 926 | loop_len, |
| 927 | work->ordered && |
| 928 | !(remaining_len - loop_len)); |
| 929 | else |
| 930 | scif_unaligned_cpy_fromio(temp, window_virt_addr, |
| 931 | loop_len, work->ordered && |
| 932 | !(remaining_len - loop_len)); |
| 933 | iounmap_remote(window_virt_addr, loop_len, work); |
| 934 | |
| 935 | offset += loop_len; |
| 936 | temp += loop_len; |
| 937 | temp_phys += loop_len; |
| 938 | remaining_len -= loop_len; |
| 939 | } |
| 940 | |
| 941 | offset_in_ca = offset & ~PAGE_MASK; |
| 942 | end_offset = window->offset + |
| 943 | (window->nr_pages << PAGE_SHIFT); |
| 944 | |
| 945 | tail_len = remaining_len & (L1_CACHE_BYTES - 1); |
| 946 | remaining_len -= tail_len; |
| 947 | while (remaining_len) { |
| 948 | if (offset == end_offset) { |
| 949 | window = list_next_entry(window, list); |
| 950 | end_offset = window->offset + |
| 951 | (window->nr_pages << PAGE_SHIFT); |
| 952 | } |
| 953 | if (scif_is_mgmt_node()) |
| 954 | temp_dma_addr = temp_phys; |
| 955 | else |
| 956 | /* Fix if we ever enable IOMMU on the card */ |
| 957 | temp_dma_addr = (dma_addr_t)virt_to_phys(temp); |
| 958 | window_dma_addr = scif_off_to_dma_addr(window, offset, |
| 959 | &nr_contig_bytes, |
| 960 | NULL); |
| 961 | loop_len = min(nr_contig_bytes, remaining_len); |
| 962 | if (src_local) { |
| 963 | if (work->ordered && !tail_len && |
| 964 | !(remaining_len - loop_len) && |
| 965 | loop_len != L1_CACHE_BYTES) { |
| 966 | /* |
| 967 | * Break up the last chunk of the transfer into |
| 968 | * two steps, if there is no tail, to guarantee |
| 969 | * DMA ordering. SCIF_DMA_POLLING inserts |
| 970 | * a status update descriptor in step 1 which |
| 971 | * acts as a double sided synchronization fence |
| 972 | * for the DMA engine to ensure that the last |
| 973 | * cache line in step 2 is updated last. |
| 974 | */ |
| 975 | /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */ |
| 976 | tx = |
| 977 | dev->device_prep_dma_memcpy(chan, |
| 978 | window_dma_addr, |
| 979 | temp_dma_addr, |
| 980 | loop_len - |
| 981 | L1_CACHE_BYTES, |
| 982 | DMA_PREP_FENCE); |
| 983 | if (!tx) { |
| 984 | ret = -ENOMEM; |
| 985 | goto err; |
| 986 | } |
| 987 | cookie = tx->tx_submit(tx); |
| 988 | if (dma_submit_error(cookie)) { |
| 989 | ret = -ENOMEM; |
| 990 | goto err; |
| 991 | } |
| 992 | dma_async_issue_pending(chan); |
| 993 | offset += (loop_len - L1_CACHE_BYTES); |
| 994 | temp_dma_addr += (loop_len - L1_CACHE_BYTES); |
| 995 | window_dma_addr += (loop_len - L1_CACHE_BYTES); |
| 996 | remaining_len -= (loop_len - L1_CACHE_BYTES); |
| 997 | loop_len = remaining_len; |
| 998 | |
| 999 | /* Step 2) DMA: L1_CACHE_BYTES */ |
| 1000 | tx = |
| 1001 | dev->device_prep_dma_memcpy(chan, |
| 1002 | window_dma_addr, |
| 1003 | temp_dma_addr, |
| 1004 | loop_len, 0); |
| 1005 | if (!tx) { |
| 1006 | ret = -ENOMEM; |
| 1007 | goto err; |
| 1008 | } |
| 1009 | cookie = tx->tx_submit(tx); |
| 1010 | if (dma_submit_error(cookie)) { |
| 1011 | ret = -ENOMEM; |
| 1012 | goto err; |
| 1013 | } |
| 1014 | dma_async_issue_pending(chan); |
| 1015 | } else { |
| 1016 | tx = |
| 1017 | dev->device_prep_dma_memcpy(chan, |
| 1018 | window_dma_addr, |
| 1019 | temp_dma_addr, |
| 1020 | loop_len, 0); |
| 1021 | if (!tx) { |
| 1022 | ret = -ENOMEM; |
| 1023 | goto err; |
| 1024 | } |
| 1025 | cookie = tx->tx_submit(tx); |
| 1026 | if (dma_submit_error(cookie)) { |
| 1027 | ret = -ENOMEM; |
| 1028 | goto err; |
| 1029 | } |
| 1030 | dma_async_issue_pending(chan); |
| 1031 | } |
| 1032 | } else { |
| 1033 | tx = dev->device_prep_dma_memcpy(chan, temp_dma_addr, |
| 1034 | window_dma_addr, loop_len, 0); |
| 1035 | if (!tx) { |
| 1036 | ret = -ENOMEM; |
| 1037 | goto err; |
| 1038 | } |
| 1039 | cookie = tx->tx_submit(tx); |
| 1040 | if (dma_submit_error(cookie)) { |
| 1041 | ret = -ENOMEM; |
| 1042 | goto err; |
| 1043 | } |
| 1044 | dma_async_issue_pending(chan); |
| 1045 | } |
| 1046 | if (ret < 0) |
| 1047 | goto err; |
| 1048 | offset += loop_len; |
| 1049 | temp += loop_len; |
| 1050 | temp_phys += loop_len; |
| 1051 | remaining_len -= loop_len; |
| 1052 | offset_in_ca = 0; |
| 1053 | } |
| 1054 | if (tail_len) { |
| 1055 | if (offset == end_offset) { |
| 1056 | window = list_next_entry(window, list); |
| 1057 | end_offset = window->offset + |
| 1058 | (window->nr_pages << PAGE_SHIFT); |
| 1059 | } |
| 1060 | window_virt_addr = ioremap_remote(offset, window, tail_len, |
| 1061 | work->remote_dev, |
| 1062 | NULL); |
| 1063 | if (!window_virt_addr) |
| 1064 | return -ENOMEM; |
| 1065 | /* |
| 1066 | * The CPU copy for the tail bytes must be initiated only once |
| 1067 | * previous DMA transfers for this endpoint have completed |
| 1068 | * to guarantee ordering. |
| 1069 | */ |
| 1070 | if (work->ordered) { |
| 1071 | struct scif_dev *rdev = work->remote_dev; |
| 1072 | |
| 1073 | ret = scif_drain_dma_intr(rdev->sdev, chan); |
| 1074 | if (ret) |
| 1075 | return ret; |
| 1076 | } |
| 1077 | if (src_local) |
| 1078 | scif_unaligned_cpy_toio(window_virt_addr, temp, |
| 1079 | tail_len, work->ordered); |
| 1080 | else |
| 1081 | scif_unaligned_cpy_fromio(temp, window_virt_addr, |
| 1082 | tail_len, work->ordered); |
| 1083 | iounmap_remote(window_virt_addr, tail_len, work); |
| 1084 | } |
| 1085 | tx = dev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_INTERRUPT); |
| 1086 | if (!tx) { |
| 1087 | ret = -ENOMEM; |
| 1088 | return ret; |
| 1089 | } |
| 1090 | tx->callback = &scif_rma_completion_cb; |
| 1091 | tx->callback_param = comp_cb; |
| 1092 | cookie = tx->tx_submit(tx); |
| 1093 | |
| 1094 | if (dma_submit_error(cookie)) { |
| 1095 | ret = -ENOMEM; |
| 1096 | return ret; |
| 1097 | } |
| 1098 | dma_async_issue_pending(chan); |
| 1099 | return 0; |
| 1100 | err: |
| 1101 | dev_err(scif_info.mdev.this_device, |
| 1102 | "%s %d Desc Prog Failed ret %d\n", |
| 1103 | __func__, __LINE__, ret); |
| 1104 | return ret; |
| 1105 | } |
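/*
 * Shape of an unaligned transfer as handled above: a sub-cacheline head
 * is copied by the CPU through an ioremap of the remote window, the
 * cache-aligned body is moved by DMA via the bounce buffer, and a
 * sub-cacheline tail is again copied by the CPU, after draining the
 * channel when ordering is requested. A final interrupt descriptor then
 * triggers scif_rma_completion_cb() to copy back and free the bounce
 * buffer.
 */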
| 1106 | |
| 1107 | /* |
| 1108 | * _scif_rma_list_dma_copy_aligned: |
| 1109 | * |
| 1110 | * Traverse all the windows and perform DMA copy. |
| 1111 | */ |
| 1112 | static int _scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, |
| 1113 | struct dma_chan *chan) |
| 1114 | { |
| 1115 | dma_addr_t src_dma_addr, dst_dma_addr; |
| 1116 | size_t loop_len, remaining_len, src_contig_bytes = 0; |
| 1117 | size_t dst_contig_bytes = 0; |
| 1118 | struct scif_window_iter src_win_iter; |
| 1119 | struct scif_window_iter dst_win_iter; |
| 1120 | s64 end_src_offset, end_dst_offset; |
| 1121 | struct scif_window *src_window = work->src_window; |
| 1122 | struct scif_window *dst_window = work->dst_window; |
| 1123 | s64 src_offset = work->src_offset, dst_offset = work->dst_offset; |
| 1124 | int ret = 0; |
| 1125 | struct dma_async_tx_descriptor *tx; |
| 1126 | struct dma_device *dev = chan->device; |
| 1127 | dma_cookie_t cookie; |
| 1128 | |
| 1129 | remaining_len = work->len; |
| 1130 | |
| 1131 | scif_init_window_iter(src_window, &src_win_iter); |
| 1132 | scif_init_window_iter(dst_window, &dst_win_iter); |
| 1133 | end_src_offset = src_window->offset + |
| 1134 | (src_window->nr_pages << PAGE_SHIFT); |
| 1135 | end_dst_offset = dst_window->offset + |
| 1136 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1137 | while (remaining_len) { |
| 1138 | if (src_offset == end_src_offset) { |
| 1139 | src_window = list_next_entry(src_window, list); |
| 1140 | end_src_offset = src_window->offset + |
| 1141 | (src_window->nr_pages << PAGE_SHIFT); |
| 1142 | scif_init_window_iter(src_window, &src_win_iter); |
| 1143 | } |
| 1144 | if (dst_offset == end_dst_offset) { |
| 1145 | dst_window = list_next_entry(dst_window, list); |
| 1146 | end_dst_offset = dst_window->offset + |
| 1147 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1148 | scif_init_window_iter(dst_window, &dst_win_iter); |
| 1149 | } |
| 1150 | |
| 1151 | /* compute dma addresses for transfer */ |
| 1152 | src_dma_addr = scif_off_to_dma_addr(src_window, src_offset, |
| 1153 | &src_contig_bytes, |
| 1154 | &src_win_iter); |
| 1155 | dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset, |
| 1156 | &dst_contig_bytes, |
| 1157 | &dst_win_iter); |
| 1158 | loop_len = min(src_contig_bytes, dst_contig_bytes); |
| 1159 | loop_len = min(loop_len, remaining_len); |
| 1160 | if (work->ordered && !(remaining_len - loop_len)) { |
| 1161 | /* |
| 1162 | * Break up the last chunk of the transfer into two |
| 1163 | * steps to ensure that the last byte in step 2 is |
| 1164 | * updated last. |
| 1165 | */ |
| 1166 | /* Step 1) DMA: Body Length - 1 */ |
| 1167 | tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr, |
| 1168 | src_dma_addr, |
| 1169 | loop_len - 1, |
| 1170 | DMA_PREP_FENCE); |
| 1171 | if (!tx) { |
| 1172 | ret = -ENOMEM; |
| 1173 | goto err; |
| 1174 | } |
| 1175 | cookie = tx->tx_submit(tx); |
| 1176 | if (dma_submit_error(cookie)) { |
| 1177 | ret = -ENOMEM; |
| 1178 | goto err; |
| 1179 | } |
| 1180 | src_offset += (loop_len - 1); |
| 1181 | dst_offset += (loop_len - 1); |
| 1182 | src_dma_addr += (loop_len - 1); |
| 1183 | dst_dma_addr += (loop_len - 1); |
| 1184 | remaining_len -= (loop_len - 1); |
| 1185 | loop_len = remaining_len; |
| 1186 | |
| 1187 | /* Step 2) DMA: 1 BYTES */ |
| 1188 | tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr, |
| 1189 | src_dma_addr, loop_len, 0); |
| 1190 | if (!tx) { |
| 1191 | ret = -ENOMEM; |
| 1192 | goto err; |
| 1193 | } |
| 1194 | cookie = tx->tx_submit(tx); |
| 1195 | if (dma_submit_error(cookie)) { |
| 1196 | ret = -ENOMEM; |
| 1197 | goto err; |
| 1198 | } |
| 1199 | dma_async_issue_pending(chan); |
| 1200 | } else { |
| 1201 | tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr, |
| 1202 | src_dma_addr, loop_len, 0); |
| 1203 | if (!tx) { |
| 1204 | ret = -ENOMEM; |
| 1205 | goto err; |
| 1206 | } |
| 1207 | cookie = tx->tx_submit(tx); |
| 1208 | if (dma_submit_error(cookie)) { |
| 1209 | ret = -ENOMEM; |
| 1210 | goto err; |
| 1211 | } |
| 1212 | } |
| 1213 | src_offset += loop_len; |
| 1214 | dst_offset += loop_len; |
| 1215 | remaining_len -= loop_len; |
| 1216 | } |
| 1217 | return ret; |
| 1218 | err: |
| 1219 | dev_err(scif_info.mdev.this_device, |
| 1220 | "%s %d Desc Prog Failed ret %d\n", |
| 1221 | __func__, __LINE__, ret); |
| 1222 | return ret; |
| 1223 | } |
| 1224 | |
| 1225 | /* |
| 1226 | * scif_rma_list_dma_copy_aligned: |
| 1227 | * |
| 1228 | * Traverse all the windows and perform DMA copy. |
| 1229 | */ |
| 1230 | static int scif_rma_list_dma_copy_aligned(struct scif_copy_work *work, |
| 1231 | struct dma_chan *chan) |
| 1232 | { |
| 1233 | dma_addr_t src_dma_addr, dst_dma_addr; |
| 1234 | size_t loop_len, remaining_len, tail_len, src_contig_bytes = 0; |
| 1235 | size_t dst_contig_bytes = 0; |
| 1236 | int src_cache_off; |
| 1237 | s64 end_src_offset, end_dst_offset; |
| 1238 | struct scif_window_iter src_win_iter; |
| 1239 | struct scif_window_iter dst_win_iter; |
| 1240 | void *src_virt, *dst_virt; |
| 1241 | struct scif_window *src_window = work->src_window; |
| 1242 | struct scif_window *dst_window = work->dst_window; |
| 1243 | s64 src_offset = work->src_offset, dst_offset = work->dst_offset; |
| 1244 | int ret = 0; |
| 1245 | struct dma_async_tx_descriptor *tx; |
| 1246 | struct dma_device *dev = chan->device; |
| 1247 | dma_cookie_t cookie; |
| 1248 | |
| 1249 | remaining_len = work->len; |
| 1250 | scif_init_window_iter(src_window, &src_win_iter); |
| 1251 | scif_init_window_iter(dst_window, &dst_win_iter); |
| 1252 | |
| 1253 | src_cache_off = src_offset & (L1_CACHE_BYTES - 1); |
| 1254 | if (src_cache_off != 0) { |
| 1255 | /* Head */ |
| 1256 | loop_len = L1_CACHE_BYTES - src_cache_off; |
| 1257 | loop_len = min(loop_len, remaining_len); |
| 1258 | src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset); |
| 1259 | dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset); |
| 1260 | if (src_window->type == SCIF_WINDOW_SELF) |
| 1261 | src_virt = _get_local_va(src_offset, src_window, |
| 1262 | loop_len); |
| 1263 | else |
| 1264 | src_virt = ioremap_remote(src_offset, src_window, |
| 1265 | loop_len, |
| 1266 | work->remote_dev, NULL); |
| 1267 | if (!src_virt) |
| 1268 | return -ENOMEM; |
| 1269 | if (dst_window->type == SCIF_WINDOW_SELF) |
| 1270 | dst_virt = _get_local_va(dst_offset, dst_window, |
| 1271 | loop_len); |
| 1272 | else |
| 1273 | dst_virt = ioremap_remote(dst_offset, dst_window, |
| 1274 | loop_len, |
| 1275 | work->remote_dev, NULL); |
| 1276 | if (!dst_virt) { |
| 1277 | if (src_window->type != SCIF_WINDOW_SELF) |
| 1278 | iounmap_remote(src_virt, loop_len, work); |
| 1279 | return -ENOMEM; |
| 1280 | } |
| 1281 | if (src_window->type == SCIF_WINDOW_SELF) |
| 1282 | scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len, |
| 1283 | remaining_len == loop_len ? |
| 1284 | work->ordered : false); |
| 1285 | else |
| 1286 | scif_unaligned_cpy_fromio(dst_virt, src_virt, loop_len, |
| 1287 | remaining_len == loop_len ? |
| 1288 | work->ordered : false); |
| 1289 | if (src_window->type != SCIF_WINDOW_SELF) |
| 1290 | iounmap_remote(src_virt, loop_len, work); |
| 1291 | if (dst_window->type != SCIF_WINDOW_SELF) |
| 1292 | iounmap_remote(dst_virt, loop_len, work); |
| 1293 | src_offset += loop_len; |
| 1294 | dst_offset += loop_len; |
| 1295 | remaining_len -= loop_len; |
| 1296 | } |
| 1297 | |
| 1298 | end_src_offset = src_window->offset + |
| 1299 | (src_window->nr_pages << PAGE_SHIFT); |
| 1300 | end_dst_offset = dst_window->offset + |
| 1301 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1302 | tail_len = remaining_len & (L1_CACHE_BYTES - 1); |
| 1303 | remaining_len -= tail_len; |
| 1304 | while (remaining_len) { |
| 1305 | if (src_offset == end_src_offset) { |
| 1306 | src_window = list_next_entry(src_window, list); |
| 1307 | end_src_offset = src_window->offset + |
| 1308 | (src_window->nr_pages << PAGE_SHIFT); |
| 1309 | scif_init_window_iter(src_window, &src_win_iter); |
| 1310 | } |
| 1311 | if (dst_offset == end_dst_offset) { |
| 1312 | dst_window = list_next_entry(dst_window, list); |
| 1313 | end_dst_offset = dst_window->offset + |
| 1314 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1315 | scif_init_window_iter(dst_window, &dst_win_iter); |
| 1316 | } |
| 1317 | |
| 1318 | /* compute dma addresses for transfer */ |
| 1319 | src_dma_addr = scif_off_to_dma_addr(src_window, src_offset, |
| 1320 | &src_contig_bytes, |
| 1321 | &src_win_iter); |
| 1322 | dst_dma_addr = scif_off_to_dma_addr(dst_window, dst_offset, |
| 1323 | &dst_contig_bytes, |
| 1324 | &dst_win_iter); |
| 1325 | loop_len = min(src_contig_bytes, dst_contig_bytes); |
| 1326 | loop_len = min(loop_len, remaining_len); |
| 1327 | if (work->ordered && !tail_len && |
| 1328 | !(remaining_len - loop_len)) { |
| 1329 | /* |
| 1330 | * Break up the last chunk of the transfer into two |
| 1331 | * steps, if there is no tail, to guarantee DMA ordering. |
| 1332 | * Passing SCIF_DMA_POLLING inserts a status update |
| 1333 | * descriptor in step 1 which acts as a double sided |
| 1334 | * synchronization fence for the DMA engine to ensure |
| 1335 | * that the last cache line in step 2 is updated last. |
| 1336 | */ |
| 1337 | /* Step 1) DMA: Body Length - L1_CACHE_BYTES. */ |
| 1338 | tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr, |
| 1339 | src_dma_addr, |
| 1340 | loop_len - |
| 1341 | L1_CACHE_BYTES, |
| 1342 | DMA_PREP_FENCE); |
| 1343 | if (!tx) { |
| 1344 | ret = -ENOMEM; |
| 1345 | goto err; |
| 1346 | } |
| 1347 | cookie = tx->tx_submit(tx); |
| 1348 | if (dma_submit_error(cookie)) { |
| 1349 | ret = -ENOMEM; |
| 1350 | goto err; |
| 1351 | } |
| 1352 | dma_async_issue_pending(chan); |
| 1353 | src_offset += (loop_len - L1_CACHE_BYTES); |
| 1354 | dst_offset += (loop_len - L1_CACHE_BYTES); |
| 1355 | src_dma_addr += (loop_len - L1_CACHE_BYTES); |
| 1356 | dst_dma_addr += (loop_len - L1_CACHE_BYTES); |
| 1357 | remaining_len -= (loop_len - L1_CACHE_BYTES); |
| 1358 | loop_len = remaining_len; |
| 1359 | |
| 1360 | /* Step 2) DMA: L1_CACHE_BYTES */ |
| 1361 | tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr, |
| 1362 | src_dma_addr, |
| 1363 | loop_len, 0); |
| 1364 | if (!tx) { |
| 1365 | ret = -ENOMEM; |
| 1366 | goto err; |
| 1367 | } |
| 1368 | cookie = tx->tx_submit(tx); |
| 1369 | if (dma_submit_error(cookie)) { |
| 1370 | ret = -ENOMEM; |
| 1371 | goto err; |
| 1372 | } |
| 1373 | dma_async_issue_pending(chan); |
| 1374 | } else { |
| 1375 | tx = dev->device_prep_dma_memcpy(chan, dst_dma_addr, |
| 1376 | src_dma_addr, |
| 1377 | loop_len, 0); |
| 1378 | if (!tx) { |
| 1379 | ret = -ENOMEM; |
| 1380 | goto err; |
| 1381 | } |
| 1382 | cookie = tx->tx_submit(tx); |
| 1383 | if (dma_submit_error(cookie)) { |
| 1384 | ret = -ENOMEM; |
| 1385 | goto err; |
| 1386 | } |
| 1387 | dma_async_issue_pending(chan); |
| 1388 | } |
| 1389 | src_offset += loop_len; |
| 1390 | dst_offset += loop_len; |
| 1391 | remaining_len -= loop_len; |
| 1392 | } |
| 1393 | remaining_len = tail_len; |
| 1394 | if (remaining_len) { |
| 1395 | loop_len = remaining_len; |
| 1396 | if (src_offset == end_src_offset) |
| 1397 | src_window = list_next_entry(src_window, list); |
| 1398 | if (dst_offset == end_dst_offset) |
| 1399 | dst_window = list_next_entry(dst_window, list); |
| 1400 | |
| 1401 | src_dma_addr = __scif_off_to_dma_addr(src_window, src_offset); |
| 1402 | dst_dma_addr = __scif_off_to_dma_addr(dst_window, dst_offset); |
| 1403 | /* |
| 1404 | * The CPU copy for the tail bytes must be initiated only once |
| 1405 | * previous DMA transfers for this endpoint have completed to |
| 1406 | * guarantee ordering. |
| 1407 | */ |
| 1408 | if (work->ordered) { |
| 1409 | struct scif_dev *rdev = work->remote_dev; |
| 1410 | |
| 1411 | ret = scif_drain_dma_poll(rdev->sdev, chan); |
| 1412 | if (ret) |
| 1413 | return ret; |
| 1414 | } |
| 1415 | if (src_window->type == SCIF_WINDOW_SELF) |
| 1416 | src_virt = _get_local_va(src_offset, src_window, |
| 1417 | loop_len); |
| 1418 | else |
| 1419 | src_virt = ioremap_remote(src_offset, src_window, |
| 1420 | loop_len, |
| 1421 | work->remote_dev, NULL); |
| 1422 | if (!src_virt) |
| 1423 | return -ENOMEM; |
| 1424 | |
| 1425 | if (dst_window->type == SCIF_WINDOW_SELF) |
| 1426 | dst_virt = _get_local_va(dst_offset, dst_window, |
| 1427 | loop_len); |
| 1428 | else |
| 1429 | dst_virt = ioremap_remote(dst_offset, dst_window, |
| 1430 | loop_len, |
| 1431 | work->remote_dev, NULL); |
| 1432 | if (!dst_virt) { |
| 1433 | if (src_window->type != SCIF_WINDOW_SELF) |
| 1434 | iounmap_remote(src_virt, loop_len, work); |
| 1435 | return -ENOMEM; |
| 1436 | } |
| 1437 | |
| 1438 | if (src_window->type == SCIF_WINDOW_SELF) |
| 1439 | scif_unaligned_cpy_toio(dst_virt, src_virt, loop_len, |
| 1440 | work->ordered); |
| 1441 | else |
| 1442 | scif_unaligned_cpy_fromio(dst_virt, src_virt, |
| 1443 | loop_len, work->ordered); |
| 1444 | if (src_window->type != SCIF_WINDOW_SELF) |
| 1445 | iounmap_remote(src_virt, loop_len, work); |
| 1446 | |
| 1447 | if (dst_window->type != SCIF_WINDOW_SELF) |
| 1448 | iounmap_remote(dst_virt, loop_len, work); |
| 1449 | remaining_len -= loop_len; |
| 1450 | } |
| 1451 | return ret; |
| 1452 | err: |
| 1453 | dev_err(scif_info.mdev.this_device, |
| 1454 | "%s %d Desc Prog Failed ret %d\n", |
| 1455 | __func__, __LINE__, ret); |
| 1456 | return ret; |
| 1457 | } |
| 1458 | |
| 1459 | /* |
| 1460 | * scif_rma_list_cpu_copy: |
| 1461 | * |
| 1462 | * Traverse all the windows and perform CPU copy. |
| 1463 | */ |
| 1464 | static int scif_rma_list_cpu_copy(struct scif_copy_work *work) |
| 1465 | { |
| 1466 | void *src_virt, *dst_virt; |
| 1467 | size_t loop_len, remaining_len; |
| 1468 | int src_page_off, dst_page_off; |
| 1469 | s64 src_offset = work->src_offset, dst_offset = work->dst_offset; |
| 1470 | struct scif_window *src_window = work->src_window; |
| 1471 | struct scif_window *dst_window = work->dst_window; |
| 1472 | s64 end_src_offset, end_dst_offset; |
| 1473 | int ret = 0; |
| 1474 | struct scif_window_iter src_win_iter; |
| 1475 | struct scif_window_iter dst_win_iter; |
| 1476 | |
| 1477 | remaining_len = work->len; |
| 1478 | |
| 1479 | scif_init_window_iter(src_window, &src_win_iter); |
| 1480 | scif_init_window_iter(dst_window, &dst_win_iter); |
| 1481 | while (remaining_len) { |
| 1482 | src_page_off = src_offset & ~PAGE_MASK; |
| 1483 | dst_page_off = dst_offset & ~PAGE_MASK; |
| 1484 | loop_len = min(PAGE_SIZE - |
| 1485 | max(src_page_off, dst_page_off), |
| 1486 | remaining_len); |
| 1487 | |
| 1488 | if (src_window->type == SCIF_WINDOW_SELF) |
| 1489 | src_virt = _get_local_va(src_offset, src_window, |
| 1490 | loop_len); |
| 1491 | else |
| 1492 | src_virt = ioremap_remote(src_offset, src_window, |
| 1493 | loop_len, |
| 1494 | work->remote_dev, |
| 1495 | &src_win_iter); |
| 1496 | if (!src_virt) { |
| 1497 | ret = -ENOMEM; |
| 1498 | goto error; |
| 1499 | } |
| 1500 | |
| 1501 | if (dst_window->type == SCIF_WINDOW_SELF) |
| 1502 | dst_virt = _get_local_va(dst_offset, dst_window, |
| 1503 | loop_len); |
| 1504 | else |
| 1505 | dst_virt = ioremap_remote(dst_offset, dst_window, |
| 1506 | loop_len, |
| 1507 | work->remote_dev, |
| 1508 | &dst_win_iter); |
| 1509 | if (!dst_virt) { |
| 1510 | if (src_window->type == SCIF_WINDOW_PEER) |
| 1511 | iounmap_remote(src_virt, loop_len, work); |
| 1512 | ret = -ENOMEM; |
| 1513 | goto error; |
| 1514 | } |
| 1515 | |
| 1516 | if (work->loopback) { |
| 1517 | memcpy(dst_virt, src_virt, loop_len); |
| 1518 | } else { |
| 1519 | if (src_window->type == SCIF_WINDOW_SELF) |
| 1520 | memcpy_toio((void __iomem __force *)dst_virt, |
| 1521 | src_virt, loop_len); |
| 1522 | else |
| 1523 | memcpy_fromio(dst_virt, |
| 1524 | (void __iomem __force *)src_virt, |
| 1525 | loop_len); |
| 1526 | } |
| 1527 | if (src_window->type == SCIF_WINDOW_PEER) |
| 1528 | iounmap_remote(src_virt, loop_len, work); |
| 1529 | |
| 1530 | if (dst_window->type == SCIF_WINDOW_PEER) |
| 1531 | iounmap_remote(dst_virt, loop_len, work); |
| 1532 | |
| 1533 | src_offset += loop_len; |
| 1534 | dst_offset += loop_len; |
| 1535 | remaining_len -= loop_len; |
| 1536 | if (remaining_len) { |
| 1537 | end_src_offset = src_window->offset + |
| 1538 | (src_window->nr_pages << PAGE_SHIFT); |
| 1539 | end_dst_offset = dst_window->offset + |
| 1540 | (dst_window->nr_pages << PAGE_SHIFT); |
| 1541 | if (src_offset == end_src_offset) { |
| 1542 | src_window = list_next_entry(src_window, list); |
| 1543 | scif_init_window_iter(src_window, |
| 1544 | &src_win_iter); |
| 1545 | } |
| 1546 | if (dst_offset == end_dst_offset) { |
Geliang Tang | 0d0ce9c | 2015-11-16 21:46:31 +0800 | [diff] [blame] | 1547 | dst_window = list_next_entry(dst_window, list); |
Sudeep Dutt | 7cc31cd | 2015-09-29 18:16:04 -0700 | [diff] [blame] | 1548 | scif_init_window_iter(dst_window, |
| 1549 | &dst_win_iter); |
| 1550 | } |
| 1551 | } |
| 1552 | } |
| 1553 | error: |
| 1554 | return ret; |
| 1555 | } |
| 1556 | |
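| | /* |
| | * scif_rma_list_dma_copy_wrapper: |
| | * |
| | * Pick the cheapest strategy for this transfer: a straight DMA copy |
| | * when the engine handles byte-aligned accesses or both offsets share |
| | * the same cache-line misalignment, a CPU copy for unaligned loopback, |
| | * and otherwise a DMA copy staged through a cache-line-aligned |
| | * temporary buffer. |
| | */ |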
| 1557 | static int scif_rma_list_dma_copy_wrapper(struct scif_endpt *epd, |
| 1558 | struct scif_copy_work *work, |
| 1559 | struct dma_chan *chan, off_t loffset) |
| 1560 | { |
| 1561 | int src_cache_off, dst_cache_off; |
| 1562 | s64 src_offset = work->src_offset, dst_offset = work->dst_offset; |
| 1563 | u8 *temp = NULL; |
| 1564 | bool src_local = true; |
| 1565 | struct scif_dma_comp_cb *comp_cb; |
| 1566 | dma_addr_t src_dma_addr, dst_dma_addr; |
| 1567 | int err; |
| 1568 | |
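| | /* The DMA engine accepts byte-aligned offsets and lengths directly */ |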
| 1569 | if (is_dma_copy_aligned(chan->device, 1, 1, 1)) |
| 1570 | return _scif_rma_list_dma_copy_aligned(work, chan); |
| 1571 | |
| 1572 | src_cache_off = src_offset & (L1_CACHE_BYTES - 1); |
| 1573 | dst_cache_off = dst_offset & (L1_CACHE_BYTES - 1); |
| 1574 | |
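| | /* |
| | * Equal cache-line misalignment on both sides is handled by the |
| | * aligned path; differing misalignment must be staged through a |
| | * cache-line-aligned bounce buffer below. |
| | */ |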
| 1575 | if (dst_cache_off == src_cache_off) |
| 1576 | return scif_rma_list_dma_copy_aligned(work, chan); |
| 1577 | |
| 1578 | if (work->loopback) |
| 1579 | return scif_rma_list_cpu_copy(work); |
| 1580 | src_dma_addr = __scif_off_to_dma_addr(work->src_window, src_offset); |
| 1581 | dst_dma_addr = __scif_off_to_dma_addr(work->dst_window, dst_offset); |
| 1582 | src_local = work->src_window->type == SCIF_WINDOW_SELF; |
| 1584 | |
| 1586 | /* Allocate dma_completion cb */ |
| 1587 | comp_cb = kzalloc(sizeof(*comp_cb), GFP_KERNEL); |
| 1588 | if (!comp_cb) |
| 1589 | goto error; |
| 1590 | |
| 1591 | work->comp_cb = comp_cb; |
| 1592 | comp_cb->cb_cookie = comp_cb; |
| 1593 | comp_cb->dma_completion_func = &scif_rma_completion_cb; |
| 1594 | |
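| | /* |
| | * Pick a bounce buffer: small transfers use kmalloc() with two cache |
| | * lines of padding for manual alignment, larger ones come from the |
| | * fixed-size unaligned_cache kmem_cache. |
| | */ |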
| 1595 | if (work->len + (L1_CACHE_BYTES << 1) < SCIF_KMEM_UNALIGNED_BUF_SIZE) { |
| 1596 | comp_cb->is_cache = false; |
| 1597 | /* Allocate padding bytes to align to a cache line */ |
| 1598 | temp = kmalloc(work->len + (L1_CACHE_BYTES << 1), |
| 1599 | GFP_KERNEL); |
| 1600 | if (!temp) |
| 1601 | goto free_comp_cb; |
| 1602 | comp_cb->temp_buf_to_free = temp; |
| 1603 | /* kmalloc(..) does not guarantee cache line alignment */ |
| 1604 | temp = PTR_ALIGN(temp, L1_CACHE_BYTES); |
| 1606 | } else { |
| 1607 | comp_cb->is_cache = true; |
| 1608 | temp = kmem_cache_alloc(unaligned_cache, GFP_KERNEL); |
| 1609 | if (!temp) |
| 1610 | goto free_comp_cb; |
| 1611 | comp_cb->temp_buf_to_free = temp; |
| 1612 | } |
| 1613 | |
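| | /* |
| | * Local source: CPU-copy into the bounce buffer at the destination's |
| | * cache-line offset so that the DMA sees identically aligned ends. |
| | * Remote source: round the source down to a cache line, DMA into the |
| | * bounce buffer, and save enough state for the completion callback to |
| | * CPU-copy the payload out to the real destination. |
| | */ |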
| 1614 | if (src_local) { |
| 1615 | temp += dst_cache_off; |
| 1616 | scif_rma_local_cpu_copy(work->src_offset, work->src_window, |
| 1617 | temp, work->len, true); |
| 1618 | } else { |
| 1619 | comp_cb->dst_window = work->dst_window; |
| 1620 | comp_cb->dst_offset = work->dst_offset; |
| 1621 | work->src_offset = work->src_offset - src_cache_off; |
| 1622 | comp_cb->len = work->len; |
| 1623 | work->len = ALIGN(work->len + src_cache_off, L1_CACHE_BYTES); |
| 1624 | comp_cb->header_padding = src_cache_off; |
| 1625 | } |
| 1626 | comp_cb->temp_buf = temp; |
| 1627 | |
| 1628 | err = scif_map_single(&comp_cb->temp_phys, temp, |
| 1629 | work->remote_dev, SCIF_KMEM_UNALIGNED_BUF_SIZE); |
| 1630 | if (err) |
| 1631 | goto free_temp_buf; |
| 1632 | comp_cb->sdev = work->remote_dev; |
| 1633 | if (scif_rma_list_dma_copy_unaligned(work, temp, chan, src_local) < 0) |
| 1634 | goto free_temp_buf; |
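| | /* |
| | * A remote source requires an interrupt fence so that the DMA |
| | * completion callback runs and copies the payload out of the |
| | * bounce buffer. |
| | */ |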
| 1635 | if (!src_local) |
| 1636 | work->fence_type = SCIF_DMA_INTR; |
| 1637 | return 0; |
| 1638 | free_temp_buf: |
| 1639 | if (comp_cb->is_cache) |
| 1640 | kmem_cache_free(unaligned_cache, comp_cb->temp_buf_to_free); |
| 1641 | else |
| 1642 | kfree(comp_cb->temp_buf_to_free); |
| 1643 | free_comp_cb: |
| 1644 | kfree(comp_cb); |
| 1645 | error: |
| 1646 | return -ENOMEM; |
| 1647 | } |
| 1648 | |
| 1649 | /** |
| 1650 | * scif_rma_copy: |
| 1651 | * @epd: end point descriptor. |
| 1652 | * @loffset: offset in local registered address space to/from which to copy |
| 1653 | * @addr: user virtual address to/from which to copy |
| 1654 | * @len: length of range to copy |
| 1655 | * @roffset: offset in remote registered address space to/from which to copy |
| 1656 | * @flags: SCIF_RMA_USECPU, SCIF_RMA_USECACHE, SCIF_RMA_SYNC and/or |
| | * SCIF_RMA_ORDERED |
| 1657 | * @dir: LOCAL->REMOTE or vice versa. |
| 1658 | * @last_chunk: true if this is the last chunk of a larger transfer |
| 1659 | * |
| 1660 | * Validate parameters, check if src/dst registered ranges requested for copy |
| 1661 | * are valid and initiate either CPU or DMA copy. |
| 1662 | */ |
| 1663 | static int scif_rma_copy(scif_epd_t epd, off_t loffset, unsigned long addr, |
| 1664 | size_t len, off_t roffset, int flags, |
| 1665 | enum scif_rma_dir dir, bool last_chunk) |
| 1666 | { |
| 1667 | struct scif_endpt *ep = (struct scif_endpt *)epd; |
| 1668 | struct scif_rma_req remote_req; |
| 1669 | struct scif_rma_req req; |
| 1670 | struct scif_window *local_window = NULL; |
| 1671 | struct scif_window *remote_window = NULL; |
| 1672 | struct scif_copy_work copy_work; |
| 1673 | bool loopback; |
| 1674 | int err = 0; |
| 1675 | struct dma_chan *chan; |
| 1676 | struct scif_mmu_notif *mmn = NULL; |
| 1677 | bool cache = false; |
| 1678 | struct device *spdev; |
| 1679 | |
| 1680 | err = scif_verify_epd(ep); |
| 1681 | if (err) |
| 1682 | return err; |
| 1683 | |
| 1684 | if (flags && !(flags & (SCIF_RMA_USECPU | SCIF_RMA_USECACHE | |
| 1685 | SCIF_RMA_SYNC | SCIF_RMA_ORDERED))) |
| 1686 | return -EINVAL; |
| 1687 | |
| 1688 | loopback = scifdev_self(ep->remote_dev); |
| 1689 | copy_work.fence_type = ((flags & SCIF_RMA_SYNC) && last_chunk) ? |
| 1690 | SCIF_DMA_POLL : 0; |
| 1691 | copy_work.ordered = !!((flags & SCIF_RMA_ORDERED) && last_chunk); |
| 1692 | |
| 1693 | /* Use CPU for Mgmt node <-> Mgmt node copies */ |
| 1694 | if (loopback && scif_is_mgmt_node()) { |
| 1695 | flags |= SCIF_RMA_USECPU; |
| 1696 | copy_work.fence_type = 0x0; |
| 1697 | } |
| 1698 | |
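| | /* Was registration caching (SCIF_RMA_USECACHE) requested? */ |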
| 1699 | cache = scif_is_set_reg_cache(flags); |
| 1700 | |
| 1701 | remote_req.out_window = &remote_window; |
| 1702 | remote_req.offset = roffset; |
| 1703 | remote_req.nr_bytes = len; |
| 1704 | /* |
| 1705 | * If transfer is from local to remote then the remote window |
| 1706 | * must be writable and vice versa. |
| 1707 | */ |
| 1708 | remote_req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_WRITE : VM_READ; |
| 1709 | remote_req.type = SCIF_WINDOW_PARTIAL; |
| 1710 | remote_req.head = &ep->rma_info.remote_reg_list; |
| 1711 | |
| 1712 | spdev = scif_get_peer_dev(ep->remote_dev); |
| 1713 | if (IS_ERR(spdev)) { |
| 1714 | err = PTR_ERR(spdev); |
| 1715 | return err; |
| 1716 | } |
| 1717 | |
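| | /* |
| | * Cached transfers against a user virtual address need an MMU |
| | * notifier so that cached temp windows are torn down when the |
| | * underlying address space changes. |
| | */ |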
| 1718 | if (addr && cache) { |
| 1719 | mutex_lock(&ep->rma_info.mmn_lock); |
| 1720 | mmn = scif_find_mmu_notifier(current->mm, &ep->rma_info); |
| 1721 | if (!mmn) |
Eric Biggers | d40a094 | 2015-12-11 20:09:16 -0600 | [diff] [blame] | 1722 | mmn = scif_add_mmu_notifier(current->mm, ep); |
Sudeep Dutt | 7cc31cd | 2015-09-29 18:16:04 -0700 | [diff] [blame] | 1723 | mutex_unlock(&ep->rma_info.mmn_lock); |
| 1724 | if (IS_ERR(mmn)) { |
| 1725 | scif_put_peer_dev(spdev); |
| 1726 | return PTR_ERR(mmn); |
| 1727 | } |
| 1728 | cache = cache && !scif_rma_tc_can_cache(ep, len); |
| 1729 | } |
| 1730 | mutex_lock(&ep->rma_info.rma_lock); |
| 1731 | if (addr) { |
| 1732 | req.out_window = &local_window; |
| 1733 | req.nr_bytes = ALIGN(len + (addr & ~PAGE_MASK), |
| 1734 | PAGE_SIZE); |
| 1735 | req.va_for_temp = addr & PAGE_MASK; |
| 1736 | req.prot = (dir == SCIF_LOCAL_TO_REMOTE ? |
| 1737 | VM_READ : VM_WRITE | VM_READ); |
| 1738 | /* Does a valid local window exist? */ |
| 1739 | if (mmn) { |
| 1740 | spin_lock(&ep->rma_info.tc_lock); |
| 1741 | req.head = &mmn->tc_reg_list; |
| 1742 | err = scif_query_tcw(ep, &req); |
| 1743 | spin_unlock(&ep->rma_info.tc_lock); |
| 1744 | } |
| 1745 | if (!mmn || err) { |
| 1746 | err = scif_register_temp(epd, req.va_for_temp, |
| 1747 | req.nr_bytes, req.prot, |
| 1748 | &loffset, &local_window); |
| 1749 | if (err) { |
| 1750 | mutex_unlock(&ep->rma_info.rma_lock); |
| 1751 | goto error; |
| 1752 | } |
| 1753 | if (!cache) |
| 1754 | goto skip_cache; |
| 1755 | atomic_inc(&ep->rma_info.tcw_refcount); |
| 1756 | atomic_add_return(local_window->nr_pages, |
| 1757 | &ep->rma_info.tcw_total_pages); |
| 1758 | if (mmn) { |
| 1759 | spin_lock(&ep->rma_info.tc_lock); |
| 1760 | scif_insert_tcw(local_window, |
| 1761 | &mmn->tc_reg_list); |
| 1762 | spin_unlock(&ep->rma_info.tc_lock); |
| 1763 | } |
| 1764 | } |
| 1765 | skip_cache: |
| 1766 | loffset = local_window->offset + |
| 1767 | (addr - local_window->va_for_temp); |
| 1768 | } else { |
| 1769 | req.out_window = &local_window; |
| 1770 | req.offset = loffset; |
| 1771 | /* |
| 1772 | * If transfer is from local to remote then the self window |
| 1773 | * must be readable and vice versa. |
| 1774 | */ |
| 1775 | req.prot = dir == SCIF_LOCAL_TO_REMOTE ? VM_READ : VM_WRITE; |
| 1776 | req.nr_bytes = len; |
| 1777 | req.type = SCIF_WINDOW_PARTIAL; |
| 1778 | req.head = &ep->rma_info.reg_list; |
| 1779 | /* Does a valid local window exist? */ |
| 1780 | err = scif_query_window(&req); |
| 1781 | if (err) { |
| 1782 | mutex_unlock(&ep->rma_info.rma_lock); |
| 1783 | goto error; |
| 1784 | } |
| 1785 | } |
| 1786 | |
| 1787 | /* Does a valid remote window exist? */ |
| 1788 | err = scif_query_window(&remote_req); |
| 1789 | if (err) { |
| 1790 | mutex_unlock(&ep->rma_info.rma_lock); |
| 1791 | goto error; |
| 1792 | } |
| 1793 | |
| 1794 | /* |
| 1795 | * Prepare copy_work for submitting work to the DMA kernel thread |
| 1796 | * or CPU copy routine. |
| 1797 | */ |
| 1798 | copy_work.len = len; |
| 1799 | copy_work.loopback = loopback; |
| 1800 | copy_work.remote_dev = ep->remote_dev; |
| 1801 | if (dir == SCIF_LOCAL_TO_REMOTE) { |
| 1802 | copy_work.src_offset = loffset; |
| 1803 | copy_work.src_window = local_window; |
| 1804 | copy_work.dst_offset = roffset; |
| 1805 | copy_work.dst_window = remote_window; |
| 1806 | } else { |
| 1807 | copy_work.src_offset = roffset; |
| 1808 | copy_work.src_window = remote_window; |
| 1809 | copy_work.dst_offset = loffset; |
| 1810 | copy_work.dst_window = local_window; |
| 1811 | } |
| 1812 | |
| 1813 | if (flags & SCIF_RMA_USECPU) { |
| 1814 | scif_rma_list_cpu_copy(&copy_work); |
| 1815 | } else { |
| 1816 | chan = ep->rma_info.dma_chan; |
| 1817 | err = scif_rma_list_dma_copy_wrapper(epd, &copy_work, |
| 1818 | chan, loffset); |
| 1819 | } |
| 1820 | if (addr && !cache) |
| 1821 | atomic_inc(&ep->rma_info.tw_refcount); |
| 1822 | |
| 1823 | mutex_unlock(&ep->rma_info.rma_lock); |
| 1824 | |
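| | /* |
| | * For the final chunk, drain the DMA channel so the transfer has |
| | * completed before returning, polling or waiting for an interrupt |
| | * depending on the fence type. |
| | */ |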
| 1825 | if (last_chunk) { |
| 1826 | struct scif_dev *rdev = ep->remote_dev; |
| 1827 | |
| 1828 | if (copy_work.fence_type == SCIF_DMA_POLL) |
| 1829 | err = scif_drain_dma_poll(rdev->sdev, |
| 1830 | ep->rma_info.dma_chan); |
| 1831 | else if (copy_work.fence_type == SCIF_DMA_INTR) |
| 1832 | err = scif_drain_dma_intr(rdev->sdev, |
| 1833 | ep->rma_info.dma_chan); |
| 1834 | } |
| 1835 | |
| 1836 | if (addr && !cache) |
| 1837 | scif_queue_for_cleanup(local_window, &scif_info.rma); |
| 1838 | scif_put_peer_dev(spdev); |
| 1839 | return err; |
| 1840 | error: |
| 1841 | if (err) { |
| 1842 | if (addr && local_window && !cache) |
| 1843 | scif_destroy_window(ep, local_window); |
| 1844 | dev_err(scif_info.mdev.this_device, |
| 1845 | "%s %d err %d len 0x%lx\n", |
| 1846 | __func__, __LINE__, err, len); |
| 1847 | } |
| 1848 | scif_put_peer_dev(spdev); |
| 1849 | return err; |
| 1850 | } |
| 1851 | |
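| | /* |
| | * The exported copy APIs below split unaligned transfers into |
| | * SCIF_MAX_UNALIGNED_BUF_SIZE chunks; only the final chunk carries |
| | * the SCIF_RMA_SYNC/SCIF_RMA_ORDERED semantics. |
| | */ |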
| 1852 | int scif_readfrom(scif_epd_t epd, off_t loffset, size_t len, |
| 1853 | off_t roffset, int flags) |
| 1854 | { |
| 1855 | int err; |
| 1856 | |
| 1857 | dev_dbg(scif_info.mdev.this_device, |
| 1858 | "SCIFAPI readfrom: ep %p loffset 0x%lx len 0x%lx offset 0x%lx flags 0x%x\n", |
| 1859 | epd, loffset, len, roffset, flags); |
| 1860 | if (scif_unaligned(loffset, roffset)) { |
| 1861 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { |
| 1862 | err = scif_rma_copy(epd, loffset, 0x0, |
| 1863 | SCIF_MAX_UNALIGNED_BUF_SIZE, |
| 1864 | roffset, flags, |
| 1865 | SCIF_REMOTE_TO_LOCAL, false); |
| 1866 | if (err) |
| 1867 | goto readfrom_err; |
| 1868 | loffset += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1869 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1870 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1871 | } |
| 1872 | } |
| 1873 | err = scif_rma_copy(epd, loffset, 0x0, len, |
| 1874 | roffset, flags, SCIF_REMOTE_TO_LOCAL, true); |
| 1875 | readfrom_err: |
| 1876 | return err; |
| 1877 | } |
| 1878 | EXPORT_SYMBOL_GPL(scif_readfrom); |
| 1879 | |
| 1880 | int scif_writeto(scif_epd_t epd, off_t loffset, size_t len, |
| 1881 | off_t roffset, int flags) |
| 1882 | { |
| 1883 | int err; |
| 1884 | |
| 1885 | dev_dbg(scif_info.mdev.this_device, |
| 1886 | "SCIFAPI writeto: ep %p loffset 0x%lx len 0x%lx roffset 0x%lx flags 0x%x\n", |
| 1887 | epd, loffset, len, roffset, flags); |
| 1888 | if (scif_unaligned(loffset, roffset)) { |
| 1889 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { |
| 1890 | err = scif_rma_copy(epd, loffset, 0x0, |
| 1891 | SCIF_MAX_UNALIGNED_BUF_SIZE, |
| 1892 | roffset, flags, |
| 1893 | SCIF_LOCAL_TO_REMOTE, false); |
| 1894 | if (err) |
| 1895 | goto writeto_err; |
| 1896 | loffset += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1897 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1898 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1899 | } |
| 1900 | } |
| 1901 | err = scif_rma_copy(epd, loffset, 0x0, len, |
| 1902 | roffset, flags, SCIF_LOCAL_TO_REMOTE, true); |
| 1903 | writeto_err: |
| 1904 | return err; |
| 1905 | } |
| 1906 | EXPORT_SYMBOL_GPL(scif_writeto); |
| 1907 | |
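| | /* |
| | * For the vreadfrom/vwriteto variants, registration caching is |
| | * disabled when an unaligned transfer spans multiple chunks. |
| | */ |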
| 1908 | int scif_vreadfrom(scif_epd_t epd, void *addr, size_t len, |
| 1909 | off_t roffset, int flags) |
| 1910 | { |
| 1911 | int err; |
| 1912 | |
| 1913 | dev_dbg(scif_info.mdev.this_device, |
| 1914 | "SCIFAPI vreadfrom: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", |
| 1915 | epd, addr, len, roffset, flags); |
| 1916 | if (scif_unaligned((off_t __force)addr, roffset)) { |
| 1917 | if (len > SCIF_MAX_UNALIGNED_BUF_SIZE) |
| 1918 | flags &= ~SCIF_RMA_USECACHE; |
| 1919 | |
| 1920 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { |
| 1921 | err = scif_rma_copy(epd, 0, (u64)addr, |
| 1922 | SCIF_MAX_UNALIGNED_BUF_SIZE, |
| 1923 | roffset, flags, |
| 1924 | SCIF_REMOTE_TO_LOCAL, false); |
| 1925 | if (err) |
| 1926 | goto vreadfrom_err; |
| 1927 | addr += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1928 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1929 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1930 | } |
| 1931 | } |
| 1932 | err = scif_rma_copy(epd, 0, (u64)addr, len, |
| 1933 | roffset, flags, SCIF_REMOTE_TO_LOCAL, true); |
| 1934 | vreadfrom_err: |
| 1935 | return err; |
| 1936 | } |
| 1937 | EXPORT_SYMBOL_GPL(scif_vreadfrom); |
| 1938 | |
| 1939 | int scif_vwriteto(scif_epd_t epd, void *addr, size_t len, |
| 1940 | off_t roffset, int flags) |
| 1941 | { |
| 1942 | int err; |
| 1943 | |
| 1944 | dev_dbg(scif_info.mdev.this_device, |
| 1945 | "SCIFAPI vwriteto: ep %p addr %p len 0x%lx roffset 0x%lx flags 0x%x\n", |
| 1946 | epd, addr, len, roffset, flags); |
| 1947 | if (scif_unaligned((off_t __force)addr, roffset)) { |
| 1948 | if (len > SCIF_MAX_UNALIGNED_BUF_SIZE) |
| 1949 | flags &= ~SCIF_RMA_USECACHE; |
| 1950 | |
| 1951 | while (len > SCIF_MAX_UNALIGNED_BUF_SIZE) { |
| 1952 | err = scif_rma_copy(epd, 0, (u64)addr, |
| 1953 | SCIF_MAX_UNALIGNED_BUF_SIZE, |
| 1954 | roffset, flags, |
| 1955 | SCIF_LOCAL_TO_REMOTE, false); |
| 1956 | if (err) |
| 1957 | goto vwriteto_err; |
| 1958 | addr += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1959 | roffset += SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1960 | len -= SCIF_MAX_UNALIGNED_BUF_SIZE; |
| 1961 | } |
| 1962 | } |
| 1963 | err = scif_rma_copy(epd, 0, (u64)addr, len, |
| 1964 | roffset, flags, SCIF_LOCAL_TO_REMOTE, true); |
| 1965 | vwriteto_err: |
| 1966 | return err; |
| 1967 | } |
| 1968 | EXPORT_SYMBOL_GPL(scif_vwriteto); |