/*
 * Intel MIC Platform Software Stack (MPSS)
 *
 * Copyright(c) 2015 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * Intel SCIF driver.
 *
 */

#include "scif_main.h"
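
/*
 * Fence message protocol (informal summary of the handlers below):
 *
 * SCIF_MARK        -> peer creates a mark on its DMA channel via
 *                     _scif_fence_mark()
 * SCIF_MARK_(N)ACK <- carries the mark back in payload[2]
 * SCIF_WAIT        -> peer queues the request; it is serviced off the
 *                     SCIF misc work queue by
 *                     scif_rma_handle_remote_fences()
 * SCIF_WAIT_(N)ACK <- completion status of the remote wait
 * SCIF_SIG_LOCAL/
 * SCIF_SIG_REMOTE  -> peer writes a 64 bit value to a registered
 *                     offset via scif_prog_signal()
 * SCIF_SIG_(N)ACK  <- completion status of the remote signal
 *
 * payload[0] always identifies the endpoint; the (N)ACK handlers
 * recover the waiting scif_fence_info from the payload and complete it.
 */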

/**
 * scif_recv_mark: Handle SCIF_MARK request
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has requested a mark.
 */
void scif_recv_mark(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int mark, err;

	err = _scif_fence_mark(ep, &mark);
	if (err)
		msg->uop = SCIF_MARK_NACK;
	else
		msg->uop = SCIF_MARK_ACK;
	msg->payload[0] = ep->remote_ep;
	msg->payload[2] = mark;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_mark_resp: Handle SCIF_MARK_(N)ACK messages.
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_MARK message.
 */
void scif_recv_mark_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_MARK_ACK) {
		fence_req->state = OP_COMPLETED;
		fence_req->dma_mark = (int)msg->payload[2];
	} else {
		fence_req->state = OP_FAILED;
	}
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_wait: Handle SCIF_WAIT request
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has requested waiting on a fence.
 */
void scif_recv_wait(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_remote_fence_info *fence;

	/*
	 * Allocate structure for remote fence information and
	 * send a NACK if the allocation failed. The peer will
	 * return -ENOMEM upon receiving a NACK.
	 */
	fence = kmalloc(sizeof(*fence), GFP_KERNEL);
	if (!fence) {
		msg->payload[0] = ep->remote_ep;
		msg->uop = SCIF_WAIT_NACK;
		scif_nodeqp_send(ep->remote_dev, msg);
		return;
	}

	/* Prepare the fence request */
	memcpy(&fence->msg, msg, sizeof(struct scifmsg));
	INIT_LIST_HEAD(&fence->list);

	/* Insert to the global remote fence request list */
	mutex_lock(&scif_info.fencelock);
	atomic_inc(&ep->rma_info.fence_refcount);
	list_add_tail(&fence->list, &scif_info.fence);
	mutex_unlock(&scif_info.fencelock);

	/* The wait itself is serviced by scif_rma_handle_remote_fences() */
	schedule_work(&scif_info.misc_work);
}

/**
 * scif_recv_wait_resp: Handle SCIF_WAIT_(N)ACK messages.
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has responded to a SCIF_WAIT message.
 */
void scif_recv_wait_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[1];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_WAIT_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

/**
 * scif_recv_sig_local: Handle SCIF_SIG_LOCAL request
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a local offset.
 */
void scif_recv_sig_local(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_SELF);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_remote: Handle SCIF_SIG_REMOTE request
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has requested a signal on a remote offset.
 */
void scif_recv_sig_remote(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	int err;

	err = scif_prog_signal(ep, msg->payload[1], msg->payload[2],
			       SCIF_WINDOW_PEER);
	if (err)
		msg->uop = SCIF_SIG_NACK;
	else
		msg->uop = SCIF_SIG_ACK;
	msg->payload[0] = ep->remote_ep;
	scif_nodeqp_send(ep->remote_dev, msg);
}

/**
 * scif_recv_sig_resp: Handle SCIF_SIG_(N)ACK messages.
 * @scifdev: SCIF device the message arrived on
 * @msg: Interrupt message
 *
 * The peer has responded to a signal request.
 */
void scif_recv_sig_resp(struct scif_dev *scifdev, struct scifmsg *msg)
{
	struct scif_endpt *ep = (struct scif_endpt *)msg->payload[0];
	struct scif_fence_info *fence_req =
		(struct scif_fence_info *)msg->payload[3];

	mutex_lock(&ep->rma_info.rma_lock);
	if (msg->uop == SCIF_SIG_ACK)
		fence_req->state = OP_COMPLETED;
	else
		fence_req->state = OP_FAILED;
	mutex_unlock(&ep->rma_info.rma_lock);
	complete(&fence_req->comp);
}

static inline void *scif_get_local_va(off_t off, struct scif_window *window)
{
	struct page **pages = window->pinned_pages->pages;
	int page_nr = (off - window->offset) >> PAGE_SHIFT;
	off_t page_off = off & ~PAGE_MASK;

	return page_address(pages[page_nr]) + page_off;
}
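
/*
 * Worked example for scif_get_local_va() (assuming 4K pages): for a
 * window registered at offset 0x10000, off = 0x11234 gives
 * page_nr = (0x11234 - 0x10000) >> 12 = 1 and page_off = 0x234, so the
 * returned pointer is page_address(pages[1]) + 0x234.
 */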

static void scif_prog_signal_cb(void *arg)
{
	struct scif_status *status = arg;

	dma_pool_free(status->ep->remote_dev->signal_pool, status,
		      status->src_dma_addr);
}
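
/*
 * _scif_prog_signal() queues two DMA descriptors: a zero byte memcpy
 * with DMA_PREP_FENCE so the signal is ordered behind all prior DMA on
 * the channel, followed by the actual 8 byte write of @val to @dst. On
 * X100 (a channel that cannot do byte aligned copies) the write uses
 * the hardware status descriptor; otherwise @val is staged in a
 * dma_pool backed scif_status and copied over, with the pool entry
 * freed from scif_prog_signal_cb() once the copy completes.
 */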
static int _scif_prog_signal(scif_epd_t epd, dma_addr_t dst, u64 val)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	bool x100 = !is_dma_copy_aligned(chan->device, 1, 1, 1);
	struct dma_async_tx_descriptor *tx;
	struct scif_status *status = NULL;
	dma_addr_t src;
	dma_cookie_t cookie;
	int err;

	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto alloc_fail;
	}
	dma_async_issue_pending(chan);
	if (x100) {
		/*
		 * For X100 use the status descriptor to write the value to
		 * the destination.
		 */
		tx = ddev->device_prep_dma_imm_data(chan, dst, val, 0);
	} else {
		status = dma_pool_alloc(ep->remote_dev->signal_pool, GFP_KERNEL,
					&src);
		if (!status) {
			err = -ENOMEM;
			dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
				__func__, __LINE__, err);
			goto alloc_fail;
		}
		status->val = val;
		status->src_dma_addr = src;
		status->ep = ep;
		src += offsetof(struct scif_status, val);
		tx = ddev->device_prep_dma_memcpy(chan, dst, src, sizeof(val),
						  DMA_PREP_INTERRUPT);
	}
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	if (!x100) {
		tx->callback = scif_prog_signal_cb;
		tx->callback_param = status;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = -EIO;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		goto dma_fail;
	}
	dma_async_issue_pending(chan);
	return 0;
dma_fail:
	if (!x100)
		dma_pool_free(ep->remote_dev->signal_pool, status,
			      status->src_dma_addr);
alloc_fail:
	return err;
}

/*
 * scif_prog_signal:
 * @epd: endpoint descriptor
 * @offset: registered offset at which to write @val
 * @val: value to be written at @offset
 * @type: type of the window (self or peer)
 *
 * Arrange to write a value to the registered offset after ensuring that
 * the offset provided is indeed valid.
 */
int scif_prog_signal(scif_epd_t epd, off_t offset, u64 val,
		     enum scif_window_type type)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct scif_window *window = NULL;
	struct scif_rma_req req;
	dma_addr_t dst_dma_addr;
	int err;

	mutex_lock(&ep->rma_info.rma_lock);
	req.out_window = &window;
	req.offset = offset;
	req.nr_bytes = sizeof(u64);
	req.prot = SCIF_PROT_WRITE;
	req.type = SCIF_WINDOW_SINGLE;
	if (type == SCIF_WINDOW_SELF)
		req.head = &ep->rma_info.reg_list;
	else
		req.head = &ep->rma_info.remote_reg_list;
	/* Does a valid window exist? */
	err = scif_query_window(&req);
	if (err) {
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
		goto unlock_ret;
	}

	if (scif_is_mgmt_node() && scifdev_self(ep->remote_dev)) {
		u64 *dst_virt;

		if (type == SCIF_WINDOW_SELF)
			dst_virt = scif_get_local_va(offset, window);
		else
			dst_virt =
			scif_get_local_va(offset, (struct scif_window *)
					  window->peer_window);
		*dst_virt = val;
	} else {
		dst_dma_addr = __scif_off_to_dma_addr(window, offset);
		err = _scif_prog_signal(epd, dst_dma_addr, val);
	}
unlock_ret:
	mutex_unlock(&ep->rma_info.rma_lock);
	return err;
}
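
/*
 * A local mark is the dma_cookie_t of the interrupt descriptor
 * submitted in _scif_fence_mark(); _scif_fence_wait() below therefore
 * sleeps on markwq until dma_async_is_tx_complete() reports
 * DMA_COMPLETE for that cookie, bounded by SCIF_NODE_ALIVE_TIMEOUT.
 */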
static int _scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	dma_cookie_t cookie = mark & ~SCIF_REMOTE_FENCE;
	int err;

	/* Wait for DMA callback in scif_fence_mark_cb(..) */
	err = wait_event_interruptible_timeout(ep->rma_info.markwq,
					       dma_async_is_tx_complete(
					       ep->rma_info.dma_chan,
					       cookie, NULL, NULL) ==
					       DMA_COMPLETE,
					       SCIF_NODE_ALIVE_TIMEOUT);
	if (!err)
		err = -ETIMEDOUT;
	else if (err > 0)
		err = 0;
	return err;
}

/**
 * scif_rma_handle_remote_fences:
 *
 * This routine services remote fence requests.
 */
void scif_rma_handle_remote_fences(void)
{
	struct list_head *item, *tmp;
	struct scif_remote_fence_info *fence;
	struct scif_endpt *ep;
	int mark, err;

	might_sleep();
	mutex_lock(&scif_info.fencelock);
	list_for_each_safe(item, tmp, &scif_info.fence) {
		fence = list_entry(item, struct scif_remote_fence_info,
				   list);
		/* Remove fence from global list */
		list_del(&fence->list);

		/* Initiate the fence operation */
		ep = (struct scif_endpt *)fence->msg.payload[0];
		mark = fence->msg.payload[2];
		err = _scif_fence_wait(ep, mark);
		if (err)
			fence->msg.uop = SCIF_WAIT_NACK;
		else
			fence->msg.uop = SCIF_WAIT_ACK;
		fence->msg.payload[0] = ep->remote_ep;
		scif_nodeqp_send(ep->remote_dev, &fence->msg);
		kfree(fence);
		if (!atomic_sub_return(1, &ep->rma_info.fence_refcount))
			schedule_work(&scif_info.misc_work);
	}
	mutex_unlock(&scif_info.fencelock);
}

static int _scif_send_fence(scif_epd_t epd, int uop, int mark, int *out_mark)
{
	int err;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}

	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);

	msg.src = ep->port;
	msg.uop = uop;
	msg.payload[0] = ep->remote_ep;
	msg.payload[1] = (u64)fence_req;
	if (uop == SCIF_WAIT)
		msg.payload[2] = mark;
	spin_lock(&ep->lock);
	if (ep->state == SCIFEP_CONNECTED)
		err = scif_nodeqp_send(ep->remote_dev, &msg);
	else
		err = -ENOTCONN;
	spin_unlock(&ep->lock);
	if (err)
		goto error_free;
retry:
	/* Wait for a SCIF_MARK_(N)ACK or SCIF_WAIT_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	mutex_lock(&ep->rma_info.rma_lock);
	if (err < 0) {
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENOMEM;
	if (uop == SCIF_MARK && fence_req->state == OP_COMPLETED)
		*out_mark = SCIF_REMOTE_FENCE | fence_req->dma_mark;
	mutex_unlock(&ep->rma_info.rma_lock);
error_free:
	kfree(fence_req);
error:
	return err;
}
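
/*
 * Marks returned for a remote fence have SCIF_REMOTE_FENCE set so that
 * scif_fence_wait() can tell them apart from local marks and route the
 * wait back to the peer that owns the DMA channel the mark was taken on.
 */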

/**
 * scif_send_fence_mark:
 * @epd: endpoint descriptor.
 * @out_mark: output DMA mark reported by the peer.
 *
 * Send a remote fence mark request.
 */
static int scif_send_fence_mark(scif_epd_t epd, int *out_mark)
{
	return _scif_send_fence(epd, SCIF_MARK, 0, out_mark);
}

/**
 * scif_send_fence_wait:
 * @epd: endpoint descriptor.
 * @mark: DMA mark to wait for.
 *
 * Send a remote fence wait request.
 */
static int scif_send_fence_wait(scif_epd_t epd, int mark)
{
	return _scif_send_fence(epd, SCIF_WAIT, mark, NULL);
}

static int _scif_send_fence_signal_wait(struct scif_endpt *ep,
					struct scif_fence_info *fence_req)
{
	int err;

retry:
	/* Wait for a SCIF_SIG_(N)ACK message */
	err = wait_for_completion_timeout(&fence_req->comp,
					  SCIF_NODE_ALIVE_TIMEOUT);
	if (!err && scifdev_alive(ep))
		goto retry;
	if (!err)
		err = -ENODEV;
	if (err > 0)
		err = 0;
	if (err < 0) {
		mutex_lock(&ep->rma_info.rma_lock);
		if (fence_req->state == OP_IN_PROGRESS)
			fence_req->state = OP_FAILED;
		mutex_unlock(&ep->rma_info.rma_lock);
	}
	if (fence_req->state == OP_FAILED && !err)
		err = -ENXIO;
	return err;
}

/**
 * scif_send_fence_signal:
 * @epd: endpoint descriptor
 * @roff: remote offset
 * @rval: value to write at @roff
 * @loff: local offset
 * @lval: value to write at @loff
 * @flags: flags
 *
 * Sends a remote fence signal request.
 */
static int scif_send_fence_signal(scif_epd_t epd, off_t roff, u64 rval,
				  off_t loff, u64 lval, int flags)
{
	int err = 0;
	struct scifmsg msg;
	struct scif_fence_info *fence_req;
	struct scif_endpt *ep = (struct scif_endpt *)epd;

	fence_req = kmalloc(sizeof(*fence_req), GFP_KERNEL);
	if (!fence_req) {
		err = -ENOMEM;
		goto error;
	}

	fence_req->state = OP_IN_PROGRESS;
	init_completion(&fence_req->comp);
	msg.src = ep->port;
	if (flags & SCIF_SIGNAL_LOCAL) {
		msg.uop = SCIF_SIG_LOCAL;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = roff;
		msg.payload[2] = rval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
		if (err)
			goto error_free;
	}
	/* Reset the request state for the remote signal phase below */
	fence_req->state = OP_IN_PROGRESS;

	if (flags & SCIF_SIGNAL_REMOTE) {
		msg.uop = SCIF_SIG_REMOTE;
		msg.payload[0] = ep->remote_ep;
		msg.payload[1] = loff;
		msg.payload[2] = lval;
		msg.payload[3] = (u64)fence_req;
		spin_lock(&ep->lock);
		if (ep->state == SCIFEP_CONNECTED)
			err = scif_nodeqp_send(ep->remote_dev, &msg);
		else
			err = -ENOTCONN;
		spin_unlock(&ep->lock);
		if (err)
			goto error_free;
		err = _scif_send_fence_signal_wait(ep, fence_req);
	}
error_free:
	kfree(fence_req);
error:
	return err;
}

static void scif_fence_mark_cb(void *arg)
{
	struct scif_endpt *ep = (struct scif_endpt *)arg;

	wake_up_interruptible(&ep->rma_info.markwq);
	atomic_dec(&ep->rma_info.fence_refcount);
}

/*
 * _scif_fence_mark:
 * @epd: endpoint descriptor
 * @mark: set to the value of the mark on success
 *
 * Set up a mark for this endpoint and return its value in @mark.
 */
int _scif_fence_mark(scif_epd_t epd, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	struct dma_chan *chan = ep->rma_info.dma_chan;
	struct dma_device *ddev = chan->device;
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;
	int err;

	/* A zero byte memcpy with DMA_PREP_FENCE orders behind prior DMA */
	tx = ddev->device_prep_dma_memcpy(chan, 0, 0, 0, DMA_PREP_FENCE);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	dma_async_issue_pending(chan);
	/* The cookie of this interrupt descriptor is the mark */
	tx = ddev->device_prep_dma_interrupt(chan, DMA_PREP_INTERRUPT);
	if (!tx) {
		err = -ENOMEM;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	tx->callback = scif_fence_mark_cb;
	tx->callback_param = ep;
	*mark = cookie = tx->tx_submit(tx);
	if (dma_submit_error(cookie)) {
		err = (int)cookie;
		dev_err(&ep->remote_dev->sdev->dev, "%s %d err %d\n",
			__func__, __LINE__, err);
		return err;
	}
	atomic_inc(&ep->rma_info.fence_refcount);
	dma_async_issue_pending(chan);
	return 0;
}

#define SCIF_LOOPB_MAGIC_MARK 0xdead

int scif_fence_mark(scif_epd_t epd, int flags, int *mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x\n",
		ep, flags, *mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/*
	 * Management node loopback does not need to use DMA.
	 * Return a valid mark to be symmetric.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		*mark = SCIF_LOOPB_MAGIC_MARK;
		return 0;
	}

	if (flags & SCIF_FENCE_INIT_SELF)
		err = _scif_fence_mark(epd, mark);
	else
		err = scif_send_fence_mark(ep, mark);

	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_mark: ep %p flags 0x%x mark 0x%x err %d\n",
		ep, flags, *mark, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_mark);
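
/*
 * Example usage (sketch): mark all RMAs initiated through this endpoint
 * and then wait for the mark, i.e. for those RMAs to complete:
 *
 *	int mark, err;
 *
 *	err = scif_fence_mark(epd, SCIF_FENCE_INIT_SELF, &mark);
 *	if (!err)
 *		err = scif_fence_wait(epd, mark);
 */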

int scif_fence_wait(scif_epd_t epd, int mark)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_wait: ep %p mark 0x%x\n",
		ep, mark);
	err = scif_verify_epd(ep);
	if (err)
		return err;
	/*
	 * Management node loopback does not need to use DMA.
	 * The only valid mark provided is SCIF_LOOPB_MAGIC_MARK
	 * so simply return success if the mark matches.
	 */
	if (scifdev_self(ep->remote_dev) && scif_is_mgmt_node()) {
		if (mark == SCIF_LOOPB_MAGIC_MARK)
			return 0;
		else
			return -EINVAL;
	}
	if (mark & SCIF_REMOTE_FENCE)
		err = scif_send_fence_wait(epd, mark);
	else
		err = _scif_fence_wait(epd, mark);
	if (err < 0)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_wait);

int scif_fence_signal(scif_epd_t epd, off_t loff, u64 lval,
		      off_t roff, u64 rval, int flags)
{
	struct scif_endpt *ep = (struct scif_endpt *)epd;
	int err = 0;

	dev_dbg(scif_info.mdev.this_device,
		"SCIFAPI fence_signal: ep %p loff 0x%lx lval 0x%llx roff 0x%lx rval 0x%llx flags 0x%x\n",
		ep, loff, lval, roff, rval, flags);
	err = scif_verify_epd(ep);
	if (err)
		return err;

	/* Invalid flags? */
	if (flags & ~(SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER |
			SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE))
		return -EINVAL;

	/* At least one of init self or peer RMA should be set */
	if (!(flags & (SCIF_FENCE_INIT_SELF | SCIF_FENCE_INIT_PEER)))
		return -EINVAL;

	/* Exactly one of init self or peer RMA should be set but not both */
	if ((flags & SCIF_FENCE_INIT_SELF) && (flags & SCIF_FENCE_INIT_PEER))
		return -EINVAL;

	/* At least one of SCIF_SIGNAL_LOCAL or SCIF_SIGNAL_REMOTE required */
	if (!(flags & (SCIF_SIGNAL_LOCAL | SCIF_SIGNAL_REMOTE)))
		return -EINVAL;

	/* Only dword-aligned offsets allowed */
	if ((flags & SCIF_SIGNAL_LOCAL) && (loff & (sizeof(u32) - 1)))
		return -EINVAL;

	/* Only dword-aligned offsets allowed */
	if ((flags & SCIF_SIGNAL_REMOTE) && (roff & (sizeof(u32) - 1)))
		return -EINVAL;

	if (flags & SCIF_FENCE_INIT_PEER) {
		err = scif_send_fence_signal(epd, roff, rval, loff,
					     lval, flags);
	} else {
		/* Signal in the local registered address space (RAS) */
		if (flags & SCIF_SIGNAL_LOCAL) {
			err = scif_prog_signal(epd, loff, lval,
					       SCIF_WINDOW_SELF);
			if (err)
				goto error_ret;
		}

		/* Signal in the remote RAS */
		if (flags & SCIF_SIGNAL_REMOTE)
			err = scif_prog_signal(epd, roff,
					       rval, SCIF_WINDOW_PEER);
	}
error_ret:
	if (err)
		dev_err(scif_info.mdev.this_device,
			"%s %d err %d\n", __func__, __LINE__, err);
	return err;
}
EXPORT_SYMBOL_GPL(scif_fence_signal);
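
/*
 * Example usage (sketch): once RMAs initiated through this endpoint
 * complete, write 1 to a u64 at local registered offset loff and to a
 * u64 at remote registered offset roff:
 *
 *	err = scif_fence_signal(epd, loff, 1, roff, 1,
 *				SCIF_FENCE_INIT_SELF | SCIF_SIGNAL_LOCAL |
 *				SCIF_SIGNAL_REMOTE);
 */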