blob: 1fe4f726cee9b65f9d2bd0f2064444b40b1c2d79 [file] [log] [blame]
Heiko J Schickfab97222006-09-22 15:22:22 -07001/*
2 * IBM eServer eHCA Infiniband device driver for Linux on POWER
3 *
4 * MR/MW functions
5 *
6 * Authors: Dietmar Decker <ddecker@de.ibm.com>
7 * Christoph Raisch <raisch@de.ibm.com>
8 *
9 * Copyright (c) 2005 IBM Corporation
10 *
11 * All rights reserved.
12 *
13 * This source code is distributed under a dual license of GPL v2.0 and OpenIB
14 * BSD.
15 *
16 * OpenIB BSD License
17 *
18 * Redistribution and use in source and binary forms, with or without
19 * modification, are permitted provided that the following conditions are met:
20 *
21 * Redistributions of source code must retain the above copyright notice, this
22 * list of conditions and the following disclaimer.
23 *
24 * Redistributions in binary form must reproduce the above copyright notice,
25 * this list of conditions and the following disclaimer in the documentation
26 * and/or other materials
27 * provided with the distribution.
28 *
29 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
30 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
33 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
34 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
35 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
36 * BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER
37 * IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
38 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
39 * POSSIBILITY OF SUCH DAMAGE.
40 */
41
Roland Dreierf7c6a7b2007-03-04 16:15:11 -080042#include <rdma/ib_umem.h>
43
Heiko J Schickfab97222006-09-22 15:22:22 -070044#include <asm/current.h>
45
46#include "ehca_iverbs.h"
47#include "ehca_mrmw.h"
48#include "hcp_if.h"
49#include "hipz_hw.h"
50
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +020051/* max number of rpages (per hcall register_rpages) */
52#define MAX_RPAGES 512
53
Heiko J Schickfab97222006-09-22 15:22:22 -070054static struct kmem_cache *mr_cache;
55static struct kmem_cache *mw_cache;
56
57static struct ehca_mr *ehca_mr_new(void)
58{
59 struct ehca_mr *me;
60
Robert P. J. Dayc3762222007-02-10 01:45:03 -080061 me = kmem_cache_zalloc(mr_cache, GFP_KERNEL);
Heiko J Schickfab97222006-09-22 15:22:22 -070062 if (me) {
Heiko J Schickfab97222006-09-22 15:22:22 -070063 spin_lock_init(&me->mrlock);
64 } else
65 ehca_gen_err("alloc failed");
66
67 return me;
68}
69
/* Return an MR control block to its slab cache. Caller owns teardown order. */
static void ehca_mr_delete(struct ehca_mr *me)
{
	kmem_cache_free(mr_cache, me);
}
74
75static struct ehca_mw *ehca_mw_new(void)
76{
77 struct ehca_mw *me;
78
Robert P. J. Dayc3762222007-02-10 01:45:03 -080079 me = kmem_cache_zalloc(mw_cache, GFP_KERNEL);
Heiko J Schickfab97222006-09-22 15:22:22 -070080 if (me) {
Heiko J Schickfab97222006-09-22 15:22:22 -070081 spin_lock_init(&me->mwlock);
82 } else
83 ehca_gen_err("alloc failed");
84
85 return me;
86}
87
/* Return an MW control block to its slab cache. */
static void ehca_mw_delete(struct ehca_mw *me)
{
	kmem_cache_free(mw_cache, me);
}
92
93/*----------------------------------------------------------------------*/
94
95struct ib_mr *ehca_get_dma_mr(struct ib_pd *pd, int mr_access_flags)
96{
97 struct ib_mr *ib_mr;
98 int ret;
99 struct ehca_mr *e_maxmr;
100 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
101 struct ehca_shca *shca =
102 container_of(pd->device, struct ehca_shca, ib_device);
103
104 if (shca->maxmr) {
105 e_maxmr = ehca_mr_new();
106 if (!e_maxmr) {
107 ehca_err(&shca->ib_device, "out of memory");
108 ib_mr = ERR_PTR(-ENOMEM);
109 goto get_dma_mr_exit0;
110 }
111
112 ret = ehca_reg_maxmr(shca, e_maxmr, (u64*)KERNELBASE,
113 mr_access_flags, e_pd,
114 &e_maxmr->ib.ib_mr.lkey,
115 &e_maxmr->ib.ib_mr.rkey);
116 if (ret) {
Hoang-Nam Nguyen3df78f82007-07-12 17:48:22 +0200117 ehca_mr_delete(e_maxmr);
Heiko J Schickfab97222006-09-22 15:22:22 -0700118 ib_mr = ERR_PTR(ret);
119 goto get_dma_mr_exit0;
120 }
121 ib_mr = &e_maxmr->ib.ib_mr;
122 } else {
123 ehca_err(&shca->ib_device, "no internal max-MR exist!");
124 ib_mr = ERR_PTR(-EINVAL);
125 goto get_dma_mr_exit0;
126 }
127
128get_dma_mr_exit0:
129 if (IS_ERR(ib_mr))
130 ehca_err(&shca->ib_device, "rc=%lx pd=%p mr_access_flags=%x ",
131 PTR_ERR(ib_mr), pd, mr_access_flags);
132 return ib_mr;
133} /* end ehca_get_dma_mr() */
134
135/*----------------------------------------------------------------------*/
136
137struct ib_mr *ehca_reg_phys_mr(struct ib_pd *pd,
138 struct ib_phys_buf *phys_buf_array,
139 int num_phys_buf,
140 int mr_access_flags,
141 u64 *iova_start)
142{
143 struct ib_mr *ib_mr;
144 int ret;
145 struct ehca_mr *e_mr;
146 struct ehca_shca *shca =
147 container_of(pd->device, struct ehca_shca, ib_device);
148 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
149
150 u64 size;
151 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
152 u32 num_pages_mr;
153 u32 num_pages_4k; /* 4k portion "pages" */
154
155 if ((num_phys_buf <= 0) || !phys_buf_array) {
156 ehca_err(pd->device, "bad input values: num_phys_buf=%x "
157 "phys_buf_array=%p", num_phys_buf, phys_buf_array);
158 ib_mr = ERR_PTR(-EINVAL);
159 goto reg_phys_mr_exit0;
160 }
161 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
162 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
163 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
164 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
165 /*
166 * Remote Write Access requires Local Write Access
167 * Remote Atomic Access requires Local Write Access
168 */
169 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
170 mr_access_flags);
171 ib_mr = ERR_PTR(-EINVAL);
172 goto reg_phys_mr_exit0;
173 }
174
175 /* check physical buffer list and calculate size */
176 ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array, num_phys_buf,
177 iova_start, &size);
178 if (ret) {
179 ib_mr = ERR_PTR(ret);
180 goto reg_phys_mr_exit0;
181 }
182 if ((size == 0) ||
183 (((u64)iova_start + size) < (u64)iova_start)) {
184 ehca_err(pd->device, "bad input values: size=%lx iova_start=%p",
185 size, iova_start);
186 ib_mr = ERR_PTR(-EINVAL);
187 goto reg_phys_mr_exit0;
188 }
189
190 e_mr = ehca_mr_new();
191 if (!e_mr) {
192 ehca_err(pd->device, "out of memory");
193 ib_mr = ERR_PTR(-ENOMEM);
194 goto reg_phys_mr_exit0;
195 }
196
197 /* determine number of MR pages */
198 num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size +
199 PAGE_SIZE - 1) / PAGE_SIZE);
200 num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size +
201 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
202
203 /* register MR on HCA */
204 if (ehca_mr_is_maxmr(size, iova_start)) {
205 e_mr->flags |= EHCA_MR_FLAG_MAXMR;
206 ret = ehca_reg_maxmr(shca, e_mr, iova_start, mr_access_flags,
207 e_pd, &e_mr->ib.ib_mr.lkey,
208 &e_mr->ib.ib_mr.rkey);
209 if (ret) {
210 ib_mr = ERR_PTR(ret);
211 goto reg_phys_mr_exit1;
212 }
213 } else {
214 pginfo.type = EHCA_MR_PGI_PHYS;
215 pginfo.num_pages = num_pages_mr;
216 pginfo.num_4k = num_pages_4k;
217 pginfo.num_phys_buf = num_phys_buf;
218 pginfo.phys_buf_array = phys_buf_array;
219 pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
220 EHCA_PAGESIZE);
221
222 ret = ehca_reg_mr(shca, e_mr, iova_start, size, mr_access_flags,
223 e_pd, &pginfo, &e_mr->ib.ib_mr.lkey,
224 &e_mr->ib.ib_mr.rkey);
225 if (ret) {
226 ib_mr = ERR_PTR(ret);
227 goto reg_phys_mr_exit1;
228 }
229 }
230
231 /* successful registration of all pages */
232 return &e_mr->ib.ib_mr;
233
234reg_phys_mr_exit1:
235 ehca_mr_delete(e_mr);
236reg_phys_mr_exit0:
237 if (IS_ERR(ib_mr))
238 ehca_err(pd->device, "rc=%lx pd=%p phys_buf_array=%p "
239 "num_phys_buf=%x mr_access_flags=%x iova_start=%p",
240 PTR_ERR(ib_mr), pd, phys_buf_array,
241 num_phys_buf, mr_access_flags, iova_start);
242 return ib_mr;
243} /* end ehca_reg_phys_mr() */
244
245/*----------------------------------------------------------------------*/
246
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800247struct ib_mr *ehca_reg_user_mr(struct ib_pd *pd, u64 start, u64 length, u64 virt,
248 int mr_access_flags, struct ib_udata *udata)
Heiko J Schickfab97222006-09-22 15:22:22 -0700249{
250 struct ib_mr *ib_mr;
251 struct ehca_mr *e_mr;
252 struct ehca_shca *shca =
253 container_of(pd->device, struct ehca_shca, ib_device);
254 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
255 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
256 int ret;
257 u32 num_pages_mr;
258 u32 num_pages_4k; /* 4k portion "pages" */
259
260 if (!pd) {
261 ehca_gen_err("bad pd=%p", pd);
262 return ERR_PTR(-EFAULT);
263 }
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800264
Heiko J Schickfab97222006-09-22 15:22:22 -0700265 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
266 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
267 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
268 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
269 /*
270 * Remote Write Access requires Local Write Access
271 * Remote Atomic Access requires Local Write Access
272 */
273 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
274 mr_access_flags);
275 ib_mr = ERR_PTR(-EINVAL);
276 goto reg_user_mr_exit0;
277 }
Heiko J Schickfab97222006-09-22 15:22:22 -0700278
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800279 if (length == 0 || virt + length < virt) {
Heiko J Schickfab97222006-09-22 15:22:22 -0700280 ehca_err(pd->device, "bad input values: length=%lx "
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800281 "virt_base=%lx", length, virt);
Heiko J Schickfab97222006-09-22 15:22:22 -0700282 ib_mr = ERR_PTR(-EINVAL);
283 goto reg_user_mr_exit0;
284 }
285
286 e_mr = ehca_mr_new();
287 if (!e_mr) {
288 ehca_err(pd->device, "out of memory");
289 ib_mr = ERR_PTR(-ENOMEM);
290 goto reg_user_mr_exit0;
291 }
292
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800293 e_mr->umem = ib_umem_get(pd->uobject->context, start, length,
294 mr_access_flags);
295 if (IS_ERR(e_mr->umem)) {
296 ib_mr = (void *) e_mr->umem;
297 goto reg_user_mr_exit1;
298 }
299
300 if (e_mr->umem->page_size != PAGE_SIZE) {
301 ehca_err(pd->device, "page size not supported, "
302 "e_mr->umem->page_size=%x", e_mr->umem->page_size);
303 ib_mr = ERR_PTR(-EINVAL);
304 goto reg_user_mr_exit2;
305 }
306
Heiko J Schickfab97222006-09-22 15:22:22 -0700307 /* determine number of MR pages */
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800308 num_pages_mr = (((virt % PAGE_SIZE) + length + PAGE_SIZE - 1) /
309 PAGE_SIZE);
310 num_pages_4k = (((virt % EHCA_PAGESIZE) + length + EHCA_PAGESIZE - 1) /
311 EHCA_PAGESIZE);
Heiko J Schickfab97222006-09-22 15:22:22 -0700312
313 /* register MR on HCA */
314 pginfo.type = EHCA_MR_PGI_USER;
315 pginfo.num_pages = num_pages_mr;
316 pginfo.num_4k = num_pages_4k;
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800317 pginfo.region = e_mr->umem;
318 pginfo.next_4k = e_mr->umem->offset / EHCA_PAGESIZE;
Heiko J Schickfab97222006-09-22 15:22:22 -0700319 pginfo.next_chunk = list_prepare_entry(pginfo.next_chunk,
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800320 (&e_mr->umem->chunk_list),
Heiko J Schickfab97222006-09-22 15:22:22 -0700321 list);
322
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800323 ret = ehca_reg_mr(shca, e_mr, (u64*) virt, length, mr_access_flags, e_pd,
324 &pginfo, &e_mr->ib.ib_mr.lkey, &e_mr->ib.ib_mr.rkey);
Heiko J Schickfab97222006-09-22 15:22:22 -0700325 if (ret) {
326 ib_mr = ERR_PTR(ret);
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800327 goto reg_user_mr_exit2;
Heiko J Schickfab97222006-09-22 15:22:22 -0700328 }
329
330 /* successful registration of all pages */
331 return &e_mr->ib.ib_mr;
332
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800333reg_user_mr_exit2:
334 ib_umem_release(e_mr->umem);
Heiko J Schickfab97222006-09-22 15:22:22 -0700335reg_user_mr_exit1:
336 ehca_mr_delete(e_mr);
337reg_user_mr_exit0:
338 if (IS_ERR(ib_mr))
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800339 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x"
Heiko J Schickfab97222006-09-22 15:22:22 -0700340 " udata=%p",
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800341 PTR_ERR(ib_mr), pd, mr_access_flags, udata);
Heiko J Schickfab97222006-09-22 15:22:22 -0700342 return ib_mr;
343} /* end ehca_reg_user_mr() */
344
345/*----------------------------------------------------------------------*/
346
/*
 * Re-register a physical MR: change its translation (address/size),
 * access rights, and/or PD, as selected by mr_rereg_mask.
 * Only IB_MR_REREG_TRANS-containing masks are supported (the PHYP rereg
 * hcall needs a page list).  On success mr->lkey/rkey are updated.
 * Returns 0, or a negative errno on failure.
 */
int ehca_rereg_phys_mr(struct ib_mr *mr,
		       int mr_rereg_mask,
		       struct ib_pd *pd,
		       struct ib_phys_buf *phys_buf_array,
		       int num_phys_buf,
		       int mr_access_flags,
		       u64 *iova_start)
{
	int ret;

	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u64 new_size;
	u64 *new_start;
	u32 new_acl;
	struct ehca_pd *new_pd;
	u32 tmp_lkey, tmp_rkey;
	unsigned long sl_flags;
	u32 num_pages_mr = 0;
	u32 num_pages_4k = 0; /* 4k portion "pages" */
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	u32 cur_pid = current->tgid;

	/* user-space MRs may only be modified by the process that owns them */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (!(mr_rereg_mask & IB_MR_REREG_TRANS)) {
		/* TODO not supported, because PHYP rereg hCall needs pages */
		ehca_err(mr->device, "rereg without IB_MR_REREG_TRANS not "
			 "supported yet, mr_rereg_mask=%x", mr_rereg_mask);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	if (mr_rereg_mask & IB_MR_REREG_PD) {
		if (!pd) {
			ehca_err(mr->device, "rereg with bad pd, pd=%p "
				 "mr_rereg_mask=%x", pd, mr_rereg_mask);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}

	/* reject unknown mask bits and an empty mask */
	if ((mr_rereg_mask &
	     ~(IB_MR_REREG_TRANS | IB_MR_REREG_PD | IB_MR_REREG_ACCESS)) ||
	    (mr_rereg_mask == 0)) {
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* check other parameters */
	if (e_mr == shca->maxmr) {
		/* should be impossible, however reject to be sure */
		ehca_err(mr->device, "rereg internal max-MR impossible, mr=%p "
			 "shca->maxmr=%p mr->lkey=%x",
			 mr, shca->maxmr, mr->lkey);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}
	if (mr_rereg_mask & IB_MR_REREG_TRANS) { /* transl., i.e. addr/size */
		if (e_mr->flags & EHCA_MR_FLAG_FMR) {
			ehca_err(mr->device, "not supported for FMR, mr=%p "
				 "flags=%x", mr, e_mr->flags);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
		if (!phys_buf_array || num_phys_buf <= 0) {
			ehca_err(mr->device, "bad input values: mr_rereg_mask=%x"
				 " phys_buf_array=%p num_phys_buf=%x",
				 mr_rereg_mask, phys_buf_array, num_phys_buf);
			ret = -EINVAL;
			goto rereg_phys_mr_exit0;
		}
	}
	if ((mr_rereg_mask & IB_MR_REREG_ACCESS) &&	/* change ACL */
	    (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
	     ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
	      !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)))) {
		/*
		 * Remote Write Access requires Local Write Access
		 * Remote Atomic Access requires Local Write Access
		 */
		ehca_err(mr->device, "bad input values: mr_rereg_mask=%x "
			 "mr_access_flags=%x", mr_rereg_mask, mr_access_flags);
		ret = -EINVAL;
		goto rereg_phys_mr_exit0;
	}

	/* set requested values dependent on rereg request */
	/* mrlock is held (irqsave) from here until exit1 */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);
	new_start = e_mr->start; /* new == old address */
	new_size = e_mr->size;	 /* new == old length */
	new_acl = e_mr->acl;	 /* new == old access control */
	new_pd = container_of(mr->pd,struct ehca_pd,ib_pd); /*new == old PD*/

	if (mr_rereg_mask & IB_MR_REREG_TRANS) {
		new_start = iova_start;	/* change address */
		/* check physical buffer list and calculate size */
		ret = ehca_mr_chk_buf_and_calc_size(phys_buf_array,
						    num_phys_buf, iova_start,
						    &new_size);
		if (ret)
			goto rereg_phys_mr_exit1;
		/* reject zero length and address-range wrap-around */
		if ((new_size == 0) ||
		    (((u64)iova_start + new_size) < (u64)iova_start)) {
			ehca_err(mr->device, "bad input values: new_size=%lx "
				 "iova_start=%p", new_size, iova_start);
			ret = -EINVAL;
			goto rereg_phys_mr_exit1;
		}
		/* page counts include partial head/tail pages */
		num_pages_mr = ((((u64)new_start % PAGE_SIZE) + new_size +
				 PAGE_SIZE - 1) / PAGE_SIZE);
		num_pages_4k = ((((u64)new_start % EHCA_PAGESIZE) + new_size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);
		pginfo.type = EHCA_MR_PGI_PHYS;
		pginfo.num_pages = num_pages_mr;
		pginfo.num_4k = num_pages_4k;
		pginfo.num_phys_buf = num_phys_buf;
		pginfo.phys_buf_array = phys_buf_array;
		pginfo.next_4k = (((u64)iova_start & ~PAGE_MASK) /
				  EHCA_PAGESIZE);
	}
	if (mr_rereg_mask & IB_MR_REREG_ACCESS)
		new_acl = mr_access_flags;
	if (mr_rereg_mask & IB_MR_REREG_PD)
		new_pd = container_of(pd, struct ehca_pd, ib_pd);

	ret = ehca_rereg_mr(shca, e_mr, new_start, new_size, new_acl,
			    new_pd, &pginfo, &tmp_lkey, &tmp_rkey);
	if (ret)
		goto rereg_phys_mr_exit1;

	/* successful reregistration */
	if (mr_rereg_mask & IB_MR_REREG_PD)
		mr->pd = pd;
	mr->lkey = tmp_lkey;
	mr->rkey = tmp_rkey;

rereg_phys_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
rereg_phys_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_rereg_mask=%x pd=%p "
			 "phys_buf_array=%p num_phys_buf=%x mr_access_flags=%x "
			 "iova_start=%p",
			 ret, mr, mr_rereg_mask, pd, phys_buf_array,
			 num_phys_buf, mr_access_flags, iova_start);
	return ret;
} /* end ehca_rereg_phys_mr() */
504
505/*----------------------------------------------------------------------*/
506
/*
 * Query MR attributes (addr, size, keys, access rights) from the HCA
 * via the hipz query hcall and translate them into mr_attr.
 * Not valid for FMRs.  Returns 0, or a negative errno on failure.
 */
int ehca_query_mr(struct ib_mr *mr, struct ib_mr_attr *mr_attr)
{
	int ret = 0;
	u64 h_ret;
	struct ehca_shca *shca =
		container_of(mr->device, struct ehca_shca, ib_device);
	struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
	struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
	u32 cur_pid = current->tgid;
	unsigned long sl_flags;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* user-space MRs may only be queried by the process that owns them */
	if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
	    (my_pd->ownpid != cur_pid)) {
		ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
			 cur_pid, my_pd->ownpid);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
		ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
			 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
		ret = -EINVAL;
		goto query_mr_exit0;
	}

	memset(mr_attr, 0, sizeof(struct ib_mr_attr));
	/* hold mrlock across the query so the MR cannot be reregistered */
	spin_lock_irqsave(&e_mr->mrlock, sl_flags);

	h_ret = hipz_h_query_mr(shca->ipz_hca_handle, e_mr, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(mr->device, "hipz_mr_query failed, h_ret=%lx mr=%p "
			 "hca_hndl=%lx mr_hndl=%lx lkey=%x",
			 h_ret, mr, shca->ipz_hca_handle.handle,
			 e_mr->ipz_mr_handle.handle, mr->lkey);
		ret = ehca2ib_return_code(h_ret);
		goto query_mr_exit1;
	}
	/* copy firmware-reported attributes back to the caller */
	mr_attr->pd = mr->pd;
	mr_attr->device_virt_addr = hipzout.vaddr;
	mr_attr->size = hipzout.len;
	mr_attr->lkey = hipzout.lkey;
	mr_attr->rkey = hipzout.rkey;
	ehca_mrmw_reverse_map_acl(&hipzout.acl, &mr_attr->mr_access_flags);

query_mr_exit1:
	spin_unlock_irqrestore(&e_mr->mrlock, sl_flags);
query_mr_exit0:
	if (ret)
		ehca_err(mr->device, "ret=%x mr=%p mr_attr=%p",
			 ret, mr, mr_attr);
	return ret;
} /* end ehca_query_mr() */
561
562/*----------------------------------------------------------------------*/
563
564int ehca_dereg_mr(struct ib_mr *mr)
565{
566 int ret = 0;
567 u64 h_ret;
568 struct ehca_shca *shca =
569 container_of(mr->device, struct ehca_shca, ib_device);
570 struct ehca_mr *e_mr = container_of(mr, struct ehca_mr, ib.ib_mr);
571 struct ehca_pd *my_pd = container_of(mr->pd, struct ehca_pd, ib_pd);
572 u32 cur_pid = current->tgid;
573
574 if (my_pd->ib_pd.uobject && my_pd->ib_pd.uobject->context &&
575 (my_pd->ownpid != cur_pid)) {
576 ehca_err(mr->device, "Invalid caller pid=%x ownpid=%x",
577 cur_pid, my_pd->ownpid);
578 ret = -EINVAL;
579 goto dereg_mr_exit0;
580 }
581
582 if ((e_mr->flags & EHCA_MR_FLAG_FMR)) {
583 ehca_err(mr->device, "not supported for FMR, mr=%p e_mr=%p "
584 "e_mr->flags=%x", mr, e_mr, e_mr->flags);
585 ret = -EINVAL;
586 goto dereg_mr_exit0;
587 } else if (e_mr == shca->maxmr) {
588 /* should be impossible, however reject to be sure */
589 ehca_err(mr->device, "dereg internal max-MR impossible, mr=%p "
590 "shca->maxmr=%p mr->lkey=%x",
591 mr, shca->maxmr, mr->lkey);
592 ret = -EINVAL;
593 goto dereg_mr_exit0;
594 }
595
596 /* TODO: BUSY: MR still has bound window(s) */
597 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
598 if (h_ret != H_SUCCESS) {
599 ehca_err(mr->device, "hipz_free_mr failed, h_ret=%lx shca=%p "
600 "e_mr=%p hca_hndl=%lx mr_hndl=%lx mr->lkey=%x",
601 h_ret, shca, e_mr, shca->ipz_hca_handle.handle,
602 e_mr->ipz_mr_handle.handle, mr->lkey);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +0200603 ret = ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -0700604 goto dereg_mr_exit0;
605 }
606
Roland Dreierf7c6a7b2007-03-04 16:15:11 -0800607 if (e_mr->umem)
608 ib_umem_release(e_mr->umem);
609
Heiko J Schickfab97222006-09-22 15:22:22 -0700610 /* successful deregistration */
611 ehca_mr_delete(e_mr);
612
613dereg_mr_exit0:
614 if (ret)
615 ehca_err(mr->device, "ret=%x mr=%p", ret, mr);
616 return ret;
617} /* end ehca_dereg_mr() */
618
619/*----------------------------------------------------------------------*/
620
621struct ib_mw *ehca_alloc_mw(struct ib_pd *pd)
622{
623 struct ib_mw *ib_mw;
624 u64 h_ret;
625 struct ehca_mw *e_mw;
626 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
627 struct ehca_shca *shca =
628 container_of(pd->device, struct ehca_shca, ib_device);
629 struct ehca_mw_hipzout_parms hipzout = {{0},0};
630
631 e_mw = ehca_mw_new();
632 if (!e_mw) {
633 ib_mw = ERR_PTR(-ENOMEM);
634 goto alloc_mw_exit0;
635 }
636
637 h_ret = hipz_h_alloc_resource_mw(shca->ipz_hca_handle, e_mw,
638 e_pd->fw_pd, &hipzout);
639 if (h_ret != H_SUCCESS) {
640 ehca_err(pd->device, "hipz_mw_allocate failed, h_ret=%lx "
641 "shca=%p hca_hndl=%lx mw=%p",
642 h_ret, shca, shca->ipz_hca_handle.handle, e_mw);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +0200643 ib_mw = ERR_PTR(ehca2ib_return_code(h_ret));
Heiko J Schickfab97222006-09-22 15:22:22 -0700644 goto alloc_mw_exit1;
645 }
646 /* successful MW allocation */
647 e_mw->ipz_mw_handle = hipzout.handle;
648 e_mw->ib_mw.rkey = hipzout.rkey;
649 return &e_mw->ib_mw;
650
651alloc_mw_exit1:
652 ehca_mw_delete(e_mw);
653alloc_mw_exit0:
654 if (IS_ERR(ib_mw))
655 ehca_err(pd->device, "rc=%lx pd=%p", PTR_ERR(ib_mw), pd);
656 return ib_mw;
657} /* end ehca_alloc_mw() */
658
659/*----------------------------------------------------------------------*/
660
/*
 * Bind a memory window to an MR on the given QP.
 * Unimplemented stub: always fails with -EPERM.
 */
int ehca_bind_mw(struct ib_qp *qp,
		 struct ib_mw *mw,
		 struct ib_mw_bind *mw_bind)
{
	/* TODO: not supported up to now */
	ehca_gen_err("bind MW currently not supported by HCAD");

	return -EPERM;
} /* end ehca_bind_mw() */
670
671/*----------------------------------------------------------------------*/
672
673int ehca_dealloc_mw(struct ib_mw *mw)
674{
675 u64 h_ret;
676 struct ehca_shca *shca =
677 container_of(mw->device, struct ehca_shca, ib_device);
678 struct ehca_mw *e_mw = container_of(mw, struct ehca_mw, ib_mw);
679
680 h_ret = hipz_h_free_resource_mw(shca->ipz_hca_handle, e_mw);
681 if (h_ret != H_SUCCESS) {
682 ehca_err(mw->device, "hipz_free_mw failed, h_ret=%lx shca=%p "
683 "mw=%p rkey=%x hca_hndl=%lx mw_hndl=%lx",
684 h_ret, shca, mw, mw->rkey, shca->ipz_hca_handle.handle,
685 e_mw->ipz_mw_handle.handle);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +0200686 return ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -0700687 }
688 /* successful deallocation */
689 ehca_mw_delete(e_mw);
690 return 0;
691} /* end ehca_dealloc_mw() */
692
693/*----------------------------------------------------------------------*/
694
695struct ib_fmr *ehca_alloc_fmr(struct ib_pd *pd,
696 int mr_access_flags,
697 struct ib_fmr_attr *fmr_attr)
698{
699 struct ib_fmr *ib_fmr;
700 struct ehca_shca *shca =
701 container_of(pd->device, struct ehca_shca, ib_device);
702 struct ehca_pd *e_pd = container_of(pd, struct ehca_pd, ib_pd);
703 struct ehca_mr *e_fmr;
704 int ret;
705 u32 tmp_lkey, tmp_rkey;
706 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
707
708 /* check other parameters */
709 if (((mr_access_flags & IB_ACCESS_REMOTE_WRITE) &&
710 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE)) ||
711 ((mr_access_flags & IB_ACCESS_REMOTE_ATOMIC) &&
712 !(mr_access_flags & IB_ACCESS_LOCAL_WRITE))) {
713 /*
714 * Remote Write Access requires Local Write Access
715 * Remote Atomic Access requires Local Write Access
716 */
717 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
718 mr_access_flags);
719 ib_fmr = ERR_PTR(-EINVAL);
720 goto alloc_fmr_exit0;
721 }
722 if (mr_access_flags & IB_ACCESS_MW_BIND) {
723 ehca_err(pd->device, "bad input values: mr_access_flags=%x",
724 mr_access_flags);
725 ib_fmr = ERR_PTR(-EINVAL);
726 goto alloc_fmr_exit0;
727 }
728 if ((fmr_attr->max_pages == 0) || (fmr_attr->max_maps == 0)) {
729 ehca_err(pd->device, "bad input values: fmr_attr->max_pages=%x "
730 "fmr_attr->max_maps=%x fmr_attr->page_shift=%x",
731 fmr_attr->max_pages, fmr_attr->max_maps,
732 fmr_attr->page_shift);
733 ib_fmr = ERR_PTR(-EINVAL);
734 goto alloc_fmr_exit0;
735 }
736 if (((1 << fmr_attr->page_shift) != EHCA_PAGESIZE) &&
737 ((1 << fmr_attr->page_shift) != PAGE_SIZE)) {
738 ehca_err(pd->device, "unsupported fmr_attr->page_shift=%x",
739 fmr_attr->page_shift);
740 ib_fmr = ERR_PTR(-EINVAL);
741 goto alloc_fmr_exit0;
742 }
743
744 e_fmr = ehca_mr_new();
745 if (!e_fmr) {
746 ib_fmr = ERR_PTR(-ENOMEM);
747 goto alloc_fmr_exit0;
748 }
749 e_fmr->flags |= EHCA_MR_FLAG_FMR;
750
751 /* register MR on HCA */
752 ret = ehca_reg_mr(shca, e_fmr, NULL,
753 fmr_attr->max_pages * (1 << fmr_attr->page_shift),
754 mr_access_flags, e_pd, &pginfo,
755 &tmp_lkey, &tmp_rkey);
756 if (ret) {
757 ib_fmr = ERR_PTR(ret);
758 goto alloc_fmr_exit1;
759 }
760
761 /* successful */
762 e_fmr->fmr_page_size = 1 << fmr_attr->page_shift;
763 e_fmr->fmr_max_pages = fmr_attr->max_pages;
764 e_fmr->fmr_max_maps = fmr_attr->max_maps;
765 e_fmr->fmr_map_cnt = 0;
766 return &e_fmr->ib.ib_fmr;
767
768alloc_fmr_exit1:
769 ehca_mr_delete(e_fmr);
770alloc_fmr_exit0:
771 if (IS_ERR(ib_fmr))
772 ehca_err(pd->device, "rc=%lx pd=%p mr_access_flags=%x "
773 "fmr_attr=%p", PTR_ERR(ib_fmr), pd,
774 mr_access_flags, fmr_attr);
775 return ib_fmr;
776} /* end ehca_alloc_fmr() */
777
778/*----------------------------------------------------------------------*/
779
780int ehca_map_phys_fmr(struct ib_fmr *fmr,
781 u64 *page_list,
782 int list_len,
783 u64 iova)
784{
785 int ret;
786 struct ehca_shca *shca =
787 container_of(fmr->device, struct ehca_shca, ib_device);
788 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
789 struct ehca_pd *e_pd = container_of(fmr->pd, struct ehca_pd, ib_pd);
790 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
791 u32 tmp_lkey, tmp_rkey;
792
793 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
794 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
795 e_fmr, e_fmr->flags);
796 ret = -EINVAL;
797 goto map_phys_fmr_exit0;
798 }
799 ret = ehca_fmr_check_page_list(e_fmr, page_list, list_len);
800 if (ret)
801 goto map_phys_fmr_exit0;
802 if (iova % e_fmr->fmr_page_size) {
803 /* only whole-numbered pages */
804 ehca_err(fmr->device, "bad iova, iova=%lx fmr_page_size=%x",
805 iova, e_fmr->fmr_page_size);
806 ret = -EINVAL;
807 goto map_phys_fmr_exit0;
808 }
809 if (e_fmr->fmr_map_cnt >= e_fmr->fmr_max_maps) {
810 /* HCAD does not limit the maps, however trace this anyway */
811 ehca_info(fmr->device, "map limit exceeded, fmr=%p "
812 "e_fmr->fmr_map_cnt=%x e_fmr->fmr_max_maps=%x",
813 fmr, e_fmr->fmr_map_cnt, e_fmr->fmr_max_maps);
814 }
815
816 pginfo.type = EHCA_MR_PGI_FMR;
817 pginfo.num_pages = list_len;
818 pginfo.num_4k = list_len * (e_fmr->fmr_page_size / EHCA_PAGESIZE);
819 pginfo.page_list = page_list;
820 pginfo.next_4k = ((iova & (e_fmr->fmr_page_size-1)) /
821 EHCA_PAGESIZE);
822
823 ret = ehca_rereg_mr(shca, e_fmr, (u64*)iova,
824 list_len * e_fmr->fmr_page_size,
825 e_fmr->acl, e_pd, &pginfo, &tmp_lkey, &tmp_rkey);
826 if (ret)
827 goto map_phys_fmr_exit0;
828
829 /* successful reregistration */
830 e_fmr->fmr_map_cnt++;
831 e_fmr->ib.ib_fmr.lkey = tmp_lkey;
832 e_fmr->ib.ib_fmr.rkey = tmp_rkey;
833 return 0;
834
835map_phys_fmr_exit0:
836 if (ret)
837 ehca_err(fmr->device, "ret=%x fmr=%p page_list=%p list_len=%x "
838 "iova=%lx",
839 ret, fmr, page_list, list_len, iova);
840 return ret;
841} /* end ehca_map_phys_fmr() */
842
843/*----------------------------------------------------------------------*/
844
/*
 * Unmap all FMRs on fmr_list.  First pass validates that every entry is
 * a real FMR and that all belong to the same adapter; second pass unmaps
 * them one by one, stopping at the first failure.
 * Returns 0, or a negative errno on failure.
 */
int ehca_unmap_fmr(struct list_head *fmr_list)
{
	int ret = 0;
	struct ib_fmr *ib_fmr;
	struct ehca_shca *shca = NULL;
	struct ehca_shca *prev_shca;
	struct ehca_mr *e_fmr;
	u32 num_fmr = 0;
	u32 unmap_fmr_cnt = 0;

	/* check all FMR belong to same SHCA, and check internal flag */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		prev_shca = shca;
		if (!ib_fmr) {
			ehca_gen_err("bad fmr=%p in list", ib_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		/* prev_shca is NULL on the first iteration, so skip then */
		if ((shca != prev_shca) && prev_shca) {
			ehca_err(&shca->ib_device, "SHCA mismatch, shca=%p "
				 "prev_shca=%p e_fmr=%p",
				 shca, prev_shca, e_fmr);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
			ehca_err(&shca->ib_device, "not a FMR, e_fmr=%p "
				 "e_fmr->flags=%x", e_fmr, e_fmr->flags);
			ret = -EINVAL;
			goto unmap_fmr_exit0;
		}
		num_fmr++;
	}

	/* loop over all FMRs to unmap */
	list_for_each_entry(ib_fmr, fmr_list, list) {
		unmap_fmr_cnt++;
		e_fmr = container_of(ib_fmr, struct ehca_mr, ib.ib_fmr);
		shca = container_of(ib_fmr->device, struct ehca_shca,
				    ib_device);
		ret = ehca_unmap_one_fmr(shca, e_fmr);
		if (ret) {
			/* unmap failed, stop unmapping of rest of FMRs */
			ehca_err(&shca->ib_device, "unmap of one FMR failed, "
				 "stop rest, e_fmr=%p num_fmr=%x "
				 "unmap_fmr_cnt=%x lkey=%x", e_fmr, num_fmr,
				 unmap_fmr_cnt, e_fmr->ib.ib_fmr.lkey);
			goto unmap_fmr_exit0;
		}
	}

unmap_fmr_exit0:
	if (ret)
		ehca_gen_err("ret=%x fmr_list=%p num_fmr=%x unmap_fmr_cnt=%x",
			     ret, fmr_list, num_fmr, unmap_fmr_cnt);
	return ret;
} /* end ehca_unmap_fmr() */
905
906/*----------------------------------------------------------------------*/
907
908int ehca_dealloc_fmr(struct ib_fmr *fmr)
909{
910 int ret;
911 u64 h_ret;
912 struct ehca_shca *shca =
913 container_of(fmr->device, struct ehca_shca, ib_device);
914 struct ehca_mr *e_fmr = container_of(fmr, struct ehca_mr, ib.ib_fmr);
915
916 if (!(e_fmr->flags & EHCA_MR_FLAG_FMR)) {
917 ehca_err(fmr->device, "not a FMR, e_fmr=%p e_fmr->flags=%x",
918 e_fmr, e_fmr->flags);
919 ret = -EINVAL;
920 goto free_fmr_exit0;
921 }
922
923 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
924 if (h_ret != H_SUCCESS) {
925 ehca_err(fmr->device, "hipz_free_mr failed, h_ret=%lx e_fmr=%p "
926 "hca_hndl=%lx fmr_hndl=%lx fmr->lkey=%x",
927 h_ret, e_fmr, shca->ipz_hca_handle.handle,
928 e_fmr->ipz_mr_handle.handle, fmr->lkey);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +0200929 ret = ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -0700930 goto free_fmr_exit0;
931 }
932 /* successful deregistration */
933 ehca_mr_delete(e_fmr);
934 return 0;
935
936free_fmr_exit0:
937 if (ret)
938 ehca_err(&shca->ib_device, "ret=%x fmr=%p", ret, fmr);
939 return ret;
940} /* end ehca_dealloc_fmr() */
941
942/*----------------------------------------------------------------------*/
943
/*
 * Register a memory region with the firmware: allocate the MR resource
 * via hipz_h_alloc_resource_mr() and then register all pages described
 * by @pginfo via ehca_reg_mr_rpages().
 *
 * @shca:       HCA the MR belongs to
 * @e_mr:       ehca MR object; its firmware handle is filled in on success
 * @iova_start: requested I/O virtual start address
 * @size:       length of the region in bytes
 * @acl:        IB access control flags
 * @e_pd:       protection domain to attach the MR to
 * @pginfo:     page list descriptor for the region
 * @lkey:       OUT - local key assigned by the firmware
 * @rkey:       OUT - remote key assigned by the firmware
 *
 * Returns 0 on success or a negative errno.  If page registration fails,
 * the already-allocated firmware resource is freed again before returning.
 */
int ehca_reg_mr(struct ehca_shca *shca,
		struct ehca_mr *e_mr,
		u64 *iova_start,
		u64 size,
		int acl,
		struct ehca_pd *e_pd,
		struct ehca_mr_pginfo *pginfo,
		u32 *lkey, /*OUT*/
		u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	/* translate IB access flags into the firmware representation */
	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
	if (ehca_use_hp_mr == 1)	/* module parameter: request high-performance MRs */
		hipz_acl |= 0x00000001;

	h_ret = hipz_h_alloc_resource_mr(shca->ipz_hca_handle, e_mr,
					 (u64)iova_start, size, hipz_acl,
					 e_pd->fw_pd, &hipzout);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "hipz_alloc_mr failed, h_ret=%lx "
			 "hca_hndl=%lx", h_ret, shca->ipz_hca_handle.handle);
		ret = ehca2ib_return_code(h_ret);
		goto ehca_reg_mr_exit0;
	}

	e_mr->ipz_mr_handle = hipzout.handle;

	/* push the actual page addresses down to the firmware */
	ret = ehca_reg_mr_rpages(shca, e_mr, pginfo);
	if (ret)
		goto ehca_reg_mr_exit1;

	/* successful registration */
	e_mr->num_pages = pginfo->num_pages;
	e_mr->num_4k = pginfo->num_4k;
	e_mr->start = iova_start;
	e_mr->size = size;
	e_mr->acl = acl;
	*lkey = hipzout.lkey;
	*rkey = hipzout.rkey;
	return 0;

ehca_reg_mr_exit1:
	/* undo the resource allocation; failure here leaks the FW resource */
	h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
	if (h_ret != H_SUCCESS) {
		ehca_err(&shca->ib_device, "h_ret=%lx shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p lkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx ret=%x",
			 h_ret, shca, e_mr, iova_start, size, acl, e_pd,
			 hipzout.lkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k, ret);
		ehca_err(&shca->ib_device, "internal error in ehca_reg_mr, "
			 "not recoverable");
	}
ehca_reg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx num_4k=%lx",
			 ret, shca, e_mr, iova_start, size, acl, e_pd, pginfo,
			 pginfo->num_pages, pginfo->num_4k);
	return ret;
} /* end ehca_reg_mr() */
1011
1012/*----------------------------------------------------------------------*/
1013
1014int ehca_reg_mr_rpages(struct ehca_shca *shca,
1015 struct ehca_mr *e_mr,
1016 struct ehca_mr_pginfo *pginfo)
1017{
1018 int ret = 0;
1019 u64 h_ret;
1020 u32 rnum;
1021 u64 rpage;
1022 u32 i;
1023 u64 *kpage;
1024
Hoang-Nam Nguyenf2d91362007-01-09 18:04:14 +01001025 kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
Heiko J Schickfab97222006-09-22 15:22:22 -07001026 if (!kpage) {
1027 ehca_err(&shca->ib_device, "kpage alloc failed");
1028 ret = -ENOMEM;
1029 goto ehca_reg_mr_rpages_exit0;
1030 }
1031
1032 /* max 512 pages per shot */
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +02001033 for (i = 0; i < ((pginfo->num_4k + MAX_RPAGES - 1) / MAX_RPAGES); i++) {
Heiko J Schickfab97222006-09-22 15:22:22 -07001034
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +02001035 if (i == ((pginfo->num_4k + MAX_RPAGES - 1) / MAX_RPAGES) - 1) {
1036 rnum = pginfo->num_4k % MAX_RPAGES; /* last shot */
Heiko J Schickfab97222006-09-22 15:22:22 -07001037 if (rnum == 0)
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +02001038 rnum = MAX_RPAGES; /* last shot is full */
Heiko J Schickfab97222006-09-22 15:22:22 -07001039 } else
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +02001040 rnum = MAX_RPAGES;
Heiko J Schickfab97222006-09-22 15:22:22 -07001041
1042 if (rnum > 1) {
1043 ret = ehca_set_pagebuf(e_mr, pginfo, rnum, kpage);
1044 if (ret) {
1045 ehca_err(&shca->ib_device, "ehca_set_pagebuf "
1046 "bad rc, ret=%x rnum=%x kpage=%p",
1047 ret, rnum, kpage);
1048 ret = -EFAULT;
1049 goto ehca_reg_mr_rpages_exit1;
1050 }
1051 rpage = virt_to_abs(kpage);
1052 if (!rpage) {
1053 ehca_err(&shca->ib_device, "kpage=%p i=%x",
1054 kpage, i);
1055 ret = -EFAULT;
1056 goto ehca_reg_mr_rpages_exit1;
1057 }
1058 } else { /* rnum==1 */
1059 ret = ehca_set_pagebuf_1(e_mr, pginfo, &rpage);
1060 if (ret) {
1061 ehca_err(&shca->ib_device, "ehca_set_pagebuf_1 "
1062 "bad rc, ret=%x i=%x", ret, i);
1063 ret = -EFAULT;
1064 goto ehca_reg_mr_rpages_exit1;
1065 }
1066 }
1067
1068 h_ret = hipz_h_register_rpage_mr(shca->ipz_hca_handle, e_mr,
1069 0, /* pagesize 4k */
1070 0, rpage, rnum);
1071
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +02001072 if (i == ((pginfo->num_4k + MAX_RPAGES - 1) / MAX_RPAGES) - 1) {
Heiko J Schickfab97222006-09-22 15:22:22 -07001073 /*
1074 * check for 'registration complete'==H_SUCCESS
1075 * and for 'page registered'==H_PAGE_REGISTERED
1076 */
1077 if (h_ret != H_SUCCESS) {
1078 ehca_err(&shca->ib_device, "last "
1079 "hipz_reg_rpage_mr failed, h_ret=%lx "
1080 "e_mr=%p i=%x hca_hndl=%lx mr_hndl=%lx"
1081 " lkey=%x", h_ret, e_mr, i,
1082 shca->ipz_hca_handle.handle,
1083 e_mr->ipz_mr_handle.handle,
1084 e_mr->ib.ib_mr.lkey);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +02001085 ret = ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -07001086 break;
1087 } else
1088 ret = 0;
1089 } else if (h_ret != H_PAGE_REGISTERED) {
1090 ehca_err(&shca->ib_device, "hipz_reg_rpage_mr failed, "
1091 "h_ret=%lx e_mr=%p i=%x lkey=%x hca_hndl=%lx "
1092 "mr_hndl=%lx", h_ret, e_mr, i,
1093 e_mr->ib.ib_mr.lkey,
1094 shca->ipz_hca_handle.handle,
1095 e_mr->ipz_mr_handle.handle);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +02001096 ret = ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -07001097 break;
1098 } else
1099 ret = 0;
1100 } /* end for(i) */
1101
1102
1103ehca_reg_mr_rpages_exit1:
Hoang-Nam Nguyen7e28db52006-11-07 00:56:39 +01001104 ehca_free_fw_ctrlblock(kpage);
Heiko J Schickfab97222006-09-22 15:22:22 -07001105ehca_reg_mr_rpages_exit0:
1106 if (ret)
1107 ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p pginfo=%p "
1108 "num_pages=%lx num_4k=%lx", ret, shca, e_mr, pginfo,
1109 pginfo->num_pages, pginfo->num_4k);
1110 return ret;
1111} /* end ehca_reg_mr_rpages() */
1112
1113/*----------------------------------------------------------------------*/
1114
/*
 * Re-register an MR with a single hipz_h_reregister_pmr() hCall
 * ("Rereg1" path).
 *
 * Returns 0 on success; -EAGAIN if the firmware refused the one-call
 * reregistration (e.g. H_MR_CONDITION: MW bound or MR shared), in which
 * case @pginfo is restored so the caller can retry with the 3-hCall path;
 * other negative errnos on hard failure.  On -EAGAIN the MR itself is
 * untouched.
 */
inline int ehca_rereg_mr_rereg1(struct ehca_shca *shca,
				struct ehca_mr *e_mr,
				u64 *iova_start,
				u64 size,
				u32 acl,
				struct ehca_pd *e_pd,
				struct ehca_mr_pginfo *pginfo,
				u32 *lkey, /*OUT*/
				u32 *rkey) /*OUT*/
{
	int ret;
	u64 h_ret;
	u32 hipz_acl;
	u64 *kpage;
	u64 rpage;
	struct ehca_mr_pginfo pginfo_save;
	struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};

	ehca_mrmw_map_acl(acl, &hipz_acl);
	ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);

	kpage = ehca_alloc_fw_ctrlblock(GFP_KERNEL);
	if (!kpage) {
		ehca_err(&shca->ib_device, "kpage alloc failed");
		ret = -ENOMEM;
		goto ehca_rereg_mr_rereg1_exit0;
	}

	/*
	 * ehca_set_pagebuf() advances pginfo's cursors; keep a copy so the
	 * cursors can be rewound if the firmware rejects the reregistration.
	 */
	pginfo_save = *pginfo;
	ret = ehca_set_pagebuf(e_mr, pginfo, pginfo->num_4k, kpage);
	if (ret) {
		ehca_err(&shca->ib_device, "set pagebuf failed, e_mr=%p "
			 "pginfo=%p type=%x num_pages=%lx num_4k=%lx kpage=%p",
			 e_mr, pginfo, pginfo->type, pginfo->num_pages,
			 pginfo->num_4k,kpage);
		goto ehca_rereg_mr_rereg1_exit1;
	}
	rpage = virt_to_abs(kpage);
	if (!rpage) {
		ehca_err(&shca->ib_device, "kpage=%p", kpage);
		ret = -EFAULT;
		goto ehca_rereg_mr_rereg1_exit1;
	}
	h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_mr,
				      (u64)iova_start, size, hipz_acl,
				      e_pd->fw_pd, rpage, &hipzout);
	if (h_ret != H_SUCCESS) {
		/*
		 * reregistration unsuccessful, try it again with the 3 hCalls,
		 * e.g. this is required in case H_MR_CONDITION
		 * (MW bound or MR is shared)
		 */
		ehca_warn(&shca->ib_device, "hipz_h_reregister_pmr failed "
			  "(Rereg1), h_ret=%lx e_mr=%p", h_ret, e_mr);
		*pginfo = pginfo_save;	/* rewind cursors for the retry */
		ret = -EAGAIN;
	} else if ((u64*)hipzout.vaddr != iova_start) {
		/* firmware moved the region - callers cannot handle that */
		ehca_err(&shca->ib_device, "PHYP changed iova_start in "
			 "rereg_pmr, iova_start=%p iova_start_out=%lx e_mr=%p "
			 "mr_handle=%lx lkey=%x lkey_out=%x", iova_start,
			 hipzout.vaddr, e_mr, e_mr->ipz_mr_handle.handle,
			 e_mr->ib.ib_mr.lkey, hipzout.lkey);
		ret = -EFAULT;
	} else {
		/*
		 * successful reregistration
		 * note: start and start_out are identical for eServer HCAs
		 */
		e_mr->num_pages = pginfo->num_pages;
		e_mr->num_4k = pginfo->num_4k;
		e_mr->start = iova_start;
		e_mr->size = size;
		e_mr->acl = acl;
		*lkey = hipzout.lkey;
		*rkey = hipzout.rkey;
	}

ehca_rereg_mr_rereg1_exit1:
	ehca_free_fw_ctrlblock(kpage);
ehca_rereg_mr_rereg1_exit0:
	/* -EAGAIN is an expected outcome, not an error - do not log it */
	if ( ret && (ret != -EAGAIN) )
		ehca_err(&shca->ib_device, "ret=%x lkey=%x rkey=%x "
			 "pginfo=%p num_pages=%lx num_4k=%lx",
			 ret, *lkey, *rkey, pginfo, pginfo->num_pages,
			 pginfo->num_4k);
	return ret;
} /* end ehca_rereg_mr_rereg1() */
1202
1203/*----------------------------------------------------------------------*/
1204
/*
 * Re-register an MR, choosing between the fast single-hCall path
 * (ehca_rereg_mr_rereg1) and the 3-hCall path (free + clean + re-register).
 * The 3-hCall path is forced when the page count exceeds MAX_RPAGES, when
 * the region grows, or for the internal max-MR; it is also used as a
 * fallback when the 1-hCall path returns -EAGAIN.
 *
 * Returns 0 on success or a negative errno.
 */
int ehca_rereg_mr(struct ehca_shca *shca,
		  struct ehca_mr *e_mr,
		  u64 *iova_start,
		  u64 size,
		  int acl,
		  struct ehca_pd *e_pd,
		  struct ehca_mr_pginfo *pginfo,
		  u32 *lkey,
		  u32 *rkey)
{
	int ret = 0;
	u64 h_ret;
	int rereg_1_hcall = 1; /* 1: use hipz_h_reregister_pmr directly */
	int rereg_3_hcall = 0; /* 1: use 3 hipz calls for reregistration */

	/* first determine reregistration hCall(s) */
	if ((pginfo->num_4k > MAX_RPAGES) || (e_mr->num_4k > MAX_RPAGES) ||
	    (pginfo->num_4k > e_mr->num_4k)) {
		ehca_dbg(&shca->ib_device, "Rereg3 case, pginfo->num_4k=%lx "
			 "e_mr->num_4k=%x", pginfo->num_4k, e_mr->num_4k);
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
	}

	if (e_mr->flags & EHCA_MR_FLAG_MAXMR) {	/* check for max-MR */
		rereg_1_hcall = 0;
		rereg_3_hcall = 1;
		/* the re-registered MR is an ordinary MR from now on */
		e_mr->flags &= ~EHCA_MR_FLAG_MAXMR;
		ehca_err(&shca->ib_device, "Rereg MR for max-MR! e_mr=%p",
			 e_mr);
	}

	if (rereg_1_hcall) {
		ret = ehca_rereg_mr_rereg1(shca, e_mr, iova_start, size,
					   acl, e_pd, pginfo, lkey, rkey);
		if (ret) {
			if (ret == -EAGAIN)
				/* firmware refused; fall back to 3 hCalls */
				rereg_3_hcall = 1;
			else
				goto ehca_rereg_mr_exit0;
		}
	}

	if (rereg_3_hcall) {
		struct ehca_mr save_mr;

		/* first deregister old MR */
		h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_mr);
		if (h_ret != H_SUCCESS) {
			ehca_err(&shca->ib_device, "hipz_free_mr failed, "
				 "h_ret=%lx e_mr=%p hca_hndl=%lx mr_hndl=%lx "
				 "mr->lkey=%x",
				 h_ret, e_mr, shca->ipz_hca_handle.handle,
				 e_mr->ipz_mr_handle.handle,
				 e_mr->ib.ib_mr.lkey);
			ret = ehca2ib_return_code(h_ret);
			goto ehca_rereg_mr_exit0;
		}
		/* clean ehca_mr_t, without changing struct ib_mr and lock */
		save_mr = *e_mr;
		ehca_mr_deletenew(e_mr);

		/* set some MR values */
		e_mr->flags = save_mr.flags;
		e_mr->fmr_page_size = save_mr.fmr_page_size;
		e_mr->fmr_max_pages = save_mr.fmr_max_pages;
		e_mr->fmr_max_maps = save_mr.fmr_max_maps;
		e_mr->fmr_map_cnt = save_mr.fmr_map_cnt;

		ret = ehca_reg_mr(shca, e_mr, iova_start, size, acl,
				  e_pd, pginfo, lkey, rkey);
		if (ret) {
			/*
			 * restore everything after the 'flags' field from the
			 * saved copy (the ib_mr header before it is untouched)
			 */
			u32 offset = (u64)(&e_mr->flags) - (u64)e_mr;
			memcpy(&e_mr->flags, &(save_mr.flags),
			       sizeof(struct ehca_mr) - offset);
			goto ehca_rereg_mr_exit0;
		}
	}

ehca_rereg_mr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_mr=%p "
			 "iova_start=%p size=%lx acl=%x e_pd=%p pginfo=%p "
			 "num_pages=%lx lkey=%x rkey=%x rereg_1_hcall=%x "
			 "rereg_3_hcall=%x", ret, shca, e_mr, iova_start, size,
			 acl, e_pd, pginfo, pginfo->num_pages, *lkey, *rkey,
			 rereg_1_hcall, rereg_3_hcall);
	return ret;
} /* end ehca_rereg_mr() */
1294
1295/*----------------------------------------------------------------------*/
1296
1297int ehca_unmap_one_fmr(struct ehca_shca *shca,
1298 struct ehca_mr *e_fmr)
1299{
1300 int ret = 0;
1301 u64 h_ret;
1302 int rereg_1_hcall = 1; /* 1: use hipz_mr_reregister directly */
1303 int rereg_3_hcall = 0; /* 1: use 3 hipz calls for unmapping */
1304 struct ehca_pd *e_pd =
1305 container_of(e_fmr->ib.ib_fmr.pd, struct ehca_pd, ib_pd);
1306 struct ehca_mr save_fmr;
1307 u32 tmp_lkey, tmp_rkey;
1308 struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
1309 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1310
1311 /* first check if reregistration hCall can be used for unmap */
Hoang-Nam Nguyen4e4e74c2007-07-12 17:51:04 +02001312 if (e_fmr->fmr_max_pages > MAX_RPAGES) {
Heiko J Schickfab97222006-09-22 15:22:22 -07001313 rereg_1_hcall = 0;
1314 rereg_3_hcall = 1;
1315 }
1316
1317 if (rereg_1_hcall) {
1318 /*
1319 * note: after using rereg hcall with len=0,
1320 * rereg hcall must be used again for registering pages
1321 */
1322 h_ret = hipz_h_reregister_pmr(shca->ipz_hca_handle, e_fmr, 0,
1323 0, 0, e_pd->fw_pd, 0, &hipzout);
1324 if (h_ret != H_SUCCESS) {
1325 /*
1326 * should not happen, because length checked above,
1327 * FMRs are not shared and no MW bound to FMRs
1328 */
1329 ehca_err(&shca->ib_device, "hipz_reregister_pmr failed "
1330 "(Rereg1), h_ret=%lx e_fmr=%p hca_hndl=%lx "
1331 "mr_hndl=%lx lkey=%x lkey_out=%x",
1332 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1333 e_fmr->ipz_mr_handle.handle,
1334 e_fmr->ib.ib_fmr.lkey, hipzout.lkey);
1335 rereg_3_hcall = 1;
1336 } else {
1337 /* successful reregistration */
1338 e_fmr->start = NULL;
1339 e_fmr->size = 0;
1340 tmp_lkey = hipzout.lkey;
1341 tmp_rkey = hipzout.rkey;
1342 }
1343 }
1344
1345 if (rereg_3_hcall) {
1346 struct ehca_mr save_mr;
1347
1348 /* first free old FMR */
1349 h_ret = hipz_h_free_resource_mr(shca->ipz_hca_handle, e_fmr);
1350 if (h_ret != H_SUCCESS) {
1351 ehca_err(&shca->ib_device, "hipz_free_mr failed, "
1352 "h_ret=%lx e_fmr=%p hca_hndl=%lx mr_hndl=%lx "
1353 "lkey=%x",
1354 h_ret, e_fmr, shca->ipz_hca_handle.handle,
1355 e_fmr->ipz_mr_handle.handle,
1356 e_fmr->ib.ib_fmr.lkey);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +02001357 ret = ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -07001358 goto ehca_unmap_one_fmr_exit0;
1359 }
1360 /* clean ehca_mr_t, without changing lock */
1361 save_fmr = *e_fmr;
1362 ehca_mr_deletenew(e_fmr);
1363
1364 /* set some MR values */
1365 e_fmr->flags = save_fmr.flags;
1366 e_fmr->fmr_page_size = save_fmr.fmr_page_size;
1367 e_fmr->fmr_max_pages = save_fmr.fmr_max_pages;
1368 e_fmr->fmr_max_maps = save_fmr.fmr_max_maps;
1369 e_fmr->fmr_map_cnt = save_fmr.fmr_map_cnt;
1370 e_fmr->acl = save_fmr.acl;
1371
1372 pginfo.type = EHCA_MR_PGI_FMR;
1373 pginfo.num_pages = 0;
1374 pginfo.num_4k = 0;
1375 ret = ehca_reg_mr(shca, e_fmr, NULL,
1376 (e_fmr->fmr_max_pages * e_fmr->fmr_page_size),
1377 e_fmr->acl, e_pd, &pginfo, &tmp_lkey,
1378 &tmp_rkey);
1379 if (ret) {
1380 u32 offset = (u64)(&e_fmr->flags) - (u64)e_fmr;
1381 memcpy(&e_fmr->flags, &(save_mr.flags),
1382 sizeof(struct ehca_mr) - offset);
1383 goto ehca_unmap_one_fmr_exit0;
1384 }
1385 }
1386
1387ehca_unmap_one_fmr_exit0:
1388 if (ret)
1389 ehca_err(&shca->ib_device, "ret=%x tmp_lkey=%x tmp_rkey=%x "
1390 "fmr_max_pages=%x rereg_1_hcall=%x rereg_3_hcall=%x",
1391 ret, tmp_lkey, tmp_rkey, e_fmr->fmr_max_pages,
1392 rereg_1_hcall, rereg_3_hcall);
1393 return ret;
1394} /* end ehca_unmap_one_fmr() */
1395
1396/*----------------------------------------------------------------------*/
1397
1398int ehca_reg_smr(struct ehca_shca *shca,
1399 struct ehca_mr *e_origmr,
1400 struct ehca_mr *e_newmr,
1401 u64 *iova_start,
1402 int acl,
1403 struct ehca_pd *e_pd,
1404 u32 *lkey, /*OUT*/
1405 u32 *rkey) /*OUT*/
1406{
1407 int ret = 0;
1408 u64 h_ret;
1409 u32 hipz_acl;
1410 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1411
1412 ehca_mrmw_map_acl(acl, &hipz_acl);
1413 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1414
1415 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1416 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1417 &hipzout);
1418 if (h_ret != H_SUCCESS) {
1419 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1420 "shca=%p e_origmr=%p e_newmr=%p iova_start=%p acl=%x "
1421 "e_pd=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1422 h_ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd,
1423 shca->ipz_hca_handle.handle,
1424 e_origmr->ipz_mr_handle.handle,
1425 e_origmr->ib.ib_mr.lkey);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +02001426 ret = ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -07001427 goto ehca_reg_smr_exit0;
1428 }
1429 /* successful registration */
1430 e_newmr->num_pages = e_origmr->num_pages;
1431 e_newmr->num_4k = e_origmr->num_4k;
1432 e_newmr->start = iova_start;
1433 e_newmr->size = e_origmr->size;
1434 e_newmr->acl = acl;
1435 e_newmr->ipz_mr_handle = hipzout.handle;
1436 *lkey = hipzout.lkey;
1437 *rkey = hipzout.rkey;
1438 return 0;
1439
1440ehca_reg_smr_exit0:
1441 if (ret)
1442 ehca_err(&shca->ib_device, "ret=%x shca=%p e_origmr=%p "
1443 "e_newmr=%p iova_start=%p acl=%x e_pd=%p",
1444 ret, shca, e_origmr, e_newmr, iova_start, acl, e_pd);
1445 return ret;
1446} /* end ehca_reg_smr() */
1447
1448/*----------------------------------------------------------------------*/
1449
1450/* register internal max-MR to internal SHCA */
int ehca_reg_internal_maxmr(
	struct ehca_shca *shca,
	struct ehca_pd *e_pd,
	struct ehca_mr **e_maxmr)  /*OUT*/
{
	/*
	 * Create and register the internal max-MR: a single MR covering all
	 * of kernel memory, used internally by the driver.  On success
	 * *e_maxmr points to the new MR and the PD use count is bumped.
	 * Returns 0 or a negative errno.
	 */
	int ret;
	struct ehca_mr *e_mr;
	u64 *iova_start;
	u64 size_maxmr;
	struct ehca_mr_pginfo pginfo={0,0,0,0,0,0,0,NULL,0,NULL,NULL,0,NULL,0};
	struct ib_phys_buf ib_pbuf;
	u32 num_pages_mr;
	u32 num_pages_4k; /* 4k portion "pages" */

	e_mr = ehca_mr_new();
	if (!e_mr) {
		ehca_err(&shca->ib_device, "out of memory");
		ret = -ENOMEM;
		goto ehca_reg_internal_maxmr_exit0;
	}
	e_mr->flags |= EHCA_MR_FLAG_MAXMR;

	/* register internal max-MR on HCA */
	size_maxmr = (u64)high_memory - PAGE_OFFSET;	/* span of lowmem */
	iova_start = (u64*)KERNELBASE;
	ib_pbuf.addr = 0;
	ib_pbuf.size = size_maxmr;
	/* page counts include the offset of iova_start within its page */
	num_pages_mr = ((((u64)iova_start % PAGE_SIZE) + size_maxmr +
			 PAGE_SIZE - 1) / PAGE_SIZE);
	num_pages_4k = ((((u64)iova_start % EHCA_PAGESIZE) + size_maxmr +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE);

	pginfo.type = EHCA_MR_PGI_PHYS;
	pginfo.num_pages = num_pages_mr;
	pginfo.num_4k = num_pages_4k;
	pginfo.num_phys_buf = 1;
	pginfo.phys_buf_array = &ib_pbuf;

	ret = ehca_reg_mr(shca, e_mr, iova_start, size_maxmr, 0, e_pd,
			  &pginfo, &e_mr->ib.ib_mr.lkey,
			  &e_mr->ib.ib_mr.rkey);
	if (ret) {
		ehca_err(&shca->ib_device, "reg of internal max MR failed, "
			 "e_mr=%p iova_start=%p size_maxmr=%lx num_pages_mr=%x "
			 "num_pages_4k=%x", e_mr, iova_start, size_maxmr,
			 num_pages_mr, num_pages_4k);
		goto ehca_reg_internal_maxmr_exit1;
	}

	/* successful registration of all pages */
	e_mr->ib.ib_mr.device = e_pd->ib_pd.device;
	e_mr->ib.ib_mr.pd = &e_pd->ib_pd;
	e_mr->ib.ib_mr.uobject = NULL;
	atomic_inc(&(e_pd->ib_pd.usecnt));	/* MR now pins the PD */
	atomic_set(&(e_mr->ib.ib_mr.usecnt), 0);
	*e_maxmr = e_mr;
	return 0;

ehca_reg_internal_maxmr_exit1:
	ehca_mr_delete(e_mr);
ehca_reg_internal_maxmr_exit0:
	if (ret)
		ehca_err(&shca->ib_device, "ret=%x shca=%p e_pd=%p e_maxmr=%p",
			 ret, shca, e_pd, e_maxmr);
	return ret;
} /* end ehca_reg_internal_maxmr() */
1517
1518/*----------------------------------------------------------------------*/
1519
1520int ehca_reg_maxmr(struct ehca_shca *shca,
1521 struct ehca_mr *e_newmr,
1522 u64 *iova_start,
1523 int acl,
1524 struct ehca_pd *e_pd,
1525 u32 *lkey,
1526 u32 *rkey)
1527{
1528 u64 h_ret;
1529 struct ehca_mr *e_origmr = shca->maxmr;
1530 u32 hipz_acl;
1531 struct ehca_mr_hipzout_parms hipzout = {{0},0,0,0,0,0};
1532
1533 ehca_mrmw_map_acl(acl, &hipz_acl);
1534 ehca_mrmw_set_pgsize_hipz_acl(&hipz_acl);
1535
1536 h_ret = hipz_h_register_smr(shca->ipz_hca_handle, e_newmr, e_origmr,
1537 (u64)iova_start, hipz_acl, e_pd->fw_pd,
1538 &hipzout);
1539 if (h_ret != H_SUCCESS) {
1540 ehca_err(&shca->ib_device, "hipz_reg_smr failed, h_ret=%lx "
1541 "e_origmr=%p hca_hndl=%lx mr_hndl=%lx lkey=%x",
1542 h_ret, e_origmr, shca->ipz_hca_handle.handle,
1543 e_origmr->ipz_mr_handle.handle,
1544 e_origmr->ib.ib_mr.lkey);
Hoang-Nam Nguyena1a6ff12007-07-12 17:49:02 +02001545 return ehca2ib_return_code(h_ret);
Heiko J Schickfab97222006-09-22 15:22:22 -07001546 }
1547 /* successful registration */
1548 e_newmr->num_pages = e_origmr->num_pages;
1549 e_newmr->num_4k = e_origmr->num_4k;
1550 e_newmr->start = iova_start;
1551 e_newmr->size = e_origmr->size;
1552 e_newmr->acl = acl;
1553 e_newmr->ipz_mr_handle = hipzout.handle;
1554 *lkey = hipzout.lkey;
1555 *rkey = hipzout.rkey;
1556 return 0;
1557} /* end ehca_reg_maxmr() */
1558
1559/*----------------------------------------------------------------------*/
1560
1561int ehca_dereg_internal_maxmr(struct ehca_shca *shca)
1562{
1563 int ret;
1564 struct ehca_mr *e_maxmr;
1565 struct ib_pd *ib_pd;
1566
1567 if (!shca->maxmr) {
1568 ehca_err(&shca->ib_device, "bad call, shca=%p", shca);
1569 ret = -EINVAL;
1570 goto ehca_dereg_internal_maxmr_exit0;
1571 }
1572
1573 e_maxmr = shca->maxmr;
1574 ib_pd = e_maxmr->ib.ib_mr.pd;
1575 shca->maxmr = NULL; /* remove internal max-MR indication from SHCA */
1576
1577 ret = ehca_dereg_mr(&e_maxmr->ib.ib_mr);
1578 if (ret) {
1579 ehca_err(&shca->ib_device, "dereg internal max-MR failed, "
1580 "ret=%x e_maxmr=%p shca=%p lkey=%x",
1581 ret, e_maxmr, shca, e_maxmr->ib.ib_mr.lkey);
1582 shca->maxmr = e_maxmr;
1583 goto ehca_dereg_internal_maxmr_exit0;
1584 }
1585
1586 atomic_dec(&ib_pd->usecnt);
1587
1588ehca_dereg_internal_maxmr_exit0:
1589 if (ret)
1590 ehca_err(&shca->ib_device, "ret=%x shca=%p shca->maxmr=%p",
1591 ret, shca, shca->maxmr);
1592 return ret;
1593} /* end ehca_dereg_internal_maxmr() */
1594
1595/*----------------------------------------------------------------------*/
1596
1597/*
1598 * check physical buffer array of MR verbs for validness and
1599 * calculates MR size
1600 */
1601int ehca_mr_chk_buf_and_calc_size(struct ib_phys_buf *phys_buf_array,
1602 int num_phys_buf,
1603 u64 *iova_start,
1604 u64 *size)
1605{
1606 struct ib_phys_buf *pbuf = phys_buf_array;
1607 u64 size_count = 0;
1608 u32 i;
1609
1610 if (num_phys_buf == 0) {
1611 ehca_gen_err("bad phys buf array len, num_phys_buf=0");
1612 return -EINVAL;
1613 }
1614 /* check first buffer */
1615 if (((u64)iova_start & ~PAGE_MASK) != (pbuf->addr & ~PAGE_MASK)) {
1616 ehca_gen_err("iova_start/addr mismatch, iova_start=%p "
1617 "pbuf->addr=%lx pbuf->size=%lx",
1618 iova_start, pbuf->addr, pbuf->size);
1619 return -EINVAL;
1620 }
1621 if (((pbuf->addr + pbuf->size) % PAGE_SIZE) &&
1622 (num_phys_buf > 1)) {
1623 ehca_gen_err("addr/size mismatch in 1st buf, pbuf->addr=%lx "
1624 "pbuf->size=%lx", pbuf->addr, pbuf->size);
1625 return -EINVAL;
1626 }
1627
1628 for (i = 0; i < num_phys_buf; i++) {
1629 if ((i > 0) && (pbuf->addr % PAGE_SIZE)) {
1630 ehca_gen_err("bad address, i=%x pbuf->addr=%lx "
1631 "pbuf->size=%lx",
1632 i, pbuf->addr, pbuf->size);
1633 return -EINVAL;
1634 }
1635 if (((i > 0) && /* not 1st */
1636 (i < (num_phys_buf - 1)) && /* not last */
1637 (pbuf->size % PAGE_SIZE)) || (pbuf->size == 0)) {
1638 ehca_gen_err("bad size, i=%x pbuf->size=%lx",
1639 i, pbuf->size);
1640 return -EINVAL;
1641 }
1642 size_count += pbuf->size;
1643 pbuf++;
1644 }
1645
1646 *size = size_count;
1647 return 0;
1648} /* end ehca_mr_chk_buf_and_calc_size() */
1649
1650/*----------------------------------------------------------------------*/
1651
1652/* check page list of map FMR verb for validness */
1653int ehca_fmr_check_page_list(struct ehca_mr *e_fmr,
1654 u64 *page_list,
1655 int list_len)
1656{
1657 u32 i;
1658 u64 *page;
1659
1660 if ((list_len == 0) || (list_len > e_fmr->fmr_max_pages)) {
1661 ehca_gen_err("bad list_len, list_len=%x "
1662 "e_fmr->fmr_max_pages=%x fmr=%p",
1663 list_len, e_fmr->fmr_max_pages, e_fmr);
1664 return -EINVAL;
1665 }
1666
1667 /* each page must be aligned */
1668 page = page_list;
1669 for (i = 0; i < list_len; i++) {
1670 if (*page % e_fmr->fmr_page_size) {
1671 ehca_gen_err("bad page, i=%x *page=%lx page=%p fmr=%p "
1672 "fmr_page_size=%x", i, *page, page, e_fmr,
1673 e_fmr->fmr_page_size);
1674 return -EINVAL;
1675 }
1676 page++;
1677 }
1678
1679 return 0;
1680} /* end ehca_fmr_check_page_list() */
1681
1682/*----------------------------------------------------------------------*/
1683
1684/* setup page buffer from page info */
/*
 * Fill @kpage with up to @number 4K page addresses taken from @pginfo.
 * Acts as a resumable cursor: next_buf/next_chunk/next_listelem plus
 * next_4k record where the previous call stopped, so successive calls
 * walk the whole region in MAX_RPAGES-sized slices.  Three source
 * layouts are supported:
 *   EHCA_MR_PGI_PHYS - an ib_phys_buf array,
 *   EHCA_MR_PGI_USER - an ib_umem chunk list (user memory),
 *   EHCA_MR_PGI_FMR  - a flat FMR page list.
 * Returns 0 on success, -EFAULT on inconsistent input.
 */
int ehca_set_pagebuf(struct ehca_mr *e_mr,
		     struct ehca_mr_pginfo *pginfo,
		     u32 number,
		     u64 *kpage)
{
	int ret = 0;
	struct ib_umem_chunk *prev_chunk;
	struct ib_umem_chunk *chunk;
	struct ib_phys_buf *pbuf;
	u64 *fmrlist;
	u64 num4k, pgaddr, offs4k;
	u32 i = 0;	/* pages emitted (PHYS) / nmap index (USER) */
	u32 j = 0;	/* pages emitted (USER mode only) */

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* loop over desired phys_buf_array entries */
		while (i < number) {
			pbuf = pginfo->phys_buf_array + pginfo->next_buf;
			/* number of 4K pages covered by this buffer */
			num4k = ((pbuf->addr % EHCA_PAGESIZE) + pbuf->size +
				 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
			/* 4K offset of the buffer within its kernel page */
			offs4k = (pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
			while (pginfo->next_4k < offs4k + num4k) {
				/* sanity check */
				if ((pginfo->page_cnt >= pginfo->num_pages) ||
				    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
					ehca_gen_err("page_cnt >= num_pages, "
						     "page_cnt=%lx "
						     "num_pages=%lx "
						     "page_4k_cnt=%lx "
						     "num_4k=%lx i=%x",
						     pginfo->page_cnt,
						     pginfo->num_pages,
						     pginfo->page_4k_cnt,
						     pginfo->num_4k, i);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				*kpage = phys_to_abs(
					(pbuf->addr & EHCA_PAGEMASK)
					+ (pginfo->next_4k * EHCA_PAGESIZE));
				/* addr==0 is legal only for the max-MR case */
				if ( !(*kpage) && pbuf->addr ) {
					ehca_gen_err("pbuf->addr=%lx "
						     "pbuf->size=%lx "
						     "next_4k=%lx", pbuf->addr,
						     pbuf->size,
						     pginfo->next_4k);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				/* count one kernel page per PAGE_SIZE/4K slots */
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0)
					(pginfo->page_cnt)++;
				kpage++;
				i++;
				if (i >= number) break;
			}
			/* buffer exhausted: advance to the next one */
			if (pginfo->next_4k >= offs4k + num4k) {
				(pginfo->next_buf)++;
				pginfo->next_4k = 0;
			}
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		/* loop over desired chunk entries */
		chunk      = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			for (i = pginfo->next_nmap; i < chunk->nmap; ) {
				pgaddr = ( page_to_pfn(chunk->page_list[i].page)
					   << PAGE_SHIFT );
				*kpage = phys_to_abs(pgaddr +
						     (pginfo->next_4k *
						      EHCA_PAGESIZE));
				if ( !(*kpage) ) {
					ehca_gen_err("pgaddr=%lx "
						     "chunk->page_list[i]=%lx "
						     "i=%x next_4k=%lx mr=%p",
						     pgaddr,
						     (u64)sg_dma_address(
							     &chunk->
							     page_list[i]),
						     i, pginfo->next_4k, e_mr);
					ret = -EFAULT;
					goto ehca_set_pagebuf_exit0;
				}
				(pginfo->page_4k_cnt)++;
				(pginfo->next_4k)++;
				kpage++;
				/* finished one kernel page: move to next nmap */
				if (pginfo->next_4k %
				    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
					(pginfo->page_cnt)++;
					(pginfo->next_nmap)++;
					pginfo->next_4k = 0;
					i++;
				}
				j++;
				if (j >= number) break;
			}
			/*
			 * remember where to resume: prev_chunk is the last
			 * chunk whose pages were fully consumed
			 */
			if ((pginfo->next_nmap >= chunk->nmap) &&
			    (j >= number)) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
				break;
			} else if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			} else if (j >= number)
				break;
			else
				prev_chunk = chunk;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		/* loop over desired page_list entries */
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		for (i = 0; i < number; i++) {
			*kpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
					     pginfo->next_4k * EHCA_PAGESIZE);
			if ( !(*kpage) ) {
				ehca_gen_err("*fmrlist=%lx fmrlist=%p "
					     "next_listelem=%lx next_4k=%lx",
					     *fmrlist, fmrlist,
					     pginfo->next_listelem,
					     pginfo->next_4k);
				ret = -EFAULT;
				goto ehca_set_pagebuf_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			kpage++;
			/* FMR pages may span several 4K slots each */
			if (pginfo->next_4k %
			    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_listelem)++;
				fmrlist++;
				pginfo->next_4k = 0;
			}
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_exit0;
	}

ehca_set_pagebuf_exit0:
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx number=%x "
			     "kpage=%p page_cnt=%lx page_4k_cnt=%lx i=%x "
			     "next_listelem=%lx region=%p next_chunk=%p "
			     "next_nmap=%lx", ret, e_mr, pginfo, pginfo->type,
			     pginfo->num_pages, pginfo->num_4k,
			     pginfo->next_buf, pginfo->next_4k, number, kpage,
			     pginfo->page_cnt, pginfo->page_4k_cnt, i,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf() */
1849
1850/*----------------------------------------------------------------------*/
1851
/*
 * setup 1 page from page info page buffer
 *
 * Emits exactly one 4K page address per call: translates the current
 * pginfo cursor position (next_buf / next_chunk+next_nmap /
 * next_listelem, plus the 4K sub-index next_4k) into an absolute page
 * address for the HCA and advances the cursor by one 4K page.
 * Which source is consulted depends on pginfo->type.
 *
 * @e_mr:   MR the pages belong to (FMR page size and error reporting)
 * @pginfo: page info cursor; counters and cursor advanced in place
 * @rpage:  out: absolute address of the next 4K page
 *
 * Returns 0 on success, -EFAULT on exhausted or invalid page info.
 */
int ehca_set_pagebuf_1(struct ehca_mr *e_mr,
		       struct ehca_mr_pginfo *pginfo,
		       u64 *rpage)
{
	int ret = 0;
	struct ib_phys_buf *tmp_pbuf;
	u64 *fmrlist;
	struct ib_umem_chunk *chunk;
	struct ib_umem_chunk *prev_chunk;
	u64 pgaddr, num4k, offs4k;

	if (pginfo->type == EHCA_MR_PGI_PHYS) {
		/* sanity check: cursor must not run past the totals */
		if ((pginfo->page_cnt >= pginfo->num_pages) ||
		    (pginfo->page_4k_cnt >= pginfo->num_4k)) {
			ehca_gen_err("page_cnt >= num_pages, page_cnt=%lx "
				     "num_pages=%lx page_4k_cnt=%lx num_4k=%lx",
				     pginfo->page_cnt, pginfo->num_pages,
				     pginfo->page_4k_cnt, pginfo->num_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		tmp_pbuf = pginfo->phys_buf_array + pginfo->next_buf;
		/* number of 4K pages spanned by this phys buffer,
		 * including partially covered pages at either end */
		num4k = ((tmp_pbuf->addr % EHCA_PAGESIZE) + tmp_pbuf->size +
			 EHCA_PAGESIZE - 1) / EHCA_PAGESIZE;
		/* 4K index of the buffer start within its kernel page */
		offs4k = (tmp_pbuf->addr & ~PAGE_MASK) / EHCA_PAGESIZE;
		*rpage = phys_to_abs((tmp_pbuf->addr & EHCA_PAGEMASK) +
				     (pginfo->next_4k * EHCA_PAGESIZE));
		/* a zero abs address is only legal if the buffer itself
		 * starts at physical address 0 */
		if ( !(*rpage) && tmp_pbuf->addr ) {
			ehca_gen_err("tmp_pbuf->addr=%lx"
				     " tmp_pbuf->size=%lx next_4k=%lx",
				     tmp_pbuf->addr, tmp_pbuf->size,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		/* one kernel page completed per PAGE_SIZE/EHCA_PAGESIZE
		 * emitted 4K pages */
		if (pginfo->next_4k % (PAGE_SIZE / EHCA_PAGESIZE) == 0)
			(pginfo->page_cnt)++;
		/* buffer exhausted: step to the next phys buffer */
		if (pginfo->next_4k >= offs4k + num4k) {
			(pginfo->next_buf)++;
			pginfo->next_4k = 0;
		}
	} else if (pginfo->type == EHCA_MR_PGI_USER) {
		chunk = pginfo->next_chunk;
		prev_chunk = pginfo->next_chunk;
		/* next_chunk was produced by list_prepare_entry(), so the
		 * _continue iteration resumes at the chunk the cursor
		 * points to; the unconditional break below means at most
		 * one chunk entry is processed per call */
		list_for_each_entry_continue(chunk,
					     (&(pginfo->region->chunk_list)),
					     list) {
			pgaddr = ( page_to_pfn(chunk->page_list[
						       pginfo->next_nmap].page)
				   << PAGE_SHIFT);
			*rpage = phys_to_abs(pgaddr +
					     (pginfo->next_4k * EHCA_PAGESIZE));
			if ( !(*rpage) ) {
				ehca_gen_err("pgaddr=%lx chunk->page_list[]=%lx"
					     " next_nmap=%lx next_4k=%lx mr=%p",
					     pgaddr, (u64)sg_dma_address(
						     &chunk->page_list[
							     pginfo->
							     next_nmap]),
					     pginfo->next_nmap, pginfo->next_4k,
					     e_mr);
				ret = -EFAULT;
				goto ehca_set_pagebuf_1_exit0;
			}
			(pginfo->page_4k_cnt)++;
			(pginfo->next_4k)++;
			/* kernel page completed: move on to the next
			 * page_list entry */
			if (pginfo->next_4k %
			    (PAGE_SIZE / EHCA_PAGESIZE) == 0) {
				(pginfo->page_cnt)++;
				(pginfo->next_nmap)++;
				pginfo->next_4k = 0;
			}
			/* chunk exhausted: remember it as "previous" so the
			 * list_prepare_entry() below resumes at its
			 * successor on the next call */
			if (pginfo->next_nmap >= chunk->nmap) {
				pginfo->next_nmap = 0;
				prev_chunk = chunk;
			}
			break;
		}
		pginfo->next_chunk =
			list_prepare_entry(prev_chunk,
					   (&(pginfo->region->chunk_list)),
					   list);
	} else if (pginfo->type == EHCA_MR_PGI_FMR) {
		fmrlist = pginfo->page_list + pginfo->next_listelem;
		*rpage = phys_to_abs((*fmrlist & EHCA_PAGEMASK) +
				     pginfo->next_4k * EHCA_PAGESIZE);
		if ( !(*rpage) ) {
			ehca_gen_err("*fmrlist=%lx fmrlist=%p "
				     "next_listelem=%lx next_4k=%lx",
				     *fmrlist, fmrlist, pginfo->next_listelem,
				     pginfo->next_4k);
			ret = -EFAULT;
			goto ehca_set_pagebuf_1_exit0;
		}
		(pginfo->page_4k_cnt)++;
		(pginfo->next_4k)++;
		/* FMR pages use the (larger) FMR page size, not PAGE_SIZE */
		if (pginfo->next_4k %
		    (e_mr->fmr_page_size / EHCA_PAGESIZE) == 0) {
			(pginfo->page_cnt)++;
			(pginfo->next_listelem)++;
			pginfo->next_4k = 0;
		}
	} else {
		ehca_gen_err("bad pginfo->type=%x", pginfo->type);
		ret = -EFAULT;
		goto ehca_set_pagebuf_1_exit0;
	}

ehca_set_pagebuf_1_exit0:
	/* single error exit: dump the complete cursor state for debugging */
	if (ret)
		ehca_gen_err("ret=%x e_mr=%p pginfo=%p type=%x num_pages=%lx "
			     "num_4k=%lx next_buf=%lx next_4k=%lx rpage=%p "
			     "page_cnt=%lx page_4k_cnt=%lx next_listelem=%lx "
			     "region=%p next_chunk=%p next_nmap=%lx", ret, e_mr,
			     pginfo, pginfo->type, pginfo->num_pages,
			     pginfo->num_4k, pginfo->next_buf, pginfo->next_4k,
			     rpage, pginfo->page_cnt, pginfo->page_4k_cnt,
			     pginfo->next_listelem, pginfo->region,
			     pginfo->next_chunk, pginfo->next_nmap);
	return ret;
} /* end ehca_set_pagebuf_1() */
1977
1978/*----------------------------------------------------------------------*/
1979
1980/*
1981 * check MR if it is a max-MR, i.e. uses whole memory
1982 * in case it's a max-MR 1 is returned, else 0
1983 */
1984int ehca_mr_is_maxmr(u64 size,
1985 u64 *iova_start)
1986{
1987 /* a MR is treated as max-MR only if it fits following: */
1988 if ((size == ((u64)high_memory - PAGE_OFFSET)) &&
1989 (iova_start == (void*)KERNELBASE)) {
1990 ehca_gen_dbg("this is a max-MR");
1991 return 1;
1992 } else
1993 return 0;
1994} /* end ehca_mr_is_maxmr() */
1995
1996/*----------------------------------------------------------------------*/
1997
1998/* map access control for MR/MW. This routine is used for MR and MW. */
1999void ehca_mrmw_map_acl(int ib_acl,
2000 u32 *hipz_acl)
2001{
2002 *hipz_acl = 0;
2003 if (ib_acl & IB_ACCESS_REMOTE_READ)
2004 *hipz_acl |= HIPZ_ACCESSCTRL_R_READ;
2005 if (ib_acl & IB_ACCESS_REMOTE_WRITE)
2006 *hipz_acl |= HIPZ_ACCESSCTRL_R_WRITE;
2007 if (ib_acl & IB_ACCESS_REMOTE_ATOMIC)
2008 *hipz_acl |= HIPZ_ACCESSCTRL_R_ATOMIC;
2009 if (ib_acl & IB_ACCESS_LOCAL_WRITE)
2010 *hipz_acl |= HIPZ_ACCESSCTRL_L_WRITE;
2011 if (ib_acl & IB_ACCESS_MW_BIND)
2012 *hipz_acl |= HIPZ_ACCESSCTRL_MW_BIND;
2013} /* end ehca_mrmw_map_acl() */
2014
2015/*----------------------------------------------------------------------*/
2016
2017/* sets page size in hipz access control for MR/MW. */
2018void ehca_mrmw_set_pgsize_hipz_acl(u32 *hipz_acl) /*INOUT*/
2019{
2020 return; /* HCA supports only 4k */
2021} /* end ehca_mrmw_set_pgsize_hipz_acl() */
2022
2023/*----------------------------------------------------------------------*/
2024
2025/*
2026 * reverse map access control for MR/MW.
2027 * This routine is used for MR and MW.
2028 */
2029void ehca_mrmw_reverse_map_acl(const u32 *hipz_acl,
2030 int *ib_acl) /*OUT*/
2031{
2032 *ib_acl = 0;
2033 if (*hipz_acl & HIPZ_ACCESSCTRL_R_READ)
2034 *ib_acl |= IB_ACCESS_REMOTE_READ;
2035 if (*hipz_acl & HIPZ_ACCESSCTRL_R_WRITE)
2036 *ib_acl |= IB_ACCESS_REMOTE_WRITE;
2037 if (*hipz_acl & HIPZ_ACCESSCTRL_R_ATOMIC)
2038 *ib_acl |= IB_ACCESS_REMOTE_ATOMIC;
2039 if (*hipz_acl & HIPZ_ACCESSCTRL_L_WRITE)
2040 *ib_acl |= IB_ACCESS_LOCAL_WRITE;
2041 if (*hipz_acl & HIPZ_ACCESSCTRL_MW_BIND)
2042 *ib_acl |= IB_ACCESS_MW_BIND;
2043} /* end ehca_mrmw_reverse_map_acl() */
2044
2045
2046/*----------------------------------------------------------------------*/
2047
2048/*
Heiko J Schickfab97222006-09-22 15:22:22 -07002049 * MR destructor and constructor
2050 * used in Reregister MR verb, sets all fields in ehca_mr_t to 0,
2051 * except struct ib_mr and spinlock
2052 */
2053void ehca_mr_deletenew(struct ehca_mr *mr)
2054{
2055 mr->flags = 0;
2056 mr->num_pages = 0;
2057 mr->num_4k = 0;
2058 mr->acl = 0;
2059 mr->start = NULL;
2060 mr->fmr_page_size = 0;
2061 mr->fmr_max_pages = 0;
2062 mr->fmr_max_maps = 0;
2063 mr->fmr_map_cnt = 0;
2064 memset(&mr->ipz_mr_handle, 0, sizeof(mr->ipz_mr_handle));
2065 memset(&mr->galpas, 0, sizeof(mr->galpas));
2066 mr->nr_of_pages = 0;
2067 mr->pagearray = NULL;
2068} /* end ehca_mr_deletenew() */
2069
2070int ehca_init_mrmw_cache(void)
2071{
2072 mr_cache = kmem_cache_create("ehca_cache_mr",
2073 sizeof(struct ehca_mr), 0,
2074 SLAB_HWCACHE_ALIGN,
2075 NULL, NULL);
2076 if (!mr_cache)
2077 return -ENOMEM;
2078 mw_cache = kmem_cache_create("ehca_cache_mw",
2079 sizeof(struct ehca_mw), 0,
2080 SLAB_HWCACHE_ALIGN,
2081 NULL, NULL);
2082 if (!mw_cache) {
2083 kmem_cache_destroy(mr_cache);
2084 mr_cache = NULL;
2085 return -ENOMEM;
2086 }
2087 return 0;
2088}
2089
2090void ehca_cleanup_mrmw_cache(void)
2091{
2092 if (mr_cache)
2093 kmem_cache_destroy(mr_cache);
2094 if (mw_cache)
2095 kmem_cache_destroy(mw_cache);
2096}