/*
 * Copyright (c) 2004 Topspin Communications. All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mr.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

struct mthca_mtt {
        struct mthca_buddy *buddy;
        int                 order;
        u32                 first_seg;
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
        __be32 flags;
        __be32 page_size;
        __be32 key;
        __be32 pd;
        __be64 start;
        __be64 length;
        __be32 lkey;
        __be32 window_count;
        __be32 window_count_limit;
        __be64 mtt_seg;
        __be32 mtt_sz;          /* Arbel only */
        u32    reserved[2];
} __attribute__((packed));

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 << 9)
#define MTHCA_MPT_FLAG_REGION        (1 << 8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */

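/*
 * Allocate 2^order contiguous MTT segments.  Scan the free bitmaps
 * from the requested order upward; if only a larger block is free,
 * split it repeatedly, marking each unused buddy free at the lower
 * order.  Returns the first segment index, or -1 if nothing is free.
 */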
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o) {
                m = 1 << (buddy->max_order - o);
                seg = find_first_bit(buddy->bits[o], m);
                if (seg < m)
                        goto found;
        }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}

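/*
 * Return 2^order segments starting at seg to the buddy allocator,
 * merging with the adjacent buddy block at each order for as long as
 * that buddy is also free.
 */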
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);

        spin_unlock(&buddy->lock);
}

static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
        if (!buddy->bits)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
                if (!buddy->bits[i])
                        goto err_out_free;
                bitmap_zero(buddy->bits[i],
                            1 << (buddy->max_order - i));
        }

        set_bit(0, buddy->bits[buddy->max_order]);

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);

err_out:
        return -ENOMEM;
}

static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);
}

static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
                                 struct mthca_buddy *buddy)
{
        u32 seg = mthca_buddy_alloc(buddy, order);

        if (seg == -1)
                return -1;

        if (mthca_is_memfree(dev))
                if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
                                          seg + (1 << order) - 1)) {
                        mthca_buddy_free(buddy, seg, order);
                        seg = -1;
                }

        return seg;
}

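/*
 * Allocate an MTT with room for 'size' entries: round up to a power
 * of two of MTT segments (each segment holds MTHCA_MTT_SEG_SIZE / 8
 * eight-byte entries) and reserve them from the given buddy.
 */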
static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
                                           struct mthca_buddy *buddy)
{
        struct mthca_mtt *mtt;
        int i;

        if (size <= 0)
                return ERR_PTR(-EINVAL);

        mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->buddy = buddy;
        mtt->order = 0;
        for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
                ++mtt->order;

        mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
        if (mtt->first_seg == -1) {
                kfree(mtt);
                return ERR_PTR(-ENOMEM);
        }

        return mtt;
}

struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
        return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
        if (!mtt)
                return;

        mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

        mthca_table_put_range(dev, dev->mr_table.mtt_table,
                              mtt->first_seg,
                              mtt->first_seg + (1 << mtt->order) - 1);

        kfree(mtt);
}

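/*
 * Write 'list_len' page addresses into an MTT starting at
 * 'start_index', using the WRITE_MTT firmware command.  The work is
 * chunked so each command fits in one mailbox: the first 64-bit slot
 * of the mailbox carries the target address inside the MTT, the
 * second is reserved (zero), and the remaining slots carry page
 * addresses tagged with MTHCA_MTT_FLAG_PRESENT.
 */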
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                    int start_index, u64 *buffer_list, int list_len)
{
        struct mthca_mailbox *mailbox;
        __be64 *mtt_entry;
        int err = 0;
        u8 status;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mtt_entry = mailbox->buf;

        while (list_len > 0) {
                mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
                                           mtt->first_seg * MTHCA_MTT_SEG_SIZE +
                                           start_index * 8);
                mtt_entry[1] = 0;
                for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
                        mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
                                                       MTHCA_MTT_FLAG_PRESENT);

                /*
                 * If we have an odd number of entries to write, add
                 * one more dummy entry for firmware efficiency.
                 */
                if (i & 1)
                        mtt_entry[i + 2] = 0;

                err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
                if (err) {
                        mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
                        goto out;
                }
                if (status) {
                        mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
                                   status);
                        err = -EINVAL;
                        goto out;
                }

                list_len    -= i;
                start_index += i;
                buffer_list += i;
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

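/*
 * Conversion between MPT table indices and the memory keys seen by
 * consumers.  Tavor uses the index as the key directly; mem-free
 * (Arbel) mode uses the index rotated left by one byte.
 */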
static inline u32 tavor_hw_index_to_key(u32 ind)
{
        return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
        return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
        if (mthca_is_memfree(dev))
                return arbel_hw_index_to_key(ind);
        else
                return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
        if (mthca_is_memfree(dev))
                return arbel_key_to_hw_index(key);
        else
                return tavor_key_to_hw_index(key);
}

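/*
 * Create a memory region: allocate an MPT index, build the MPT entry
 * in a mailbox and hand ownership to the HCA with SW2HW_MPT.  The
 * caller sets up mr->mtt beforehand; a NULL mr->mtt creates a
 * physical (untranslated) region.
 */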
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
                   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
        struct mthca_mailbox *mailbox;
        struct mthca_mpt_entry *mpt_entry;
        u32 key;
        int i;
        int err;
        u8 status;

        might_sleep();

        WARN_ON(buffer_size_shift >= 32);

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_table;
        }
        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);
        if (!mr->mtt)
                mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

        mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        mpt_entry->start     = cpu_to_be64(iova);
        mpt_entry->length    = cpu_to_be64(total_size);

        memset(&mpt_entry->lkey, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

        if (mr->mtt)
                mpt_entry->mtt_seg =
                        cpu_to_be64(dev->mr_table.mtt_base +
                                    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1),
                              &status);
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox;
        } else if (status) {
                mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_mailbox;
        }

        mthca_free_mailbox(dev, mailbox);
        return err;

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
                           u32 access, struct mthca_mr *mr)
{
        mr->mtt = NULL;
        return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

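/*
 * Create a memory region over a caller-supplied list of page
 * addresses: allocate an MTT sized for list_len pages, write the page
 * list into it, then allocate the MPT entry on top of that MTT.
 */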
int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
                        u64 *buffer_list, int buffer_size_shift,
                        int list_len, u64 iova, u64 total_size,
                        u32 access, struct mthca_mr *mr)
{
        int err;

        mr->mtt = mthca_alloc_mtt(dev, list_len);
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
        if (err) {
                mthca_free_mtt(dev, mr->mtt);
                return err;
        }

        err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
                             total_size, access, mr);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}

/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
        mthca_table_put(dev, dev->mr_table.mpt_table,
                        key_to_hw_index(dev, lkey));

        mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
        int err;
        u8 status;

        might_sleep();

        err = mthca_HW2SW_MPT(dev, NULL,
                              key_to_hw_index(dev, mr->ibmr.lkey) &
                              (dev->limits.num_mpts - 1),
                              &status);
        if (err)
                mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
                           status);

        mthca_free_region(dev, mr->ibmr.lkey);
        mthca_free_mtt(dev, mr->mtt);
}

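/*
 * Allocate a fast memory region (FMR).  The MPT and MTT are set up
 * once here, and direct pointers to them are recorded so that later
 * map/unmap operations can update them without firmware commands:
 * via mthca_table_find() into ICM on mem-free (Arbel) devices, or via
 * the ioremapped tavor_fmr windows on Tavor.
 */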
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
                    u32 access, struct mthca_fmr *mr)
{
        struct mthca_mpt_entry *mpt_entry;
        struct mthca_mailbox *mailbox;
        u64 mtt_seg;
        u32 key, idx;
        u8 status;
        int list_len = mr->attr.max_pages;
        int err = -ENOMEM;
        int i;

        might_sleep();

        if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
                return -EINVAL;

        /* For Arbel, all MTTs must fit in the same page. */
        if (mthca_is_memfree(dev) &&
            mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
                return -EINVAL;

        mr->maps = 0;

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;

        idx = key & (dev->limits.num_mpts - 1);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;

                mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
                BUG_ON(!mr->mem.arbel.mpt);
        } else
                mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
                        sizeof *(mr->mem.tavor.mpt) * idx;

        mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
        if (IS_ERR(mr->mtt))
                goto err_out_table;

        mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;

        if (mthca_is_memfree(dev)) {
                mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
                                                      mr->mtt->first_seg);
                BUG_ON(!mr->mem.arbel.mtts);
        } else
                mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_free_mtt;

        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);

        mpt_entry->page_size = cpu_to_be32(mr->attr.page_size - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        memset(&mpt_entry->start, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
        mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1),
                              &status);
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox_free;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_mailbox_free;
        }

        mthca_free_mailbox(dev, mailbox);
        return 0;

err_out_mailbox_free:
        mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
        return err;
}

int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mthca_free_region(dev, fmr->ibmr.lkey);
        mthca_free_mtt(dev, fmr->mtt);

        return 0;
}

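/*
 * Sanity checks common to both FMR map paths: list length and remap
 * count against the FMR attributes, and page alignment of the iova.
 */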
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
                                  int list_len, u64 iova)
{
        int i, page_mask;

        if (list_len > fmr->attr.max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->attr.page_size) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < list_len; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->attr.max_maps)
                return -EINVAL;

        return 0;
}

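/*
 * Map an FMR on Tavor without a firmware command: put the MPT into
 * software ownership, write the page list through the ioremapped MTT
 * window, update key/length/start in the MPT, then flip it back to
 * hardware ownership.  The key is advanced by num_mpts on each map,
 * which keeps the MPT index (the low bits) the same but yields a
 * fresh lkey/rkey for the new mapping.
 */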
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        struct mthca_mpt_entry mpt_entry;
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

        for (i = 0; i < list_len; ++i) {
                __be64 mtt_entry = cpu_to_be64(page_list[i] |
                                               MTHCA_MTT_FLAG_PRESENT);
                mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
        }

        mpt_entry.lkey   = cpu_to_be32(key);
        mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
        mpt_entry.start  = cpu_to_be64(iova);

        __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
        memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
                    offsetof(struct mthca_mpt_entry, window_count) -
                    offsetof(struct mthca_mpt_entry, start));

        writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

        return 0;
}

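/*
 * Map an FMR on mem-free (Arbel) hardware: same sequence as the Tavor
 * path, but the MPT and MTT entries live in ICM and are written
 * directly, with wmb() barriers ordering the ownership-byte updates
 * against the entry contents.
 */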
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

        wmb();

        for (i = 0; i < list_len; ++i)
                fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
                                                     MTHCA_MTT_FLAG_PRESENT);

        fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
        fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
        fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
        fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

        wmb();

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

        wmb();

        return 0;
}

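/*
 * Unmapping an FMR only resets the key to its base index and returns
 * the MPT to software ownership; the MTT contents are left in place
 * until the next map or until the FMR is freed.
 */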
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        u32 key;

        if (!fmr->maps)
                return;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key &= dev->limits.num_mpts - 1;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        fmr->maps = 0;

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        u32 key;

        if (!fmr->maps)
                return;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        key &= dev->limits.num_mpts - 1;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        fmr->maps = 0;

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}

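/*
 * Set up the MR machinery at driver init: the MPT index allocator and
 * the main MTT buddy.  If FMR support is enabled and MTTs are
 * reserved for FMRs, also ioremap the start of the MPT and MTT tables
 * and give FMRs a dedicated MTT buddy over that window; finally,
 * MTT segments reserved for firmware are taken out of whichever buddy
 * FMRs use.
 */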
int __devinit mthca_init_mr_table(struct mthca_dev *dev)
{
        int err, i;

        err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
                               dev->limits.num_mpts,
                               ~0, dev->limits.reserved_mrws);
        if (err)
                return err;

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
                dev->limits.fmr_reserved_mtts = 0;
        else
                dev->mthca_flags |= MTHCA_FLAG_FMR;

        err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
                               fls(dev->limits.num_mtt_segs - 1));

        if (err)
                goto err_mtt_buddy;

        dev->mr_table.tavor_fmr.mpt_base = NULL;
        dev->mr_table.tavor_fmr.mtt_base = NULL;

        if (dev->limits.fmr_reserved_mtts) {
                i = fls(dev->limits.fmr_reserved_mtts - 1);

                if (i >= 31) {
                        mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
                        err = -EINVAL;
                        goto err_fmr_mpt;
                }

                dev->mr_table.tavor_fmr.mpt_base =
                        ioremap(dev->mr_table.mpt_base,
                                (1 << i) * sizeof (struct mthca_mpt_entry));

                if (!dev->mr_table.tavor_fmr.mpt_base) {
                        mthca_warn(dev, "MPT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mpt;
                }

                dev->mr_table.tavor_fmr.mtt_base =
                        ioremap(dev->mr_table.mtt_base,
                                (1 << i) * MTHCA_MTT_SEG_SIZE);
                if (!dev->mr_table.tavor_fmr.mtt_base) {
                        mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mtt;
                }

                err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i);
                if (err)
                        goto err_fmr_mtt_buddy;

                /* Prevent regular MRs from using FMR keys */
                err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i);
                if (err)
                        goto err_reserve_fmr;

                dev->mr_table.fmr_mtt_buddy =
                        &dev->mr_table.tavor_fmr.mtt_buddy;
        } else
                dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

        /* FMR table is always the first, take reserved MTTs out of there */
        if (dev->limits.reserved_mtts) {
                i = fls(dev->limits.reserved_mtts - 1);

                if (mthca_alloc_mtt_range(dev, i,
                                          dev->mr_table.fmr_mtt_buddy) == -1) {
                        mthca_warn(dev, "MTT table of order %d is too small.\n",
                                   dev->mr_table.fmr_mtt_buddy->max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
err_reserve_fmr:
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

        return err;
}

void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev)
{
        /* XXX check if any MRs are still allocated? */
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}