/*
 * Copyright (c) 2004 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005 Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 *
 * $Id: mthca_mr.c 1349 2004-12-16 21:09:43Z roland $
 */

#include <linux/slab.h>
#include <linux/init.h>
#include <linux/errno.h>

#include "mthca_dev.h"
#include "mthca_cmd.h"
#include "mthca_memfree.h"

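/*
 * An mthca_mtt describes one run of MTT (memory translation table)
 * segments allocated from one of the buddy allocators below:
 * first_seg is the index of the first segment of the run and order is
 * the log2 of the number of segments in it.
 */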
struct mthca_mtt {
        struct mthca_buddy *buddy;
        int                 order;
        u32                 first_seg;
};

/*
 * Must be packed because mtt_seg is 64 bits but only aligned to 32 bits.
 */
struct mthca_mpt_entry {
        __be32 flags;
        __be32 page_size;
        __be32 key;
        __be32 pd;
        __be64 start;
        __be64 length;
        __be32 lkey;
        __be32 window_count;
        __be32 window_count_limit;
        __be64 mtt_seg;
        __be32 mtt_sz;          /* Arbel only */
        u32    reserved[2];
} __attribute__((packed));

#define MTHCA_MPT_FLAG_SW_OWNS       (0xfUL << 28)
#define MTHCA_MPT_FLAG_MIO           (1 << 17)
#define MTHCA_MPT_FLAG_BIND_ENABLE   (1 << 15)
#define MTHCA_MPT_FLAG_PHYSICAL      (1 << 9)
#define MTHCA_MPT_FLAG_REGION        (1 << 8)

#define MTHCA_MTT_FLAG_PRESENT       1

#define MTHCA_MPT_STATUS_SW 0xF0
#define MTHCA_MPT_STATUS_HW 0x00

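/*
 * The status byte at the start of an MPT entry says who owns it:
 * MTHCA_MPT_STATUS_SW while software is editing the entry (as the FMR
 * map/unmap paths below do) and MTHCA_MPT_STATUS_HW once it has been
 * handed back to the HCA.
 */
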
/*
 * Buddy allocator for MTT segments (currently not very efficient
 * since it doesn't keep a free list and just searches linearly
 * through the bitmaps)
 */

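/*
 * buddy->bits[o] has one bit per free block of 2^o segments.  An
 * allocation of a given order takes the first free block of that order
 * (splitting a larger block and marking the unused halves free if
 * necessary); freeing a block merges it with its buddy (seg ^ 1) for as
 * long as the buddy is also free.
 */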
static u32 mthca_buddy_alloc(struct mthca_buddy *buddy, int order)
{
        int o;
        int m;
        u32 seg;

        spin_lock(&buddy->lock);

        for (o = order; o <= buddy->max_order; ++o) {
                m = 1 << (buddy->max_order - o);
                seg = find_first_bit(buddy->bits[o], m);
                if (seg < m)
                        goto found;
        }

        spin_unlock(&buddy->lock);
        return -1;

 found:
        clear_bit(seg, buddy->bits[o]);

        while (o > order) {
                --o;
                seg <<= 1;
                set_bit(seg ^ 1, buddy->bits[o]);
        }

        spin_unlock(&buddy->lock);

        seg <<= order;

        return seg;
}

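/*
 * Return a block of 2^order segments starting at seg to the allocator,
 * coalescing upward while the buddy of the (possibly already merged)
 * block is free.
 */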
static void mthca_buddy_free(struct mthca_buddy *buddy, u32 seg, int order)
{
        seg >>= order;

        spin_lock(&buddy->lock);

        while (test_bit(seg ^ 1, buddy->bits[order])) {
                clear_bit(seg ^ 1, buddy->bits[order]);
                seg >>= 1;
                ++order;
        }

        set_bit(seg, buddy->bits[order]);

        spin_unlock(&buddy->lock);
}

static int __devinit mthca_buddy_init(struct mthca_buddy *buddy, int max_order)
{
        int i, s;

        buddy->max_order = max_order;
        spin_lock_init(&buddy->lock);

        buddy->bits = kzalloc((buddy->max_order + 1) * sizeof (long *),
                              GFP_KERNEL);
        if (!buddy->bits)
                goto err_out;

        for (i = 0; i <= buddy->max_order; ++i) {
                s = BITS_TO_LONGS(1 << (buddy->max_order - i));
                buddy->bits[i] = kmalloc(s * sizeof (long), GFP_KERNEL);
                if (!buddy->bits[i])
                        goto err_out_free;
                bitmap_zero(buddy->bits[i],
                            1 << (buddy->max_order - i));
        }

        set_bit(0, buddy->bits[buddy->max_order]);

        return 0;

err_out_free:
        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);

err_out:
        return -ENOMEM;
}

static void __devexit mthca_buddy_cleanup(struct mthca_buddy *buddy)
{
        int i;

        for (i = 0; i <= buddy->max_order; ++i)
                kfree(buddy->bits[i]);

        kfree(buddy->bits);
}

static u32 mthca_alloc_mtt_range(struct mthca_dev *dev, int order,
                                 struct mthca_buddy *buddy)
{
        u32 seg = mthca_buddy_alloc(buddy, order);

        if (seg == -1)
                return -1;

        if (mthca_is_memfree(dev))
                if (mthca_table_get_range(dev, dev->mr_table.mtt_table, seg,
                                          seg + (1 << order) - 1)) {
                        mthca_buddy_free(buddy, seg, order);
                        seg = -1;
                }

        return seg;
}

static struct mthca_mtt *__mthca_alloc_mtt(struct mthca_dev *dev, int size,
                                           struct mthca_buddy *buddy)
{
        struct mthca_mtt *mtt;
        int i;

        if (size <= 0)
                return ERR_PTR(-EINVAL);

        mtt = kmalloc(sizeof *mtt, GFP_KERNEL);
        if (!mtt)
                return ERR_PTR(-ENOMEM);

        mtt->buddy = buddy;
        mtt->order = 0;
        for (i = MTHCA_MTT_SEG_SIZE / 8; i < size; i <<= 1)
                ++mtt->order;

        mtt->first_seg = mthca_alloc_mtt_range(dev, mtt->order, buddy);
        if (mtt->first_seg == -1) {
                kfree(mtt);
                return ERR_PTR(-ENOMEM);
        }

        return mtt;
}

struct mthca_mtt *mthca_alloc_mtt(struct mthca_dev *dev, int size)
{
        return __mthca_alloc_mtt(dev, size, &dev->mr_table.mtt_buddy);
}

void mthca_free_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt)
{
        if (!mtt)
                return;

        mthca_buddy_free(mtt->buddy, mtt->first_seg, mtt->order);

        mthca_table_put_range(dev, dev->mr_table.mtt_table,
                              mtt->first_seg,
                              mtt->first_seg + (1 << mtt->order) - 1);

        kfree(mtt);
}

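/*
 * Copy a buffer list into the MTT entries backing an mthca_mtt, one
 * mailbox at a time: the first 16 bytes of each WRITE_MTT mailbox hold
 * the target MTT address (plus a reserved word), followed by up to
 * MTHCA_MAILBOX_SIZE / 8 - 2 page entries tagged with
 * MTHCA_MTT_FLAG_PRESENT.
 */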
int mthca_write_mtt(struct mthca_dev *dev, struct mthca_mtt *mtt,
                    int start_index, u64 *buffer_list, int list_len)
{
        struct mthca_mailbox *mailbox;
        __be64 *mtt_entry;
        int err = 0;
        u8 status;
        int i;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                return PTR_ERR(mailbox);
        mtt_entry = mailbox->buf;

        while (list_len > 0) {
                mtt_entry[0] = cpu_to_be64(dev->mr_table.mtt_base +
                                           mtt->first_seg * MTHCA_MTT_SEG_SIZE +
                                           start_index * 8);
                mtt_entry[1] = 0;
                for (i = 0; i < list_len && i < MTHCA_MAILBOX_SIZE / 8 - 2; ++i)
                        mtt_entry[i + 2] = cpu_to_be64(buffer_list[i] |
                                                       MTHCA_MTT_FLAG_PRESENT);

                /*
                 * If we have an odd number of entries to write, add
                 * one more dummy entry for firmware efficiency.
                 */
                if (i & 1)
                        mtt_entry[i + 2] = 0;

                err = mthca_WRITE_MTT(dev, mailbox, (i + 1) & ~1, &status);
                if (err) {
                        mthca_warn(dev, "WRITE_MTT failed (%d)\n", err);
                        goto out;
                }
                if (status) {
                        mthca_warn(dev, "WRITE_MTT returned status 0x%02x\n",
                                   status);
                        err = -EINVAL;
                        goto out;
                }

                list_len    -= i;
                start_index += i;
                buffer_list += i;
        }

out:
        mthca_free_mailbox(dev, mailbox);
        return err;
}

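/*
 * Memory keys are derived from MPT indices.  Tavor uses the index
 * directly as the key, while Arbel (mem-free) keys are a byte-rotated
 * form of the index, so both directions of the conversion are provided
 * for each family.
 */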
static inline u32 tavor_hw_index_to_key(u32 ind)
{
        return ind;
}

static inline u32 tavor_key_to_hw_index(u32 key)
{
        return key;
}

static inline u32 arbel_hw_index_to_key(u32 ind)
{
        return (ind >> 24) | (ind << 8);
}

static inline u32 arbel_key_to_hw_index(u32 key)
{
        return (key << 24) | (key >> 8);
}

static inline u32 hw_index_to_key(struct mthca_dev *dev, u32 ind)
{
        if (mthca_is_memfree(dev))
                return arbel_hw_index_to_key(ind);
        else
                return tavor_hw_index_to_key(ind);
}

static inline u32 key_to_hw_index(struct mthca_dev *dev, u32 key)
{
        if (mthca_is_memfree(dev))
                return arbel_key_to_hw_index(key);
        else
                return tavor_key_to_hw_index(key);
}

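/*
 * Allocate an MPT entry for a memory region: reserve a key, pin the
 * MPT table entry on mem-free hardware, fill in the MPT fields
 * (pointing at mr->mtt unless the region is physical/untranslated),
 * and hand the entry to the HCA with a SW2HW_MPT command.
 */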
int mthca_mr_alloc(struct mthca_dev *dev, u32 pd, int buffer_size_shift,
                   u64 iova, u64 total_size, u32 access, struct mthca_mr *mr)
{
        struct mthca_mailbox *mailbox;
        struct mthca_mpt_entry *mpt_entry;
        u32 key;
        int i;
        int err;
        u8 status;

        WARN_ON(buffer_size_shift >= 32);

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;
        }

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox)) {
                err = PTR_ERR(mailbox);
                goto err_out_table;
        }
        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);
        if (!mr->mtt)
                mpt_entry->flags |= cpu_to_be32(MTHCA_MPT_FLAG_PHYSICAL);

        mpt_entry->page_size = cpu_to_be32(buffer_size_shift - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        mpt_entry->start     = cpu_to_be64(iova);
        mpt_entry->length    = cpu_to_be64(total_size);

        memset(&mpt_entry->lkey, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, lkey));

        if (mr->mtt)
                mpt_entry->mtt_seg =
                        cpu_to_be64(dev->mr_table.mtt_base +
                                    mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1),
                              &status);
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox;
        } else if (status) {
                mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_mailbox;
        }

        mthca_free_mailbox(dev, mailbox);
        return err;

err_out_mailbox:
        mthca_free_mailbox(dev, mailbox);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, key);
        return err;
}

int mthca_mr_alloc_notrans(struct mthca_dev *dev, u32 pd,
                           u32 access, struct mthca_mr *mr)
{
        mr->mtt = NULL;
        return mthca_mr_alloc(dev, pd, 12, 0, ~0ULL, access, mr);
}

int mthca_mr_alloc_phys(struct mthca_dev *dev, u32 pd,
                        u64 *buffer_list, int buffer_size_shift,
                        int list_len, u64 iova, u64 total_size,
                        u32 access, struct mthca_mr *mr)
{
        int err;

        mr->mtt = mthca_alloc_mtt(dev, list_len);
        if (IS_ERR(mr->mtt))
                return PTR_ERR(mr->mtt);

        err = mthca_write_mtt(dev, mr->mtt, 0, buffer_list, list_len);
        if (err) {
                mthca_free_mtt(dev, mr->mtt);
                return err;
        }

        err = mthca_mr_alloc(dev, pd, buffer_size_shift, iova,
                             total_size, access, mr);
        if (err)
                mthca_free_mtt(dev, mr->mtt);

        return err;
}

/* Free mr or fmr */
static void mthca_free_region(struct mthca_dev *dev, u32 lkey)
{
        mthca_table_put(dev, dev->mr_table.mpt_table,
                        key_to_hw_index(dev, lkey));

        mthca_free(&dev->mr_table.mpt_alloc, key_to_hw_index(dev, lkey));
}

void mthca_free_mr(struct mthca_dev *dev, struct mthca_mr *mr)
{
        int err;
        u8 status;

        err = mthca_HW2SW_MPT(dev, NULL,
                              key_to_hw_index(dev, mr->ibmr.lkey) &
                              (dev->limits.num_mpts - 1),
                              &status);
        if (err)
                mthca_warn(dev, "HW2SW_MPT failed (%d)\n", err);
        else if (status)
                mthca_warn(dev, "HW2SW_MPT returned status 0x%02x\n",
                           status);

        mthca_free_region(dev, mr->ibmr.lkey);
        mthca_free_mtt(dev, mr->mtt);
}

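/*
 * FMRs (fast memory regions) are set up once here and then remapped by
 * writing the MPT and MTT entries directly, without a firmware command
 * per map.  mthca_fmr_alloc() therefore stashes pointers to the MPT
 * entry and MTT entries (ioremapped device memory on Tavor, ICM table
 * entries on Arbel) in mr->mem for the map/unmap paths below.
 */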
int mthca_fmr_alloc(struct mthca_dev *dev, u32 pd,
                    u32 access, struct mthca_fmr *mr)
{
        struct mthca_mpt_entry *mpt_entry;
        struct mthca_mailbox *mailbox;
        u64 mtt_seg;
        u32 key, idx;
        u8 status;
        int list_len = mr->attr.max_pages;
        int err = -ENOMEM;
        int i;

        if (mr->attr.page_size < 12 || mr->attr.page_size >= 32)
                return -EINVAL;

        /* For Arbel, all MTTs must fit in the same page. */
        if (mthca_is_memfree(dev) &&
            mr->attr.max_pages * sizeof *mr->mem.arbel.mtts > PAGE_SIZE)
                return -EINVAL;

        mr->maps = 0;

        key = mthca_alloc(&dev->mr_table.mpt_alloc);
        if (key == -1)
                return -ENOMEM;

        idx = key & (dev->limits.num_mpts - 1);
        mr->ibmr.rkey = mr->ibmr.lkey = hw_index_to_key(dev, key);

        if (mthca_is_memfree(dev)) {
                err = mthca_table_get(dev, dev->mr_table.mpt_table, key);
                if (err)
                        goto err_out_mpt_free;

                mr->mem.arbel.mpt = mthca_table_find(dev->mr_table.mpt_table, key);
                BUG_ON(!mr->mem.arbel.mpt);
        } else
                mr->mem.tavor.mpt = dev->mr_table.tavor_fmr.mpt_base +
                        sizeof *(mr->mem.tavor.mpt) * idx;

        mr->mtt = __mthca_alloc_mtt(dev, list_len, dev->mr_table.fmr_mtt_buddy);
        if (IS_ERR(mr->mtt))
                goto err_out_table;

        mtt_seg = mr->mtt->first_seg * MTHCA_MTT_SEG_SIZE;

        if (mthca_is_memfree(dev)) {
                mr->mem.arbel.mtts = mthca_table_find(dev->mr_table.mtt_table,
                                                      mr->mtt->first_seg);
                BUG_ON(!mr->mem.arbel.mtts);
        } else
                mr->mem.tavor.mtts = dev->mr_table.tavor_fmr.mtt_base + mtt_seg;

        mailbox = mthca_alloc_mailbox(dev, GFP_KERNEL);
        if (IS_ERR(mailbox))
                goto err_out_free_mtt;

        mpt_entry = mailbox->buf;

        mpt_entry->flags = cpu_to_be32(MTHCA_MPT_FLAG_SW_OWNS     |
                                       MTHCA_MPT_FLAG_MIO         |
                                       MTHCA_MPT_FLAG_REGION      |
                                       access);

        mpt_entry->page_size = cpu_to_be32(mr->attr.page_size - 12);
        mpt_entry->key       = cpu_to_be32(key);
        mpt_entry->pd        = cpu_to_be32(pd);
        memset(&mpt_entry->start, 0,
               sizeof *mpt_entry - offsetof(struct mthca_mpt_entry, start));
        mpt_entry->mtt_seg   = cpu_to_be64(dev->mr_table.mtt_base + mtt_seg);

        if (0) {
                mthca_dbg(dev, "Dumping MPT entry %08x:\n", mr->ibmr.lkey);
                for (i = 0; i < sizeof (struct mthca_mpt_entry) / 4; ++i) {
                        if (i % 4 == 0)
                                printk("[%02x] ", i * 4);
                        printk(" %08x", be32_to_cpu(((__be32 *) mpt_entry)[i]));
                        if ((i + 1) % 4 == 0)
                                printk("\n");
                }
        }

        err = mthca_SW2HW_MPT(dev, mailbox,
                              key & (dev->limits.num_mpts - 1),
                              &status);
        if (err) {
                mthca_warn(dev, "SW2HW_MPT failed (%d)\n", err);
                goto err_out_mailbox_free;
        }
        if (status) {
                mthca_warn(dev, "SW2HW_MPT returned status 0x%02x\n",
                           status);
                err = -EINVAL;
                goto err_out_mailbox_free;
        }

        mthca_free_mailbox(dev, mailbox);
        return 0;

err_out_mailbox_free:
        mthca_free_mailbox(dev, mailbox);

err_out_free_mtt:
        mthca_free_mtt(dev, mr->mtt);

err_out_table:
        mthca_table_put(dev, dev->mr_table.mpt_table, key);

err_out_mpt_free:
        mthca_free(&dev->mr_table.mpt_alloc, mr->ibmr.lkey);
        return err;
}

int mthca_free_fmr(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        if (fmr->maps)
                return -EBUSY;

        mthca_free_region(dev, fmr->ibmr.lkey);
        mthca_free_mtt(dev, fmr->mtt);

        return 0;
}

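/*
 * Sanity-check a map request before touching the hardware: the page
 * list must fit in the FMR, the iova must be aligned to the FMR page
 * size, and the FMR must not already have used up its maximum number
 * of maps.
 */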
static inline int mthca_check_fmr(struct mthca_fmr *fmr, u64 *page_list,
                                  int list_len, u64 iova)
{
        int i, page_mask;

        if (list_len > fmr->attr.max_pages)
                return -EINVAL;

        page_mask = (1 << fmr->attr.page_size) - 1;

        /* We are getting page lists, so va must be page aligned. */
        if (iova & page_mask)
                return -EINVAL;

        /* Trust the user not to pass misaligned data in page_list */
        if (0)
                for (i = 0; i < list_len; ++i) {
                        if (page_list[i] & ~page_mask)
                                return -EINVAL;
                }

        if (fmr->maps >= fmr->attr.max_maps)
                return -EINVAL;

        return 0;
}

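/*
 * Map an FMR on Tavor: advance the key by num_mpts (which keeps the
 * same MPT index but changes the key consumers see, so stale keys from
 * earlier maps stop matching), mark the MPT as SW-owned, write the new
 * MTT entries and the new key/start/length through the ioremapped MPT,
 * and hand the entry back to hardware.
 */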
int mthca_tavor_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        struct mthca_mpt_entry mpt_entry;
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);

        for (i = 0; i < list_len; ++i) {
                __be64 mtt_entry = cpu_to_be64(page_list[i] |
                                               MTHCA_MTT_FLAG_PRESENT);
                mthca_write64_raw(mtt_entry, fmr->mem.tavor.mtts + i);
        }

        mpt_entry.lkey   = cpu_to_be32(key);
        mpt_entry.length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
        mpt_entry.start  = cpu_to_be64(iova);

        __raw_writel((__force u32) mpt_entry.lkey, &fmr->mem.tavor.mpt->key);
        memcpy_toio(&fmr->mem.tavor.mpt->start, &mpt_entry.start,
                    offsetof(struct mthca_mpt_entry, window_count) -
                    offsetof(struct mthca_mpt_entry, start));

        writeb(MTHCA_MPT_STATUS_HW, fmr->mem.tavor.mpt);

        return 0;
}

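/*
 * The Arbel (mem-free) variant does the same dance, but the MPT and
 * MTT entries live in host memory (ICM), so plain stores plus memory
 * barriers are used instead of MMIO writes: the entry is marked
 * SW-owned, the MTTs and MPT fields are updated, and only then is the
 * status flipped back to HW-owned.
 */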
int mthca_arbel_map_phys_fmr(struct ib_fmr *ibfmr, u64 *page_list,
                             int list_len, u64 iova)
{
        struct mthca_fmr *fmr = to_mfmr(ibfmr);
        struct mthca_dev *dev = to_mdev(ibfmr->device);
        u32 key;
        int i, err;

        err = mthca_check_fmr(fmr, page_list, list_len, iova);
        if (err)
                return err;

        ++fmr->maps;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        key += dev->limits.num_mpts;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;

        wmb();

        for (i = 0; i < list_len; ++i)
                fmr->mem.arbel.mtts[i] = cpu_to_be64(page_list[i] |
                                                     MTHCA_MTT_FLAG_PRESENT);

        fmr->mem.arbel.mpt->key    = cpu_to_be32(key);
        fmr->mem.arbel.mpt->lkey   = cpu_to_be32(key);
        fmr->mem.arbel.mpt->length = cpu_to_be64(list_len * (1ull << fmr->attr.page_size));
        fmr->mem.arbel.mpt->start  = cpu_to_be64(iova);

        wmb();

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_HW;

        wmb();

        return 0;
}

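/*
 * Unmapping an FMR does not involve firmware either: it restores the
 * base key (masking off the generation added by map) and flips the MPT
 * back to SW ownership so the translation is no longer valid.
 */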
void mthca_tavor_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        u32 key;

        if (!fmr->maps)
                return;

        key = tavor_key_to_hw_index(fmr->ibmr.lkey);
        key &= dev->limits.num_mpts - 1;
        fmr->ibmr.lkey = fmr->ibmr.rkey = tavor_hw_index_to_key(key);

        fmr->maps = 0;

        writeb(MTHCA_MPT_STATUS_SW, fmr->mem.tavor.mpt);
}

void mthca_arbel_fmr_unmap(struct mthca_dev *dev, struct mthca_fmr *fmr)
{
        u32 key;

        if (!fmr->maps)
                return;

        key = arbel_key_to_hw_index(fmr->ibmr.lkey);
        key &= dev->limits.num_mpts - 1;
        fmr->ibmr.lkey = fmr->ibmr.rkey = arbel_hw_index_to_key(key);

        fmr->maps = 0;

        *(u8 *) fmr->mem.arbel.mpt = MTHCA_MPT_STATUS_SW;
}

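/*
 * Set up the MR machinery at driver init: an allocator for MPT keys, a
 * buddy allocator covering all MTT segments, and (when FMRs are
 * supported) ioremapped windows onto the Tavor MPT/MTT tables plus a
 * dedicated buddy allocator for the FMR portion of the MTT table.
 * Reserved MTTs are carved out of the FMR buddy, which always covers
 * the start of the table.
 */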
int __devinit mthca_init_mr_table(struct mthca_dev *dev)
{
        int err, i;

        err = mthca_alloc_init(&dev->mr_table.mpt_alloc,
                               dev->limits.num_mpts,
                               ~0, dev->limits.reserved_mrws);
        if (err)
                return err;

        if (!mthca_is_memfree(dev) &&
            (dev->mthca_flags & MTHCA_FLAG_DDR_HIDDEN))
                dev->limits.fmr_reserved_mtts = 0;
        else
                dev->mthca_flags |= MTHCA_FLAG_FMR;

        err = mthca_buddy_init(&dev->mr_table.mtt_buddy,
                               fls(dev->limits.num_mtt_segs - 1));

        if (err)
                goto err_mtt_buddy;

        dev->mr_table.tavor_fmr.mpt_base = NULL;
        dev->mr_table.tavor_fmr.mtt_base = NULL;

        if (dev->limits.fmr_reserved_mtts) {
                i = fls(dev->limits.fmr_reserved_mtts - 1);

                if (i >= 31) {
                        mthca_warn(dev, "Unable to reserve 2^31 FMR MTTs.\n");
                        err = -EINVAL;
                        goto err_fmr_mpt;
                }

                dev->mr_table.tavor_fmr.mpt_base =
                        ioremap(dev->mr_table.mpt_base,
                                (1 << i) * sizeof (struct mthca_mpt_entry));

                if (!dev->mr_table.tavor_fmr.mpt_base) {
                        mthca_warn(dev, "MPT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mpt;
                }

                dev->mr_table.tavor_fmr.mtt_base =
                        ioremap(dev->mr_table.mtt_base,
                                (1 << i) * MTHCA_MTT_SEG_SIZE);
                if (!dev->mr_table.tavor_fmr.mtt_base) {
                        mthca_warn(dev, "MTT ioremap for FMR failed.\n");
                        err = -ENOMEM;
                        goto err_fmr_mtt;
                }

                err = mthca_buddy_init(&dev->mr_table.tavor_fmr.mtt_buddy, i);
                if (err)
                        goto err_fmr_mtt_buddy;

                /* Prevent regular MRs from using FMR keys */
                err = mthca_buddy_alloc(&dev->mr_table.mtt_buddy, i);
                if (err)
                        goto err_reserve_fmr;

                dev->mr_table.fmr_mtt_buddy =
                        &dev->mr_table.tavor_fmr.mtt_buddy;
        } else
                dev->mr_table.fmr_mtt_buddy = &dev->mr_table.mtt_buddy;

        /* FMR table is always the first, take reserved MTTs out of there */
        if (dev->limits.reserved_mtts) {
                i = fls(dev->limits.reserved_mtts - 1);

                if (mthca_alloc_mtt_range(dev, i,
                                          dev->mr_table.fmr_mtt_buddy) == -1) {
                        mthca_warn(dev, "MTT table of order %d is too small.\n",
                                   dev->mr_table.fmr_mtt_buddy->max_order);
                        err = -ENOMEM;
                        goto err_reserve_mtts;
                }
        }

        return 0;

err_reserve_mtts:
err_reserve_fmr:
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

err_fmr_mtt_buddy:
        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);

err_fmr_mtt:
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

err_fmr_mpt:
        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

err_mtt_buddy:
        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);

        return err;
}

void __devexit mthca_cleanup_mr_table(struct mthca_dev *dev)
{
        /* XXX check if any MRs are still allocated? */
        if (dev->limits.fmr_reserved_mtts)
                mthca_buddy_cleanup(&dev->mr_table.tavor_fmr.mtt_buddy);

        mthca_buddy_cleanup(&dev->mr_table.mtt_buddy);

        if (dev->mr_table.tavor_fmr.mtt_base)
                iounmap(dev->mr_table.tavor_fmr.mtt_base);
        if (dev->mr_table.tavor_fmr.mpt_base)
                iounmap(dev->mr_table.tavor_fmr.mpt_base);

        mthca_alloc_cleanup(&dev->mr_table.mpt_alloc);
}