/*
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies. All rights reserved.
 * Copyright (c) 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/errno.h>
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>

#include <linux/mlx4/cmd.h>

#include "mlx4.h"
#include "icm.h"
#include "fw.h"

/*
 * We allocate in as big chunks as we can, up to a maximum of 256 KB
 * per chunk.
 */
enum {
	MLX4_ICM_ALLOC_SIZE	= 1 << 18,
	MLX4_TABLE_CHUNK_SIZE	= 1 << 18
};

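/*
 * Free a non-coherent chunk: undo the streaming DMA mapping (if the
 * chunk was ever mapped) and release each scatterlist entry's pages at
 * the order they were allocated with.
 */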
static void mlx4_free_icm_pages(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	if (chunk->nsg > 0)
		pci_unmap_sg(dev->pdev, chunk->mem, chunk->npages,
			     PCI_DMA_BIDIRECTIONAL);

	for (i = 0; i < chunk->npages; ++i)
		__free_pages(sg_page(&chunk->mem[i]),
			     get_order(chunk->mem[i].length));
}

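/*
 * Free a coherent chunk: every entry came from dma_alloc_coherent(),
 * so there is no separate unmap step.
 */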
static void mlx4_free_icm_coherent(struct mlx4_dev *dev, struct mlx4_icm_chunk *chunk)
{
	int i;

	for (i = 0; i < chunk->npages; ++i)
		dma_free_coherent(&dev->pdev->dev, chunk->mem[i].length,
				  lowmem_page_address(sg_page(&chunk->mem[i])),
				  sg_dma_address(&chunk->mem[i]));
}

void mlx4_free_icm(struct mlx4_dev *dev, struct mlx4_icm *icm, int coherent)
{
	struct mlx4_icm_chunk *chunk, *tmp;

	if (!icm)
		return;

	list_for_each_entry_safe(chunk, tmp, &icm->chunk_list, list) {
		if (coherent)
			mlx4_free_icm_coherent(dev, chunk);
		else
			mlx4_free_icm_pages(dev, chunk);

		kfree(chunk);
	}

	kfree(icm);
}

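/*
 * Fill one scatterlist entry with 2^order freshly allocated pages.
 * The pages are not DMA-mapped here; mlx4_alloc_icm() maps a whole
 * chunk at a time with pci_map_sg().
 */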
static int mlx4_alloc_icm_pages(struct scatterlist *mem, int order, gfp_t gfp_mask)
{
	struct page *page;

	page = alloc_pages(gfp_mask, order);
	if (!page)
		return -ENOMEM;

	sg_set_page(mem, page, PAGE_SIZE << order, 0);
	return 0;
}

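/*
 * Fill one scatterlist entry with a coherent buffer.  The DMA address
 * is produced by dma_alloc_coherent() itself, so coherent chunks never
 * go through pci_map_sg().
 */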
static int mlx4_alloc_icm_coherent(struct device *dev, struct scatterlist *mem,
				   int order, gfp_t gfp_mask)
{
	void *buf = dma_alloc_coherent(dev, PAGE_SIZE << order,
				       &sg_dma_address(mem), gfp_mask);
	if (!buf)
		return -ENOMEM;

	sg_set_buf(mem, buf, PAGE_SIZE << order);
	BUG_ON(mem->offset);
	sg_dma_len(mem) = PAGE_SIZE << order;
	return 0;
}

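/*
 * Allocate an ICM area of @npages pages as a chunk list, where each
 * chunk holds up to MLX4_ICM_CHUNK_LEN scatterlist entries.  We start
 * with get_order(MLX4_ICM_ALLOC_SIZE) allocations (256 KB) and lower
 * the order whenever an allocation fails; if even a single page cannot
 * be allocated, everything done so far is rolled back.
 *
 * A minimal usage sketch (error handling elided; the page count and
 * GFP flags are illustrative, callers pick their own):
 *
 *	struct mlx4_icm *icm;
 *
 *	icm = mlx4_alloc_icm(dev, 1 << 4, GFP_KERNEL | __GFP_NOWARN, 0);
 *	if (icm)
 *		mlx4_free_icm(dev, icm, 0);
 */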
struct mlx4_icm *mlx4_alloc_icm(struct mlx4_dev *dev, int npages,
				gfp_t gfp_mask, int coherent)
{
	struct mlx4_icm *icm;
	struct mlx4_icm_chunk *chunk = NULL;
	int cur_order;
	int ret;

	/* We use sg_set_buf for coherent allocs, which assumes low memory */
	BUG_ON(coherent && (gfp_mask & __GFP_HIGHMEM));

	icm = kmalloc(sizeof *icm, gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
	if (!icm)
		return NULL;

	icm->refcount = 0;
	INIT_LIST_HEAD(&icm->chunk_list);

	cur_order = get_order(MLX4_ICM_ALLOC_SIZE);

	while (npages > 0) {
		if (!chunk) {
			chunk = kmalloc(sizeof *chunk,
					gfp_mask & ~(__GFP_HIGHMEM | __GFP_NOWARN));
			if (!chunk)
				goto fail;

			sg_init_table(chunk->mem, MLX4_ICM_CHUNK_LEN);
			chunk->npages = 0;
			chunk->nsg    = 0;
			list_add_tail(&chunk->list, &icm->chunk_list);
		}

		while (1 << cur_order > npages)
			--cur_order;

		if (coherent)
			ret = mlx4_alloc_icm_coherent(&dev->pdev->dev,
						      &chunk->mem[chunk->npages],
						      cur_order, gfp_mask);
		else
			ret = mlx4_alloc_icm_pages(&chunk->mem[chunk->npages],
						   cur_order, gfp_mask);

		if (!ret) {
			++chunk->npages;

			if (coherent)
				++chunk->nsg;
			else if (chunk->npages == MLX4_ICM_CHUNK_LEN) {
				chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
							chunk->npages,
							PCI_DMA_BIDIRECTIONAL);

				if (chunk->nsg <= 0)
					goto fail;
			}

			if (chunk->npages == MLX4_ICM_CHUNK_LEN)
				chunk = NULL;

			npages -= 1 << cur_order;
		} else {
			--cur_order;
			if (cur_order < 0)
				goto fail;
		}
	}

	if (!coherent && chunk) {
		chunk->nsg = pci_map_sg(dev->pdev, chunk->mem,
					chunk->npages,
					PCI_DMA_BIDIRECTIONAL);

		if (chunk->nsg <= 0)
			goto fail;
	}

	return icm;

fail:
	mlx4_free_icm(dev, icm, coherent);
	return NULL;
}

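/*
 * MAP_ICM/UNMAP_ICM tell firmware which host pages back the device
 * virtual address range starting at @virt.  mlx4_map_cmd() walks the
 * ICM chunks and issues the command in mailbox-sized batches of
 * (virtual address, page address) pairs.
 */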
static int mlx4_MAP_ICM(struct mlx4_dev *dev, struct mlx4_icm *icm, u64 virt)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM, icm, virt);
}

int mlx4_UNMAP_ICM(struct mlx4_dev *dev, u64 virt, u32 page_count)
{
	return mlx4_cmd(dev, virt, page_count, 0, MLX4_CMD_UNMAP_ICM,
			MLX4_CMD_TIME_CLASS_B);
}

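/*
 * Map a single ICM page.  MAP_ICM takes (virtual address, physical
 * address) pairs as consecutive big-endian 64-bit words in the
 * mailbox; here we pass exactly one pair.
 */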
int mlx4_MAP_ICM_page(struct mlx4_dev *dev, u64 dma_addr, u64 virt)
{
	struct mlx4_cmd_mailbox *mailbox;
	__be64 *inbox;
	int err;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);
	inbox = mailbox->buf;

	inbox[0] = cpu_to_be64(virt);
	inbox[1] = cpu_to_be64(dma_addr);

	err = mlx4_cmd(dev, mailbox->dma, 1, 0, MLX4_CMD_MAP_ICM,
		       MLX4_CMD_TIME_CLASS_B);

	mlx4_free_cmd_mailbox(dev, mailbox);

	if (!err)
		mlx4_dbg(dev, "Mapped page at %llx to %llx for ICM.\n",
			 (unsigned long long) dma_addr, (unsigned long long) virt);

	return err;
}

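/*
 * The ICM auxiliary area holds firmware-internal context.  Passing
 * virt == -1 makes mlx4_map_cmd() omit the virtual addresses from the
 * mailbox, since firmware lays out the aux area itself.
 */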
int mlx4_MAP_ICM_AUX(struct mlx4_dev *dev, struct mlx4_icm *icm)
{
	return mlx4_map_cmd(dev, MLX4_CMD_MAP_ICM_AUX, icm, -1);
}

int mlx4_UNMAP_ICM_AUX(struct mlx4_dev *dev)
{
	return mlx4_cmd(dev, 0, 0, 0, MLX4_CMD_UNMAP_ICM_AUX, MLX4_CMD_TIME_CLASS_B);
}

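/*
 * Pin the ICM chunk backing object @obj: on first use allocate it and
 * map it into the device, otherwise just take another reference.
 * Callers pair this with mlx4_table_put(), e.g. (sketch; "table" is
 * whichever mlx4_icm_table the object lives in):
 *
 *	err = mlx4_table_get(dev, table, obj);
 *	if (err)
 *		return err;
 *	... use the object ...
 *	mlx4_table_put(dev, table, obj);
 */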
int mlx4_table_get(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);
	int ret = 0;

	mutex_lock(&table->mutex);

	if (table->icm[i]) {
		++table->icm[i]->refcount;
		goto out;
	}

	table->icm[i] = mlx4_alloc_icm(dev, MLX4_TABLE_CHUNK_SIZE >> PAGE_SHIFT,
				       (table->lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
				       __GFP_NOWARN, table->coherent);
	if (!table->icm[i]) {
		ret = -ENOMEM;
		goto out;
	}

	if (mlx4_MAP_ICM(dev, table->icm[i], table->virt +
			 (u64) i * MLX4_TABLE_CHUNK_SIZE)) {
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
		ret = -ENOMEM;
		goto out;
	}

	++table->icm[i]->refcount;

out:
	mutex_unlock(&table->mutex);
	return ret;
}

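/*
 * Drop a reference on the chunk backing @obj; the last put unmaps the
 * chunk from the device and frees it.
 */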
void mlx4_table_put(struct mlx4_dev *dev, struct mlx4_icm_table *table, int obj)
{
	int i;

	i = (obj & (table->num_obj - 1)) / (MLX4_TABLE_CHUNK_SIZE / table->obj_size);

	mutex_lock(&table->mutex);

	if (--table->icm[i]->refcount == 0) {
		mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
			       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
		mlx4_free_icm(dev, table->icm[i], table->coherent);
		table->icm[i] = NULL;
	}

	mutex_unlock(&table->mutex);
}

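/*
 * Return the lowmem virtual address of object @obj, walking the chunk
 * scatterlists until the entry containing it is found; the matching
 * bus address is reported through @dma_handle if non-NULL.  Only valid
 * for tables kept in lowmem.
 */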
void *mlx4_table_find(struct mlx4_icm_table *table, int obj, dma_addr_t *dma_handle)
{
	int idx, offset, dma_offset, i;
	struct mlx4_icm_chunk *chunk;
	struct mlx4_icm *icm;
	struct page *page = NULL;

	if (!table->lowmem)
		return NULL;

	mutex_lock(&table->mutex);

	idx = (obj & (table->num_obj - 1)) * table->obj_size;
	icm = table->icm[idx / MLX4_TABLE_CHUNK_SIZE];
	dma_offset = offset = idx % MLX4_TABLE_CHUNK_SIZE;

	if (!icm)
		goto out;

	list_for_each_entry(chunk, &icm->chunk_list, list) {
		for (i = 0; i < chunk->npages; ++i) {
			if (dma_handle && dma_offset >= 0) {
				if (sg_dma_len(&chunk->mem[i]) > dma_offset)
					*dma_handle = sg_dma_address(&chunk->mem[i]) +
						dma_offset;
				dma_offset -= sg_dma_len(&chunk->mem[i]);
			}
			/*
			 * DMA mapping can merge pages but not split them,
			 * so if we found the page, dma_handle has already
			 * been assigned to.
			 */
			if (chunk->mem[i].length > offset) {
				page = sg_page(&chunk->mem[i]);
				goto out;
			}
			offset -= chunk->mem[i].length;
		}
	}

out:
	mutex_unlock(&table->mutex);
	return page ? lowmem_page_address(page) + offset : NULL;
}

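/*
 * Pin every chunk covering objects [start, end], unwinding on failure
 * so that either the whole range holds references or none of it does.
 * mlx4_table_put_range() releases a range pinned this way.
 */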
int mlx4_table_get_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			 int start, int end)
{
	int inc = MLX4_TABLE_CHUNK_SIZE / table->obj_size;
	int i, err;

	for (i = start; i <= end; i += inc) {
		err = mlx4_table_get(dev, table, i);
		if (err)
			goto fail;
	}

	return 0;

fail:
	while (i > start) {
		i -= inc;
		mlx4_table_put(dev, table, i);
	}

	return err;
}

void mlx4_table_put_range(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			  int start, int end)
{
	int i;

	for (i = start; i <= end; i += MLX4_TABLE_CHUNK_SIZE / table->obj_size)
		mlx4_table_put(dev, table, i);
}

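/*
 * Set up an ICM table of @nobj objects of @obj_size bytes each,
 * starting at device virtual address @virt.  The chunks covering the
 * first @reserved objects belong to firmware, so they are mapped here
 * and given an extra reference that is never dropped.
 */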
int mlx4_init_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table,
			u64 virt, int obj_size, int nobj, int reserved,
			int use_lowmem, int use_coherent)
{
	int obj_per_chunk;
	int num_icm;
	unsigned chunk_size;
	int i;

	obj_per_chunk = MLX4_TABLE_CHUNK_SIZE / obj_size;
	num_icm = (nobj + obj_per_chunk - 1) / obj_per_chunk;

	table->icm      = kcalloc(num_icm, sizeof *table->icm, GFP_KERNEL);
	if (!table->icm)
		return -ENOMEM;
	table->virt     = virt;
	table->num_icm  = num_icm;
	table->num_obj  = nobj;
	table->obj_size = obj_size;
	table->lowmem   = use_lowmem;
	table->coherent = use_coherent;
	mutex_init(&table->mutex);

	for (i = 0; i * MLX4_TABLE_CHUNK_SIZE < reserved * obj_size; ++i) {
		chunk_size = MLX4_TABLE_CHUNK_SIZE;
		if ((i + 1) * MLX4_TABLE_CHUNK_SIZE > nobj * obj_size)
			chunk_size = PAGE_ALIGN(nobj * obj_size - i * MLX4_TABLE_CHUNK_SIZE);

		table->icm[i] = mlx4_alloc_icm(dev, chunk_size >> PAGE_SHIFT,
					       (use_lowmem ? GFP_KERNEL : GFP_HIGHUSER) |
					       __GFP_NOWARN, use_coherent);
		if (!table->icm[i])
			goto err;
		if (mlx4_MAP_ICM(dev, table->icm[i], virt + i * MLX4_TABLE_CHUNK_SIZE)) {
			mlx4_free_icm(dev, table->icm[i], use_coherent);
			table->icm[i] = NULL;
			goto err;
		}

		/*
		 * Add a reference to this ICM chunk so that it never
		 * gets freed (since it contains reserved firmware objects).
		 */
		++table->icm[i]->refcount;
	}

	return 0;

err:
	for (i = 0; i < num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], use_coherent);
		}

	return -ENOMEM;
}

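/*
 * Unmap and free every chunk still allocated, including the reserved
 * ones pinned by mlx4_init_icm_table().
 */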
void mlx4_cleanup_icm_table(struct mlx4_dev *dev, struct mlx4_icm_table *table)
{
	int i;

	for (i = 0; i < table->num_icm; ++i)
		if (table->icm[i]) {
			mlx4_UNMAP_ICM(dev, table->virt + i * MLX4_TABLE_CHUNK_SIZE,
				       MLX4_TABLE_CHUNK_SIZE / MLX4_ICM_PAGE_SIZE);
			mlx4_free_icm(dev, table->icm[i], table->coherent);
		}

	kfree(table->icm);
}