/*
 * Copyright (c) 2013-2015, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/mlx5/driver.h>
#include <linux/mlx5/cmd.h>
#include "mlx5_core.h"

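/*
 * op_mod values for the MANAGE_PAGES command: the driver reports that it
 * cannot give pages, gives pages to the firmware, or takes pages back.
 */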
enum {
	MLX5_PAGES_CANT_GIVE	= 0,
	MLX5_PAGES_GIVE		= 1,
	MLX5_PAGES_TAKE		= 2
};

struct mlx5_pages_req {
	struct mlx5_core_dev *dev;
	u16	func_id;
	s32	npages;
	struct work_struct work;
};

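/*
 * One driver-owned system page handed out to firmware. Each PAGE_SIZE
 * page is carved into MLX5_NUM_4K_IN_PAGE chunks of 4K; 'bitmask' marks
 * which chunks are still free, and pages with free chunks are linked on
 * dev->priv.free_list via 'list'.
 */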
struct fw_page {
	struct rb_node		rb_node;
	u64			addr;
	struct page	       *page;
	u16			func_id;
	unsigned long		bitmask;
	struct list_head	list;
	unsigned		free_count;
};

enum {
	MAX_RECLAIM_TIME_MSECS	= 5000,
	MAX_RECLAIM_VFS_PAGES_TIME_MSECS = 2 * 1000 * 60,
};

enum {
	MLX5_MAX_RECLAIM_TIME_MILI	= 5000,
	MLX5_NUM_4K_IN_PAGE		= PAGE_SIZE / MLX5_ADAPTER_PAGE_SIZE,
};

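/*
 * Start tracking a freshly mapped page in the per-device rb-tree, keyed
 * by its DMA address. All of its 4K chunks begin free, so the page also
 * goes on the free list.
 */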
static int insert_page(struct mlx5_core_dev *dev, u64 addr, struct page *page, u16 func_id)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node **new = &root->rb_node;
	struct rb_node *parent = NULL;
	struct fw_page *nfp;
	struct fw_page *tfp;
	int i;

	while (*new) {
		parent = *new;
		tfp = rb_entry(parent, struct fw_page, rb_node);
		if (tfp->addr < addr)
			new = &parent->rb_left;
		else if (tfp->addr > addr)
			new = &parent->rb_right;
		else
			return -EEXIST;
	}

	nfp = kzalloc(sizeof(*nfp), GFP_KERNEL);
	if (!nfp)
		return -ENOMEM;

	nfp->addr = addr;
	nfp->page = page;
	nfp->func_id = func_id;
	nfp->free_count = MLX5_NUM_4K_IN_PAGE;
	for (i = 0; i < MLX5_NUM_4K_IN_PAGE; i++)
		set_bit(i, &nfp->bitmask);

	rb_link_node(&nfp->rb_node, parent, new);
	rb_insert_color(&nfp->rb_node, root);
	list_add(&nfp->list, &dev->priv.free_list);

	return 0;
}

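/* Find the tracking entry of the page that starts at 'addr', if any. */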
static struct fw_page *find_fw_page(struct mlx5_core_dev *dev, u64 addr)
{
	struct rb_root *root = &dev->priv.page_root;
	struct rb_node *tmp = root->rb_node;
	struct fw_page *result = NULL;
	struct fw_page *tfp;

	while (tmp) {
		tfp = rb_entry(tmp, struct fw_page, rb_node);
		if (tfp->addr < addr) {
			tmp = tmp->rb_left;
		} else if (tfp->addr > addr) {
			tmp = tmp->rb_right;
		} else {
			result = tfp;
			break;
		}
	}

	return result;
}

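/*
 * Execute QUERY_PAGES to learn how many pages firmware needs for the boot
 * or init stage, and which function id the request applies to.
 */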
static int mlx5_cmd_query_pages(struct mlx5_core_dev *dev, u16 *func_id,
				s32 *npages, int boot)
{
	u32 out[MLX5_ST_SZ_DW(query_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(query_pages_in)] = {0};
	int err;

	MLX5_SET(query_pages_in, in, opcode, MLX5_CMD_OP_QUERY_PAGES);
	MLX5_SET(query_pages_in, in, op_mod, boot ?
		 MLX5_QUERY_PAGES_IN_OP_MOD_BOOT_PAGES :
		 MLX5_QUERY_PAGES_IN_OP_MOD_INIT_PAGES);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		return err;

	*npages = MLX5_GET(query_pages_out, out, num_pages);
	*func_id = MLX5_GET(query_pages_out, out, function_id);

	return err;
}

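/*
 * Hand out one free 4K chunk from the first page on the free list and
 * return its DMA address; -ENOMEM means no tracked page has a free chunk
 * left and a new system page must be allocated.
 */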
static int alloc_4k(struct mlx5_core_dev *dev, u64 *addr)
{
	struct fw_page *fp;
	unsigned n;

	if (list_empty(&dev->priv.free_list))
		return -ENOMEM;

	fp = list_entry(dev->priv.free_list.next, struct fw_page, list);
	n = find_first_bit(&fp->bitmask, 8 * sizeof(fp->bitmask));
	if (n >= MLX5_NUM_4K_IN_PAGE) {
		mlx5_core_warn(dev, "alloc 4k bug\n");
		return -ENOENT;
	}
	clear_bit(n, &fp->bitmask);
	fp->free_count--;
	if (!fp->free_count)
		list_del(&fp->list);

	*addr = fp->addr + n * MLX5_ADAPTER_PAGE_SIZE;

	return 0;
}

#define MLX5_U64_4K_PAGE_MASK ((~(u64)0U) << PAGE_SHIFT)

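/*
 * Return a 4K chunk to its owning page. When every chunk of the page is
 * free again, the page is untracked, DMA-unmapped and given back to the
 * kernel; when its first chunk frees up, the page rejoins the free list.
 */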
static void free_4k(struct mlx5_core_dev *dev, u64 addr)
{
	struct fw_page *fwp;
	int n;

	fwp = find_fw_page(dev, addr & MLX5_U64_4K_PAGE_MASK);
	if (!fwp) {
		mlx5_core_warn(dev, "page not found\n");
		return;
	}

	n = (addr & ~MLX5_U64_4K_PAGE_MASK) >> MLX5_ADAPTER_PAGE_SHIFT;
	fwp->free_count++;
	set_bit(n, &fwp->bitmask);
	if (fwp->free_count == MLX5_NUM_4K_IN_PAGE) {
		rb_erase(&fwp->rb_node, &dev->priv.page_root);
		if (fwp->free_count != 1)
			list_del(&fwp->list);
		dma_unmap_page(&dev->pdev->dev, addr & MLX5_U64_4K_PAGE_MASK,
			       PAGE_SIZE, DMA_BIDIRECTIONAL);
		__free_page(fwp->page);
		kfree(fwp);
	} else if (fwp->free_count == 1) {
		list_add(&fwp->list, &dev->priv.free_list);
	}
}

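/*
 * Allocate one system page, preferably on the device's NUMA node, DMA-map
 * it and insert it into the tracking tree so its 4K chunks can be handed
 * to firmware.
 */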
static int alloc_system_page(struct mlx5_core_dev *dev, u16 func_id)
{
	struct page *page;
	u64 addr;
	int err;
	int nid = dev_to_node(&dev->pdev->dev);

	page = alloc_pages_node(nid, GFP_HIGHUSER, 0);
	if (!page) {
		mlx5_core_warn(dev, "failed to allocate page\n");
		return -ENOMEM;
	}
	addr = dma_map_page(&dev->pdev->dev, page, 0,
			    PAGE_SIZE, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(&dev->pdev->dev, addr)) {
		mlx5_core_warn(dev, "failed dma mapping page\n");
		err = -ENOMEM;
		goto out_alloc;
	}
	err = insert_page(dev, addr, page, func_id);
	if (err) {
		mlx5_core_err(dev, "failed to track allocated page\n");
		goto out_mapping;
	}

	return 0;

out_mapping:
	dma_unmap_page(&dev->pdev->dev, addr, PAGE_SIZE, DMA_BIDIRECTIONAL);

out_alloc:
	__free_page(page);

	return err;
}

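/*
 * Tell firmware that its page request cannot be satisfied: a MANAGE_PAGES
 * command with op_mod MLX5_PAGES_CANT_GIVE and no pages attached.
 */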
static void page_notify_fail(struct mlx5_core_dev *dev, u16 func_id)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int err;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_CANT_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);

	err = mlx5_cmd_exec(dev, in, sizeof(in), out, sizeof(out));
	if (err)
		mlx5_core_warn(dev, "page notify failed func_id(%d) err(%d)\n",
			       func_id, err);
}

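/*
 * Give 'npages' 4K chunks to firmware on behalf of 'func_id'. On failure
 * all chunks are released again and, if 'notify_fail' is set, firmware is
 * told that no pages are coming.
 */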
static int give_pages(struct mlx5_core_dev *dev, u16 func_id, int npages,
		      int notify_fail)
{
	u32 out[MLX5_ST_SZ_DW(manage_pages_out)] = {0};
	int inlen = MLX5_ST_SZ_BYTES(manage_pages_in);
	u64 addr;
	int err;
	u32 *in;
	int i;

	inlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_in, pas[0]);
	in = mlx5_vzalloc(inlen);
	if (!in) {
		err = -ENOMEM;
		mlx5_core_warn(dev, "vzalloc failed %d\n", inlen);
		goto out_free;
	}

	for (i = 0; i < npages; i++) {
retry:
		err = alloc_4k(dev, &addr);
		if (err) {
			if (err == -ENOMEM)
				err = alloc_system_page(dev, func_id);
			if (err)
				goto out_4k;

			goto retry;
		}
		MLX5_SET64(manage_pages_in, in, pas[i], addr);
	}

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_GIVE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	err = mlx5_cmd_exec(dev, in, inlen, out, sizeof(out));
	if (err) {
		mlx5_core_warn(dev, "func_id 0x%x, npages %d, err %d\n",
			       func_id, npages, err);
		goto out_4k;
	}

	dev->priv.fw_pages += npages;
	if (func_id)
		dev->priv.vfs_pages += npages;

	mlx5_core_dbg(dev, "err %d\n", err);

	kvfree(in);
	return 0;

out_4k:
	for (i--; i >= 0; i--)
		free_4k(dev, MLX5_GET64(manage_pages_in, in, pas[i]));
out_free:
	kvfree(in);
	if (notify_fail)
		page_notify_fail(dev, func_id);
	return err;
}

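/*
 * Execute a page reclaim command. If the device is in internal error
 * state the command interface is unusable, so reclaim the pages locally
 * instead: walk the rb-tree and fake a firmware reply that hands back
 * every page owned by the requested function.
 */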
static int reclaim_pages_cmd(struct mlx5_core_dev *dev,
			     u32 *in, int in_size, u32 *out, int out_size)
{
	struct fw_page *fwp;
	struct rb_node *p;
	u32 func_id;
	u32 npages;
	u32 i = 0;

	if (dev->state != MLX5_DEVICE_STATE_INTERNAL_ERROR)
		return mlx5_cmd_exec(dev, in, in_size, out, out_size);

	/* No hard feelings, we want our pages back! */
	npages = MLX5_GET(manage_pages_in, in, input_num_entries);
	func_id = MLX5_GET(manage_pages_in, in, function_id);

	p = rb_first(&dev->priv.page_root);
	while (p && i < npages) {
		fwp = rb_entry(p, struct fw_page, rb_node);
		p = rb_next(p);
		if (fwp->func_id != func_id)
			continue;

		MLX5_SET64(manage_pages_out, out, pas[i], fwp->addr);
		i++;
	}

	MLX5_SET(manage_pages_out, out, output_num_entries, i);
	return 0;
}

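/*
 * Take up to 'npages' pages back from firmware for 'func_id' and free
 * each returned 4K chunk. The number actually reclaimed is reported
 * through 'nclaimed' when the caller asks for it.
 */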
static int reclaim_pages(struct mlx5_core_dev *dev, u32 func_id, int npages,
			 int *nclaimed)
{
	int outlen = MLX5_ST_SZ_BYTES(manage_pages_out);
	u32 in[MLX5_ST_SZ_DW(manage_pages_in)] = {0};
	int num_claimed;
	u32 *out;
	int err;
	int i;

	if (nclaimed)
		*nclaimed = 0;

	outlen += npages * MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);
	out = mlx5_vzalloc(outlen);
	if (!out)
		return -ENOMEM;

	MLX5_SET(manage_pages_in, in, opcode, MLX5_CMD_OP_MANAGE_PAGES);
	MLX5_SET(manage_pages_in, in, op_mod, MLX5_PAGES_TAKE);
	MLX5_SET(manage_pages_in, in, function_id, func_id);
	MLX5_SET(manage_pages_in, in, input_num_entries, npages);

	mlx5_core_dbg(dev, "npages %d, outlen %d\n", npages, outlen);
	err = reclaim_pages_cmd(dev, in, sizeof(in), out, outlen);
	if (err) {
		mlx5_core_err(dev, "failed reclaiming pages: err %d\n", err);
		goto out_free;
	}

	num_claimed = MLX5_GET(manage_pages_out, out, output_num_entries);
	if (num_claimed > npages) {
		mlx5_core_warn(dev, "fw returned %d, driver asked %d => corruption\n",
			       num_claimed, npages);
		err = -EINVAL;
		goto out_free;
	}

	for (i = 0; i < num_claimed; i++)
		free_4k(dev, MLX5_GET64(manage_pages_out, out, pas[i]));

	if (nclaimed)
		*nclaimed = num_claimed;

	dev->priv.fw_pages -= num_claimed;
	if (func_id)
		dev->priv.vfs_pages -= num_claimed;

out_free:
	kvfree(out);
	return err;
}

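/* Work handler: a negative request reclaims pages, a positive one gives. */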
static void pages_work_handler(struct work_struct *work)
{
	struct mlx5_pages_req *req = container_of(work, struct mlx5_pages_req, work);
	struct mlx5_core_dev *dev = req->dev;
	int err = 0;

	if (req->npages < 0)
		err = reclaim_pages(dev, req->func_id, -1 * req->npages, NULL);
	else if (req->npages > 0)
		err = give_pages(dev, req->func_id, req->npages, 1);

	if (err)
		mlx5_core_warn(dev, "%s fail %d\n",
			       req->npages < 0 ? "reclaim" : "give", err);

	kfree(req);
}

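/*
 * Page-request event handler: queue the firmware's request for the page
 * allocator workqueue. The request is allocated with GFP_ATOMIC since
 * this may be called from atomic context.
 */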
void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
				 s32 npages)
{
	struct mlx5_pages_req *req;

	req = kzalloc(sizeof(*req), GFP_ATOMIC);
	if (!req) {
		mlx5_core_warn(dev, "failed to allocate pages request\n");
		return;
	}

	req->dev = dev;
	req->func_id = func_id;
	req->npages = npages;
	INIT_WORK(&req->work, pages_work_handler);
	queue_work(dev->priv.pg_wq, &req->work);
}

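/* Query how many boot or init pages firmware wants, then give them. */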
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot)
{
	u16 uninitialized_var(func_id);
	s32 uninitialized_var(npages);
	int err;

	err = mlx5_cmd_query_pages(dev, &func_id, &npages, boot);
	if (err)
		return err;

	mlx5_core_dbg(dev, "requested %d %s pages for func_id 0x%x\n",
		      npages, boot ? "boot" : "init", func_id);

	return give_pages(dev, func_id, npages, 0);
}

enum {
	MLX5_BLKS_FOR_RECLAIM_PAGES = 12
};

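/*
 * Number of page addresses that fit in a reply spanning the command
 * layout's inline output plus MLX5_BLKS_FOR_RECLAIM_PAGES mailbox blocks,
 * i.e. the batch size used when reclaiming pages at teardown.
 */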
static int optimal_reclaimed_pages(void)
{
	struct mlx5_cmd_prot_block *block;
	struct mlx5_cmd_layout *lay;
	int ret;

	ret = (sizeof(lay->out) + MLX5_BLKS_FOR_RECLAIM_PAGES * sizeof(block->data) -
	       MLX5_ST_SZ_BYTES(manage_pages_out)) /
	       MLX5_FLD_SZ_BYTES(manage_pages_out, pas[0]);

	return ret;
}

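/*
 * Reclaim every page firmware still holds, in batches. The deadline is
 * extended whenever progress is made, and the loop gives up once no pages
 * have come back for MAX_RECLAIM_TIME_MSECS.
 */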
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
	struct fw_page *fwp;
	struct rb_node *p;
	int nclaimed = 0;
	int err = 0;

	do {
		p = rb_first(&dev->priv.page_root);
		if (p) {
			fwp = rb_entry(p, struct fw_page, rb_node);
			err = reclaim_pages(dev, fwp->func_id,
					    optimal_reclaimed_pages(),
					    &nclaimed);

			if (err) {
				mlx5_core_warn(dev, "failed reclaiming pages (%d)\n",
					       err);
				return err;
			}
			if (nclaimed)
				end = jiffies + msecs_to_jiffies(MAX_RECLAIM_TIME_MSECS);
		}
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "FW did not return all pages. giving up...\n");
			break;
		}
	} while (p);

	WARN(dev->priv.fw_pages,
	     "FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.fw_pages);
	WARN(dev->priv.vfs_pages,
	     "VFs FW pages counter is %d after reclaiming all pages\n",
	     dev->priv.vfs_pages);

	return 0;
}

void mlx5_pagealloc_init(struct mlx5_core_dev *dev)
{
	dev->priv.page_root = RB_ROOT;
	INIT_LIST_HEAD(&dev->priv.free_list);
}

void mlx5_pagealloc_cleanup(struct mlx5_core_dev *dev)
{
	/* nothing */
}

int mlx5_pagealloc_start(struct mlx5_core_dev *dev)
{
	dev->priv.pg_wq = create_singlethread_workqueue("mlx5_page_allocator");
	if (!dev->priv.pg_wq)
		return -ENOMEM;

	return 0;
}

void mlx5_pagealloc_stop(struct mlx5_core_dev *dev)
{
	destroy_workqueue(dev->priv.pg_wq);
}

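/*
 * Wait for all pages handed out on behalf of VFs to be returned. The
 * deadline is extended while progress is being made; in internal error
 * state the wait is skipped since the pages are freed manually later.
 */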
int mlx5_wait_for_vf_pages(struct mlx5_core_dev *dev)
{
	unsigned long end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
	int prev_vfs_pages = dev->priv.vfs_pages;

	/* In case of internal error we will free the pages manually later */
	if (dev->state == MLX5_DEVICE_STATE_INTERNAL_ERROR) {
		mlx5_core_warn(dev, "Skipping wait for vf pages stage");
		return 0;
	}

	mlx5_core_dbg(dev, "Waiting for %d pages from %s\n", prev_vfs_pages,
		      dev->priv.name);
	while (dev->priv.vfs_pages) {
		if (time_after(jiffies, end)) {
			mlx5_core_warn(dev, "aborting while there are %d pending pages\n", dev->priv.vfs_pages);
			return -ETIMEDOUT;
		}
		if (dev->priv.vfs_pages < prev_vfs_pages) {
			end = jiffies + msecs_to_jiffies(MAX_RECLAIM_VFS_PAGES_TIME_MSECS);
			prev_vfs_pages = dev->priv.vfs_pages;
		}
		msleep(50);
	}

	mlx5_core_dbg(dev, "All pages received from %s\n", dev->priv.name);
	return 0;
}