/*
 * Copyright (c) 2004, 2005 Topspin Communications.  All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc.  All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"

#define MLX4_MAC_VALID			(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
        struct list_head list;
        u64 mac;
        int ref_count;
        u8 smac_index;
        u8 port;
};

struct vlan_res {
        struct list_head list;
        u16 vlan;
        int ref_count;
        int vlan_index;
        u8 port;
};

struct res_common {
        struct list_head list;
        struct rb_node node;
        u64 res_id;
        int owner;
        int state;
        int from_state;
        int to_state;
        int removing;
};

enum {
        RES_ANY_BUSY = 1
};

struct res_gid {
        struct list_head list;
        u8 gid[16];
        enum mlx4_protocol prot;
        enum mlx4_steer_type steer;
        u64 reg_id;
};

enum res_qp_states {
        RES_QP_BUSY = RES_ANY_BUSY,

        /* QP number was allocated */
        RES_QP_RESERVED,

        /* ICM memory for QP context was mapped */
        RES_QP_MAPPED,

        /* QP is in hw ownership */
        RES_QP_HW
};

struct res_qp {
        struct res_common com;
        struct res_mtt *mtt;
        struct res_cq *rcq;
        struct res_cq *scq;
        struct res_srq *srq;
        struct list_head mcg_list;
        spinlock_t mcg_spl;
        int local_qpn;
        atomic_t ref_count;
        u32 qpc_flags;
        /* saved qp params before VST enforcement in order to restore on VGT */
        u8 sched_queue;
        __be32 param3;
        u8 vlan_control;
        u8 fvl_rx;
        u8 pri_path_fl;
        u8 vlan_index;
        u8 feup;
};
enum res_mtt_states {
        RES_MTT_BUSY = RES_ANY_BUSY,
        RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
        switch (state) {
        case RES_MTT_BUSY: return "RES_MTT_BUSY";
        case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
        default: return "Unknown";
        }
}

struct res_mtt {
        struct res_common com;
        int order;
        atomic_t ref_count;
};

enum res_mpt_states {
        RES_MPT_BUSY = RES_ANY_BUSY,
        RES_MPT_RESERVED,
        RES_MPT_MAPPED,
        RES_MPT_HW,
};

struct res_mpt {
        struct res_common com;
        struct res_mtt *mtt;
        int key;
};

enum res_eq_states {
        RES_EQ_BUSY = RES_ANY_BUSY,
        RES_EQ_RESERVED,
        RES_EQ_HW,
};

struct res_eq {
        struct res_common com;
        struct res_mtt *mtt;
};

enum res_cq_states {
        RES_CQ_BUSY = RES_ANY_BUSY,
        RES_CQ_ALLOCATED,
        RES_CQ_HW,
};

struct res_cq {
        struct res_common com;
        struct res_mtt *mtt;
        atomic_t ref_count;
};

enum res_srq_states {
        RES_SRQ_BUSY = RES_ANY_BUSY,
        RES_SRQ_ALLOCATED,
        RES_SRQ_HW,
};

struct res_srq {
        struct res_common com;
        struct res_mtt *mtt;
        struct res_cq *cq;
        atomic_t ref_count;
};

enum res_counter_states {
        RES_COUNTER_BUSY = RES_ANY_BUSY,
        RES_COUNTER_ALLOCATED,
};

struct res_counter {
        struct res_common com;
        int port;
};

enum res_xrcdn_states {
        RES_XRCD_BUSY = RES_ANY_BUSY,
        RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
        struct res_common com;
        int port;
};

enum res_fs_rule_states {
        RES_FS_RULE_BUSY = RES_ANY_BUSY,
        RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
        struct res_common com;
        int qpn;
};
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
        struct rb_node *node = root->rb_node;

        while (node) {
                struct res_common *res = container_of(node, struct res_common,
                                                      node);

                if (res_id < res->res_id)
                        node = node->rb_left;
                else if (res_id > res->res_id)
                        node = node->rb_right;
                else
                        return res;
        }
        return NULL;
}

static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
        struct rb_node **new = &(root->rb_node), *parent = NULL;

        /* Figure out where to put the new node */
        while (*new) {
                struct res_common *this = container_of(*new, struct res_common,
                                                       node);

                parent = *new;
                if (res->res_id < this->res_id)
                        new = &((*new)->rb_left);
                else if (res->res_id > this->res_id)
                        new = &((*new)->rb_right);
                else
                        return -EEXIST;
        }

        /* Add new node and rebalance tree. */
        rb_link_node(&res->node, parent, new);
        rb_insert_color(&res->node, root);

        return 0;
}
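/*
 * Illustrative sketch (hypothetical, not called by the driver) of how the
 * two helpers above compose: res_id is the sort key, a duplicate key is
 * rejected with -EEXIST, and a lookup after a successful insert returns
 * the same object.
 */
static __maybe_unused void res_tracker_example(void)
{
        struct rb_root root = RB_ROOT;
        struct res_common a = { .res_id = 42 };
        struct res_common b = { .res_id = 42 };

        WARN_ON(res_tracker_insert(&root, &a) != 0);       /* first insert ok */
        WARN_ON(res_tracker_insert(&root, &b) != -EEXIST); /* duplicate key */
        WARN_ON(res_tracker_lookup(&root, 42) != (void *)&a);
        rb_erase(&a.node, &root);                          /* tree empty again */
}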
enum qp_transition {
        QP_TRANS_INIT2RTR,
        QP_TRANS_RTR2RTS,
        QP_TRANS_RTS2RTS,
        QP_TRANS_SQERR2RTS,
        QP_TRANS_SQD2SQD,
        QP_TRANS_SQD2RTS
};

/* For debug use only */
static const char *resource_str(enum mlx4_resource rt)
{
        switch (rt) {
        case RES_QP: return "RES_QP";
        case RES_CQ: return "RES_CQ";
        case RES_SRQ: return "RES_SRQ";
        case RES_MPT: return "RES_MPT";
        case RES_MTT: return "RES_MTT";
        case RES_MAC: return "RES_MAC";
        case RES_VLAN: return "RES_VLAN";
        case RES_EQ: return "RES_EQ";
        case RES_COUNTER: return "RES_COUNTER";
        case RES_FS_RULE: return "RES_FS_RULE";
        case RES_XRCD: return "RES_XRCD";
        default: return "Unknown resource type !!!";
        }
}
static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
                                      enum mlx4_resource res_type, int count,
                                      int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int err = -EINVAL;
        int allocated, free, reserved, guaranteed, from_free;
        int from_rsvd;

        if (slave > dev->persist->num_vfs)
                return -EINVAL;

        spin_lock(&res_alloc->alloc_lock);
        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        free = (port > 0) ? res_alloc->res_port_free[port - 1] :
                res_alloc->res_free;
        reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
                res_alloc->res_reserved;
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated + count > res_alloc->quota[slave]) {
                mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
                          slave, port, resource_str(res_type), count,
                          allocated, res_alloc->quota[slave]);
                goto out;
        }

        if (allocated + count <= guaranteed) {
                err = 0;
                from_rsvd = count;
        } else {
                /* portion may need to be obtained from free area */
                if (guaranteed - allocated > 0)
                        from_free = count - (guaranteed - allocated);
                else
                        from_free = count;

                from_rsvd = count - from_free;

                if (free - from_free >= reserved)
                        err = 0;
                else
                        mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
                                  slave, port, resource_str(res_type), free,
                                  from_free, reserved);
        }

        if (!err) {
                /* grant the request */
                if (port > 0) {
                        res_alloc->allocated[(port - 1) *
                                (dev->persist->num_vfs + 1) + slave] += count;
                        res_alloc->res_port_free[port - 1] -= count;
                        res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
                } else {
                        res_alloc->allocated[slave] += count;
                        res_alloc->res_free -= count;
                        res_alloc->res_reserved -= from_rsvd;
                }
        }

out:
        spin_unlock(&res_alloc->alloc_lock);
        return err;
}
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
                                         enum mlx4_resource res_type, int count,
                                         int port)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct resource_allocator *res_alloc =
                &priv->mfunc.master.res_tracker.res_alloc[res_type];
        int allocated, guaranteed, from_rsvd;

        if (slave > dev->persist->num_vfs)
                return;

        spin_lock(&res_alloc->alloc_lock);

        allocated = (port > 0) ?
                res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] :
                res_alloc->allocated[slave];
        guaranteed = res_alloc->guaranteed[slave];

        if (allocated - count >= guaranteed) {
                from_rsvd = 0;
        } else {
                /* portion may need to be returned to reserved area */
                if (allocated - guaranteed > 0)
                        from_rsvd = count - (allocated - guaranteed);
                else
                        from_rsvd = count;
        }

        if (port > 0) {
                res_alloc->allocated[(port - 1) *
                        (dev->persist->num_vfs + 1) + slave] -= count;
                res_alloc->res_port_free[port - 1] += count;
                res_alloc->res_port_rsvd[port - 1] += from_rsvd;
        } else {
                res_alloc->allocated[slave] -= count;
                res_alloc->res_free += count;
                res_alloc->res_reserved += from_rsvd;
        }

        spin_unlock(&res_alloc->alloc_lock);
}
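/*
 * Illustrative sketch (hypothetical helper, not used by the driver) of the
 * grant-side split computed above: e.g. with guaranteed = 4 and 2 already
 * allocated, granting 5 takes 2 from the guaranteed (reserved) slice and
 * the remaining 3 from the shared free pool, which is exactly the
 * from_rsvd arithmetic that mlx4_grant_resource() and
 * mlx4_release_resource() mirror.
 */
static __maybe_unused int example_grant_from_rsvd(int allocated,
                                                  int guaranteed, int count)
{
        if (allocated + count <= guaranteed)
                return count;                   /* all from the guaranteed slice */
        if (guaranteed > allocated)
                return guaranteed - allocated;  /* partially from the slice */
        return 0;                               /* entirely from the free pool */
}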
static inline void initialize_res_quotas(struct mlx4_dev *dev,
                                         struct resource_allocator *res_alloc,
                                         enum mlx4_resource res_type,
                                         int vf, int num_instances)
{
        res_alloc->guaranteed[vf] = num_instances /
                                    (2 * (dev->persist->num_vfs + 1));
        res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
        if (vf == mlx4_master_func_num(dev)) {
                res_alloc->res_free = num_instances;
                if (res_type == RES_MTT) {
                        /* reserved mtts will be taken out of the PF allocation */
                        res_alloc->res_free += dev->caps.reserved_mtts;
                        res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
                        res_alloc->quota[vf] += dev->caps.reserved_mtts;
                }
        }
}
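/*
 * Worked example of the split above (illustrative numbers): with
 * num_instances = 1024 and dev->persist->num_vfs = 3 (four functions in
 * total), every function gets guaranteed = 1024 / (2 * 4) = 128 and
 * quota = 512 + 128 = 640.  Half of the pool is pre-reserved in equal
 * guaranteed shares; the other half is contended for on a first-come
 * basis, capped per function by its quota.
 */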
void mlx4_init_quotas(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int pf;

        /* quotas for VFs are initialized in mlx4_slave_cap */
        if (mlx4_is_slave(dev))
                return;

        if (!mlx4_is_mfunc(dev)) {
                dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
                        mlx4_num_reserved_sqps(dev);
                dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
                dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
                dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
                dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
                return;
        }

        pf = mlx4_master_func_num(dev);
        dev->quotas.qp =
                priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
        dev->quotas.cq =
                priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
        dev->quotas.srq =
                priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
        dev->quotas.mtt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
        dev->quotas.mpt =
                priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}
static int get_max_guaranteed_vfs_counter(struct mlx4_dev *dev)
{
        /* reduce the sink counter */
        return (dev->caps.max_counters - 1 -
                (MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
                / MLX4_MAX_PORTS;
}
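/*
 * Example of the division above (illustrative numbers): with
 * dev->caps.max_counters = 64 and MLX4_MAX_PORTS = 2, the PF keeps
 * 2 * 2 = 4 counters and one more is the sink, so (64 - 1 - 4) / 2 = 29
 * VFs can each be guaranteed one counter per port.
 */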
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i, j;
        int t;
        int max_vfs_guarantee_counter = get_max_guaranteed_vfs_counter(dev);

        priv->mfunc.master.res_tracker.slave_list =
                kzalloc(dev->num_slaves * sizeof(struct slave_list),
                        GFP_KERNEL);
        if (!priv->mfunc.master.res_tracker.slave_list)
                return -ENOMEM;

        for (i = 0; i < dev->num_slaves; i++) {
                for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
                        INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
                                       slave_list[i].res_list[t]);
                mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
        }

        mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
                 dev->num_slaves);
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
                priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                struct resource_allocator *res_alloc =
                        &priv->mfunc.master.res_tracker.res_alloc[i];
                res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
                                           sizeof(int), GFP_KERNEL);
                res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
                                                sizeof(int), GFP_KERNEL);
                if (i == RES_MAC || i == RES_VLAN)
                        res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
                                                       (dev->persist->num_vfs
                                                       + 1) *
                                                       sizeof(int), GFP_KERNEL);
                else
                        res_alloc->allocated = kzalloc((dev->persist->
                                                        num_vfs + 1) *
                                                       sizeof(int), GFP_KERNEL);
                /* Reduce the sink counter */
                if (i == RES_COUNTER)
                        res_alloc->res_free = dev->caps.max_counters - 1;

                if (!res_alloc->quota || !res_alloc->guaranteed ||
                    !res_alloc->allocated)
                        goto no_mem_err;

                spin_lock_init(&res_alloc->alloc_lock);
                for (t = 0; t < dev->persist->num_vfs + 1; t++) {
                        struct mlx4_active_ports actv_ports =
                                mlx4_get_active_ports(dev, t);
                        switch (i) {
                        case RES_QP:
                                initialize_res_quotas(dev, res_alloc, RES_QP,
                                                      t, dev->caps.num_qps -
                                                      dev->caps.reserved_qps -
                                                      mlx4_num_reserved_sqps(dev));
                                break;
                        case RES_CQ:
                                initialize_res_quotas(dev, res_alloc, RES_CQ,
                                                      t, dev->caps.num_cqs -
                                                      dev->caps.reserved_cqs);
                                break;
                        case RES_SRQ:
                                initialize_res_quotas(dev, res_alloc, RES_SRQ,
                                                      t, dev->caps.num_srqs -
                                                      dev->caps.reserved_srqs);
                                break;
                        case RES_MPT:
                                initialize_res_quotas(dev, res_alloc, RES_MPT,
                                                      t, dev->caps.num_mpts -
                                                      dev->caps.reserved_mrws);
                                break;
                        case RES_MTT:
                                initialize_res_quotas(dev, res_alloc, RES_MTT,
                                                      t, dev->caps.num_mtts -
                                                      dev->caps.reserved_mtts);
                                break;
                        case RES_MAC:
                                if (t == mlx4_master_func_num(dev)) {
                                        int max_vfs_pport = 0;
                                        /* Calculate the max vfs per port for
                                         * both ports.
                                         */
                                        for (j = 0; j < dev->caps.num_ports;
                                             j++) {
                                                struct mlx4_slaves_pport slaves_pport =
                                                        mlx4_phys_to_slaves_pport(dev, j + 1);
                                                unsigned current_slaves =
                                                        bitmap_weight(slaves_pport.slaves,
                                                                      dev->caps.num_ports) - 1;
                                                if (max_vfs_pport < current_slaves)
                                                        max_vfs_pport =
                                                                current_slaves;
                                        }
                                        res_alloc->quota[t] =
                                                MLX4_MAX_MAC_NUM -
                                                2 * max_vfs_pport;
                                        res_alloc->guaranteed[t] = 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        MLX4_MAX_MAC_NUM;
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
                                        res_alloc->guaranteed[t] = 2;
                                }
                                break;
                        case RES_VLAN:
                                if (t == mlx4_master_func_num(dev)) {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
                                        res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
                                        for (j = 0; j < MLX4_MAX_PORTS; j++)
                                                res_alloc->res_port_free[j] =
                                                        res_alloc->quota[t];
                                } else {
                                        res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
                                        res_alloc->guaranteed[t] = 0;
                                }
                                break;
                        case RES_COUNTER:
                                res_alloc->quota[t] = dev->caps.max_counters;
                                if (t == mlx4_master_func_num(dev))
                                        res_alloc->guaranteed[t] =
                                                MLX4_PF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else if (t <= max_vfs_guarantee_counter)
                                        res_alloc->guaranteed[t] =
                                                MLX4_VF_COUNTERS_PER_PORT *
                                                MLX4_MAX_PORTS;
                                else
                                        res_alloc->guaranteed[t] = 0;
                                res_alloc->res_free -= res_alloc->guaranteed[t];
                                break;
                        default:
                                break;
                        }
                        if (i == RES_MAC || i == RES_VLAN) {
                                for (j = 0; j < dev->caps.num_ports; j++)
                                        if (test_bit(j, actv_ports.ports))
                                                res_alloc->res_port_rsvd[j] +=
                                                        res_alloc->guaranteed[t];
                        } else {
                                res_alloc->res_reserved += res_alloc->guaranteed[t];
                        }
                }
        }
        spin_lock_init(&priv->mfunc.master.res_tracker.lock);
        return 0;

no_mem_err:
        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
        }
        return -ENOMEM;
}
void mlx4_free_resource_tracker(struct mlx4_dev *dev,
                                enum mlx4_res_tracker_free_type type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        int i;

        if (priv->mfunc.master.res_tracker.slave_list) {
                if (type != RES_TR_FREE_STRUCTS_ONLY) {
                        for (i = 0; i < dev->num_slaves; i++) {
                                if (type == RES_TR_FREE_ALL ||
                                    dev->caps.function != i)
                                        mlx4_delete_all_resources_for_slave(dev, i);
                        }
                        /* free master's vlans */
                        i = dev->caps.function;
                        mlx4_reset_roce_gids(dev, i);
                        mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                        rem_slave_vlans(dev, i);
                        mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
                }

                if (type != RES_TR_FREE_SLAVES_ONLY) {
                        for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
                                priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
                                priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
                                kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
                                priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
                        }
                        kfree(priv->mfunc.master.res_tracker.slave_list);
                        priv->mfunc.master.res_tracker.slave_list = NULL;
                }
        }
}
static void update_pkey_index(struct mlx4_dev *dev, int slave,
                              struct mlx4_cmd_mailbox *inbox)
{
        /* the QP context starts at byte 8 of the mailbox; bytes 64 and 35
         * land on its sched_queue and P_Key index fields respectively
         */
        u8 sched = *(u8 *)(inbox->buf + 64);
        u8 orig_index = *(u8 *)(inbox->buf + 35);
        u8 new_index;
        struct mlx4_priv *priv = mlx4_priv(dev);
        int port;

        /* bit 6 of sched_queue selects the port: 0 -> port 1, 1 -> port 2 */
        port = (sched >> 6 & 1) + 1;

        new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
        *(u8 *)(inbox->buf + 35) = new_index;
}

static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
                       u8 slave)
{
        struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
        enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
        u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
        int port;

        if (MLX4_QP_ST_UD == ts) {
                port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                if (mlx4_is_eth(dev, port))
                        qp_ctx->pri_path.mgid_index =
                                mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
                else
                        qp_ctx->pri_path.mgid_index = slave | 0x80;

        } else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
                if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
                        port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->pri_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->pri_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->pri_path.mgid_index = slave & 0x7F;
                        }
                }
                if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
                        port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
                        if (mlx4_is_eth(dev, port)) {
                                qp_ctx->alt_path.mgid_index +=
                                        mlx4_get_base_gid_ix(dev, slave, port);
                                qp_ctx->alt_path.mgid_index &= 0x7f;
                        } else {
                                qp_ctx->alt_path.mgid_index = slave & 0x7F;
                        }
                }
        }
}
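/*
 * Illustrative helper (hypothetical, not used by the driver): both
 * functions above recover the port number from bit 6 of sched_queue,
 * mapping bit values 0/1 to ports 1/2.
 */
static __maybe_unused int example_sched_queue_to_port(u8 sched_queue)
{
        return (sched_queue >> 6 & 1) + 1;
}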
static int update_vport_qp_param(struct mlx4_dev *dev,
                                 struct mlx4_cmd_mailbox *inbox,
                                 u8 slave, u32 qpn)
{
        struct mlx4_qp_context *qpc = inbox->buf + 8;
        struct mlx4_vport_oper_state *vp_oper;
        struct mlx4_priv *priv;
        u32 qp_type;
        int port, err = 0;

        port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
        priv = mlx4_priv(dev);
        vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
        qp_type = (be32_to_cpu(qpc->flags) >> 16) & 0xff;

        if (MLX4_VGT != vp_oper->state.default_vlan) {
                /* the reserved QPs (special, proxy, tunnel)
                 * do not operate over vlans
                 */
                if (mlx4_is_qp_reserved(dev, qpn))
                        return 0;

                /* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
                if (qp_type == MLX4_QP_ST_UD ||
                    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
                        if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
                                *(__be32 *)inbox->buf =
                                        cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
                                        MLX4_QP_OPTPAR_VLAN_STRIPPING);
                                qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
                        } else {
                                struct mlx4_update_qp_params params = {.flags = 0};

                                err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
                                if (err)
                                        goto out;
                        }
                }

                if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
                    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                } else if (0 != vp_oper->state.default_vlan) {
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
                } else { /* priority tagged */
                        qpc->pri_path.vlan_control =
                                MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
                                MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
                }

                qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
                qpc->pri_path.vlan_index = vp_oper->vlan_idx;
                qpc->pri_path.fl |= MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
                qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
                qpc->pri_path.sched_queue &= 0xC7;
                qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
                qpc->qos_vport = vp_oper->state.qos_vport;
        }
        if (vp_oper->state.spoofchk) {
                qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
                qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
        }
out:
        return err;
}
static int mpt_mask(struct mlx4_dev *dev)
{
        return dev->caps.num_mpts - 1;
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
                      enum mlx4_resource type)
{
        struct mlx4_priv *priv = mlx4_priv(dev);

        return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
                                  res_id);
}
static int get_res(struct mlx4_dev *dev, int slave, u64 res_id,
                   enum mlx4_resource type,
                   void *res)
{
        struct res_common *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (!r) {
                err = -ENONET;
                goto exit;
        }

        if (r->state == RES_ANY_BUSY) {
                err = -EBUSY;
                goto exit;
        }

        if (r->owner != slave) {
                err = -EPERM;
                goto exit;
        }

        r->from_state = r->state;
        r->state = RES_ANY_BUSY;

        if (res)
                *((struct res_common **)res) = r;

exit:
        spin_unlock_irq(mlx4_tlock(dev));
        return err;
}

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
                                    enum mlx4_resource type,
                                    u64 res_id, int *slave)
{
        struct res_common *r;
        int err = -ENOENT;
        int id = res_id;

        if (type == RES_QP)
                id &= 0x7fffff;
        spin_lock(mlx4_tlock(dev));

        r = find_res(dev, id, type);
        if (r) {
                *slave = r->owner;
                err = 0;
        }
        spin_unlock(mlx4_tlock(dev));

        return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
                    enum mlx4_resource type)
{
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = find_res(dev, res_id, type);
        if (r)
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
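/*
 * Illustrative usage of the busy-marking protocol above (hypothetical
 * snippet, not called by the driver): get_res() parks the resource in
 * RES_ANY_BUSY so concurrent state changes fail with -EBUSY until
 * put_res() restores the saved from_state.
 */
static __maybe_unused int example_with_locked_cq(struct mlx4_dev *dev,
                                                 int slave, u64 cqn)
{
        struct res_cq *cq;
        int err;

        err = get_res(dev, slave, cqn, RES_CQ, &cq);
        if (err)
                return err;
        /* ... inspect or update fields of 'cq' while it is busy ... */
        put_res(dev, slave, cqn, RES_CQ);
        return 0;
}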
static struct res_common *alloc_qp_tr(int id)
{
        struct res_qp *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_QP_RESERVED;
        ret->local_qpn = id;
        INIT_LIST_HEAD(&ret->mcg_list);
        spin_lock_init(&ret->mcg_spl);
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
        struct res_mtt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->order = order;
        ret->com.state = RES_MTT_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
        struct res_mpt *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_MPT_RESERVED;
        ret->key = key;

        return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
        struct res_eq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_EQ_RESERVED;

        return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
        struct res_cq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_CQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
        struct res_srq *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_SRQ_ALLOCATED;
        atomic_set(&ret->ref_count, 0);

        return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
        struct res_counter *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_COUNTER_ALLOCATED;
        ret->port = port;

        return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
        struct res_xrcdn *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_XRCD_ALLOCATED;

        return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
        struct res_fs_rule *ret;

        ret = kzalloc(sizeof *ret, GFP_KERNEL);
        if (!ret)
                return NULL;

        ret->com.res_id = id;
        ret->com.state = RES_FS_RULE_ALLOCATED;
        ret->qpn = qpn;
        return &ret->com;
}
static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
                                   int extra)
{
        struct res_common *ret;

        switch (type) {
        case RES_QP:
                ret = alloc_qp_tr(id);
                break;
        case RES_MPT:
                ret = alloc_mpt_tr(id, extra);
                break;
        case RES_MTT:
                ret = alloc_mtt_tr(id, extra);
                break;
        case RES_EQ:
                ret = alloc_eq_tr(id);
                break;
        case RES_CQ:
                ret = alloc_cq_tr(id);
                break;
        case RES_SRQ:
                ret = alloc_srq_tr(id);
                break;
        case RES_MAC:
                pr_err("implementation missing\n");
                return NULL;
        case RES_COUNTER:
                ret = alloc_counter_tr(id, extra);
                break;
        case RES_XRCD:
                ret = alloc_xrcdn_tr(id);
                break;
        case RES_FS_RULE:
                ret = alloc_fs_rule_tr(id, extra);
                break;
        default:
                return NULL;
        }
        if (ret)
                ret->owner = slave;

        return ret;
}
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        int i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct res_common **res_arr;
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct rb_root *root = &tracker->res_tree[type];

        res_arr = kzalloc(count * sizeof *res_arr, GFP_KERNEL);
        if (!res_arr)
                return -ENOMEM;

        for (i = 0; i < count; ++i) {
                res_arr[i] = alloc_tr(base + i, type, slave, extra);
                if (!res_arr[i]) {
                        for (--i; i >= 0; --i)
                                kfree(res_arr[i]);

                        kfree(res_arr);
                        return -ENOMEM;
                }
        }

        spin_lock_irq(mlx4_tlock(dev));
        for (i = 0; i < count; ++i) {
                if (find_res(dev, base + i, type)) {
                        err = -EEXIST;
                        goto undo;
                }
                err = res_tracker_insert(root, res_arr[i]);
                if (err)
                        goto undo;
                list_add_tail(&res_arr[i]->list,
                              &tracker->slave_list[slave].res_list[type]);
        }
        spin_unlock_irq(mlx4_tlock(dev));
        kfree(res_arr);

        return 0;

undo:
        /* i indexes res_arr (0..count-1), not the resource id range; unlink
         * every entry that was already inserted before freeing the array
         */
        for (--i; i >= 0; --i) {
                rb_erase(&res_arr[i]->node, root);
                list_del(&res_arr[i]->list);
        }

        spin_unlock_irq(mlx4_tlock(dev));

        for (i = 0; i < count; ++i)
                kfree(res_arr[i]);

        kfree(res_arr);

        return err;
}
static int remove_qp_ok(struct res_qp *res)
{
        if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
            !list_empty(&res->mcg_list)) {
                pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
                       res->com.state, atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_QP_RESERVED) {
                return -EPERM;
        }

        return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
        if (res->com.state == RES_MTT_BUSY ||
            atomic_read(&res->ref_count)) {
                pr_devel("%s-%d: state %s, ref_count %d\n",
                         __func__, __LINE__,
                         mtt_states_str(res->com.state),
                         atomic_read(&res->ref_count));
                return -EBUSY;
        } else if (res->com.state != RES_MTT_ALLOCATED)
                return -EPERM;
        else if (res->order != order)
                return -EINVAL;

        return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
        if (res->com.state == RES_MPT_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_MPT_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
        if (res->com.state == RES_EQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_EQ_RESERVED)
                return -EPERM;

        return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
        if (res->com.state == RES_COUNTER_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_COUNTER_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
        if (res->com.state == RES_XRCD_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_XRCD_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
        if (res->com.state == RES_FS_RULE_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_FS_RULE_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
        if (res->com.state == RES_CQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_CQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
        if (res->com.state == RES_SRQ_BUSY)
                return -EBUSY;
        else if (res->com.state != RES_SRQ_ALLOCATED)
                return -EPERM;

        return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
        switch (type) {
        case RES_QP:
                return remove_qp_ok((struct res_qp *)res);
        case RES_CQ:
                return remove_cq_ok((struct res_cq *)res);
        case RES_SRQ:
                return remove_srq_ok((struct res_srq *)res);
        case RES_MPT:
                return remove_mpt_ok((struct res_mpt *)res);
        case RES_MTT:
                return remove_mtt_ok((struct res_mtt *)res, extra);
        case RES_MAC:
                return -ENOSYS;
        case RES_EQ:
                return remove_eq_ok((struct res_eq *)res);
        case RES_COUNTER:
                return remove_counter_ok((struct res_counter *)res);
        case RES_XRCD:
                return remove_xrcdn_ok((struct res_xrcdn *)res);
        case RES_FS_RULE:
                return remove_fs_rule_ok((struct res_fs_rule *)res);
        default:
                return -EINVAL;
        }
}
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
                         enum mlx4_resource type, int extra)
{
        u64 i;
        int err;
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                if (!r) {
                        err = -ENOENT;
                        goto out;
                }
                if (r->owner != slave) {
                        err = -EPERM;
                        goto out;
                }
                err = remove_ok(r, type, extra);
                if (err)
                        goto out;
        }

        for (i = base; i < base + count; ++i) {
                r = res_tracker_lookup(&tracker->res_tree[type], i);
                rb_erase(&r->node, &tracker->res_tree[type]);
                list_del(&r->list);
                kfree(r);
        }
        err = 0;

out:
        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
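/*
 * Illustrative pairing (hypothetical values): a tracked range is retired
 * with the same base/count/type it was registered with, e.g.
 *
 *	err = add_res_range(dev, slave, base_qpn, 4, RES_QP, 0);
 *	...
 *	err = rem_res_range(dev, slave, base_qpn, 4, RES_QP, 0);
 *
 * rem_res_range() refuses the whole range unless every entry is owned by
 * 'slave' and passes its remove_*_ok() state check, so a partially busy
 * range is left untouched.
 */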
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
                                enum res_qp_states state, struct res_qp **qp,
                                int alloc)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_qp *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_QP_BUSY:
                        mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
                                 __func__, r->com.res_id);
                        err = -EBUSY;
                        break;

                case RES_QP_RESERVED:
                        if (r->com.state == RES_QP_MAPPED && !alloc)
                                break;

                        mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
                        err = -EINVAL;
                        break;

                case RES_QP_MAPPED:
                        if ((r->com.state == RES_QP_RESERVED && alloc) ||
                            r->com.state == RES_QP_HW)
                                break;
                        else {
                                mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
                                         r->com.res_id);
                                err = -EINVAL;
                        }

                        break;

                case RES_QP_HW:
                        if (r->com.state != RES_QP_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_QP_BUSY;
                        if (qp)
                                *qp = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_mpt_states state, struct res_mpt **mpt)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_mpt *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_MPT_BUSY:
                        err = -EINVAL;
                        break;

                case RES_MPT_RESERVED:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;

                case RES_MPT_MAPPED:
                        if (r->com.state != RES_MPT_RESERVED &&
                            r->com.state != RES_MPT_HW)
                                err = -EINVAL;
                        break;

                case RES_MPT_HW:
                        if (r->com.state != RES_MPT_MAPPED)
                                err = -EINVAL;
                        break;
                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_MPT_BUSY;
                        if (mpt)
                                *mpt = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                enum res_eq_states state, struct res_eq **eq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_eq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
        if (!r)
                err = -ENOENT;
        else if (r->com.owner != slave)
                err = -EPERM;
        else {
                switch (state) {
                case RES_EQ_BUSY:
                        err = -EINVAL;
                        break;

                case RES_EQ_RESERVED:
                        if (r->com.state != RES_EQ_HW)
                                err = -EINVAL;
                        break;

                case RES_EQ_HW:
                        if (r->com.state != RES_EQ_RESERVED)
                                err = -EINVAL;
                        break;

                default:
                        err = -EINVAL;
                }

                if (!err) {
                        r->com.from_state = r->com.state;
                        r->com.to_state = state;
                        r->com.state = RES_EQ_BUSY;
                        if (eq)
                                *eq = r;
                }
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
                                enum res_cq_states state, struct res_cq **cq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_cq *r;
        int err;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_CQ_ALLOCATED) {
                if (r->com.state != RES_CQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
                else
                        err = 0;
        } else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
                err = -EINVAL;
        } else {
                err = 0;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_CQ_BUSY;
                if (cq)
                        *cq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
                                 enum res_srq_states state, struct res_srq **srq)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_srq *r;
        int err = 0;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
        if (!r) {
                err = -ENOENT;
        } else if (r->com.owner != slave) {
                err = -EPERM;
        } else if (state == RES_SRQ_ALLOCATED) {
                if (r->com.state != RES_SRQ_HW)
                        err = -EINVAL;
                else if (atomic_read(&r->ref_count))
                        err = -EBUSY;
        } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
                err = -EINVAL;
        }

        if (!err) {
                r->com.from_state = r->com.state;
                r->com.to_state = state;
                r->com.state = RES_SRQ_BUSY;
                if (srq)
                        *srq = r;
        }

        spin_unlock_irq(mlx4_tlock(dev));

        return err;
}
static void res_abort_move(struct mlx4_dev *dev, int slave,
                           enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->from_state;
        spin_unlock_irq(mlx4_tlock(dev));
}

static void res_end_move(struct mlx4_dev *dev, int slave,
                         enum mlx4_resource type, int id)
{
        struct mlx4_priv *priv = mlx4_priv(dev);
        struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
        struct res_common *r;

        spin_lock_irq(mlx4_tlock(dev));
        r = res_tracker_lookup(&tracker->res_tree[type], id);
        if (r && (r->owner == slave))
                r->state = r->to_state;
        spin_unlock_irq(mlx4_tlock(dev));
}
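/*
 * Illustrative sketch (hypothetical, not called by the driver) of the
 * three-phase transition protocol built on the helpers above: the
 * *_res_start_move_to() call parks the resource in its BUSY state, the
 * firmware command runs, and the move is then either committed with
 * res_end_move() or rolled back with res_abort_move().
 */
static __maybe_unused int example_cq_transition(struct mlx4_dev *dev,
                                                int slave, int cqn)
{
        struct res_cq *cq;
        int err;

        err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
        if (err)
                return err;

        err = 0; /* ... issue the SW2HW_CQ firmware command here ... */
        if (err) {
                res_abort_move(dev, slave, RES_CQ, cqn);
                return err;
        }

        res_end_move(dev, slave, RES_CQ, cqn);
        return 0;
}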
static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
{
        return mlx4_is_qp_reserved(dev, qpn) &&
                (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
}

static int fw_reserved(struct mlx4_dev *dev, int qpn)
{
        return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
}
static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                        u64 in_param, u64 *out_param)
{
        int err;
        int count;
        int align;
        int base;
        int qpn;
        u8 flags;

        switch (op) {
        case RES_OP_RESERVE:
                count = get_param_l(&in_param) & 0xffffff;
                /* Turn off all unsupported QP allocation flags that the
                 * slave tries to set.
                 */
                flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
                align = get_param_h(&in_param);
                err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
                if (err)
                        return err;

                err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        return err;
                }

                err = add_res_range(dev, slave, base, count, RES_QP, 0);
                if (err) {
                        mlx4_release_resource(dev, slave, RES_QP, count, 0);
                        __mlx4_qp_release_range(dev, base, count);
                        return err;
                }
                set_param_l(out_param, base);
                break;
        case RES_OP_MAP_ICM:
                qpn = get_param_l(&in_param) & 0x7fffff;
                if (valid_reserved(dev, slave, qpn)) {
                        err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
                        if (err)
                                return err;
                }

                err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
                                           NULL, 1);
                if (err)
                        return err;

                if (!fw_reserved(dev, qpn)) {
                        err = __mlx4_qp_alloc_icm(dev, qpn, GFP_KERNEL);
                        if (err) {
                                res_abort_move(dev, slave, RES_QP, qpn);
                                return err;
                        }
                }

                res_end_move(dev, slave, RES_QP, qpn);
                break;

        default:
                err = -EINVAL;
                break;
        }
        return err;
}
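/*
 * Illustrative encoding of the RES_OP_RESERVE wire argument decoded above
 * (hypothetical helper, not used by the driver): the low dword of
 * in_param carries the QP count in bits 0..23 and the allocation flags in
 * bits 24..31, while the high dword carries the alignment.
 */
static __maybe_unused u64 example_pack_qp_reserve(int count, u8 flags,
                                                  int align)
{
        u64 in_param = 0;

        set_param_l(&in_param, (count & 0xffffff) | ((u32)flags << 24));
        set_param_h(&in_param, align);
        return in_param;
}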
static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
                         u64 in_param, u64 *out_param)
{
        int err = -EINVAL;
        int base;
        int order;

        if (op != RES_OP_RESERVE_AND_MAP)
                return err;

        order = get_param_l(&in_param);

        err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
        if (err)
                return err;

        base = __mlx4_alloc_mtt_range(dev, order);
        if (base == -1) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                return -ENOMEM;
        }

        err = add_res_range(dev, slave, base, 1, RES_MTT, order);
        if (err) {
                mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
                __mlx4_free_mtt_range(dev, base, order);
        } else {
                set_param_l(out_param, base);
        }

        return err;
}
1671static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1672 u64 in_param, u64 *out_param)
1673{
1674 int err = -EINVAL;
1675 int index;
1676 int id;
1677 struct res_mpt *mpt;
1678
1679 switch (op) {
1680 case RES_OP_RESERVE:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001681 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1682 if (err)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001683 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001684
1685 index = __mlx4_mpt_reserve(dev);
1686 if (index == -1) {
1687 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1688 break;
1689 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001690 id = index & mpt_mask(dev);
1691
1692 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1693 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001694 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00001695 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001696 break;
1697 }
1698 set_param_l(out_param, index);
1699 break;
1700 case RES_OP_MAP_ICM:
1701 index = get_param_l(&in_param);
1702 id = index & mpt_mask(dev);
1703 err = mr_res_start_move_to(dev, slave, id,
1704 RES_MPT_MAPPED, &mpt);
1705 if (err)
1706 return err;
1707
Jiri Kosina40f22872014-05-11 15:15:12 +03001708 err = __mlx4_mpt_alloc_icm(dev, mpt->key, GFP_KERNEL);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001709 if (err) {
1710 res_abort_move(dev, slave, RES_MPT, id);
1711 return err;
1712 }
1713
1714 res_end_move(dev, slave, RES_MPT, id);
1715 break;
1716 }
1717 return err;
1718}
1719
1720static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1721 u64 in_param, u64 *out_param)
1722{
1723 int cqn;
1724 int err;
1725
1726 switch (op) {
1727 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001728 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001729 if (err)
1730 break;
1731
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001732 err = __mlx4_cq_alloc_icm(dev, &cqn);
1733 if (err) {
1734 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1735 break;
1736 }
1737
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001738 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1739 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001740 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001741 __mlx4_cq_free_icm(dev, cqn);
1742 break;
1743 }
1744
1745 set_param_l(out_param, cqn);
1746 break;
1747
1748 default:
1749 err = -EINVAL;
1750 }
1751
1752 return err;
1753}
1754
1755static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1756 u64 in_param, u64 *out_param)
1757{
1758 int srqn;
1759 int err;
1760
1761 switch (op) {
1762 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001763 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001764 if (err)
1765 break;
1766
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001767 err = __mlx4_srq_alloc_icm(dev, &srqn);
1768 if (err) {
1769 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1770 break;
1771 }
1772
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001773 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1774 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001775 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001776 __mlx4_srq_free_icm(dev, srqn);
1777 break;
1778 }
1779
1780 set_param_l(out_param, srqn);
1781 break;
1782
1783 default:
1784 err = -EINVAL;
1785 }
1786
1787 return err;
1788}
1789
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001790static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1791 u8 smac_index, u64 *mac)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001792{
1793 struct mlx4_priv *priv = mlx4_priv(dev);
1794 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001795 struct list_head *mac_list =
1796 &tracker->slave_list[slave].res_list[RES_MAC];
1797 struct mac_res *res, *tmp;
1798
1799 list_for_each_entry_safe(res, tmp, mac_list, list) {
1800 if (res->smac_index == smac_index && res->port == (u8) port) {
1801 *mac = res->mac;
1802 return 0;
1803 }
1804 }
1805 return -ENOENT;
1806}
1807
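/* MAC registrations are reference-counted per slave and per port: registering
 * the same MAC again only bumps the ref count, while the first registration
 * charges the slave's RES_MAC quota and records the SMAC index.
 */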
static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			/* mac found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_MAC, 1, port);
		return -ENOMEM;
	}
	res->mac = mac;
	res->port = (u8) port;
	res->smac_index = smac_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_MAC]);
	return 0;
}

static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
			       int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		if (res->mac == mac && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_MAC, 1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_macs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mac_list =
		&tracker->slave_list[slave].res_list[RES_MAC];
	struct mac_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, mac_list, list) {
		list_del(&res->list);
		/* dereference the MAC the number of times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_mac(dev, res->port, res->mac);
		mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
		kfree(res);
	}
}

static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int in_port)
{
	int err = -EINVAL;
	int port;
	u64 mac;
	u8 smac_index;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	port = !in_port ? get_param_l(out_param) : in_port;
	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	mac = in_param;

	err = __mlx4_register_mac(dev, port, mac);
	if (err >= 0) {
		smac_index = err;
		set_param_l(out_param, err);
		err = 0;
	}

	if (!err) {
		err = mac_add_to_slave(dev, slave, mac, port, smac_index);
		if (err)
			__mlx4_unregister_mac(dev, port, mac);
	}
	return err;
}

static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
			     int port, int vlan_index)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			/* vlan found. update ref count */
			++res->ref_count;
			return 0;
		}
	}

	if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
		return -EINVAL;
	res = kzalloc(sizeof(*res), GFP_KERNEL);
	if (!res) {
		mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
		return -ENOMEM;
	}
	res->vlan = vlan;
	res->port = (u8) port;
	res->vlan_index = vlan_index;
	res->ref_count = 1;
	list_add_tail(&res->list,
		      &tracker->slave_list[slave].res_list[RES_VLAN]);
	return 0;
}

static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
				int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		if (res->vlan == vlan && res->port == (u8) port) {
			if (!--res->ref_count) {
				list_del(&res->list);
				mlx4_release_resource(dev, slave, RES_VLAN,
						      1, port);
				kfree(res);
			}
			break;
		}
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *vlan_list =
		&tracker->slave_list[slave].res_list[RES_VLAN];
	struct vlan_res *res, *tmp;
	int i;

	list_for_each_entry_safe(res, tmp, vlan_list, list) {
		list_del(&res->list);
		/* dereference the VLAN the number of times the slave referenced it */
		for (i = 0; i < res->ref_count; i++)
			__mlx4_unregister_vlan(dev, res->port, res->vlan);
		mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
		kfree(res);
	}
}

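/* VLAN registration stays compatible with older guest drivers for which
 * reg/unreg VLAN was a NOP: such calls arrive without a port in the
 * in_modifier and are flagged via old_vlan_api.
 */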
static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param, int in_port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err;
	u16 vlan;
	int vlan_index;
	int port;

	port = !in_port ? get_param_l(out_param) : in_port;

	if (!port || op != RES_OP_RESERVE_AND_MAP)
		return -EINVAL;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	/* upstream kernels had NOP for reg/unreg vlan. Continue this. */
	if (!in_port && port > 0 && port <= dev->caps.num_ports) {
		slave_state[slave].old_vlan_api = true;
		return 0;
	}

	vlan = (u16) in_param;

	err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
	if (!err) {
		set_param_l(out_param, (u32) vlan_index);
		err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
		if (err)
			__mlx4_unregister_vlan(dev, port, vlan);
	}
	return err;
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param)
{
	u32 index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
	if (err)
		return err;

	err = __mlx4_counter_alloc(dev, &index);
	if (err) {
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		return err;
	}

	err = add_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err) {
		__mlx4_counter_free(dev, index);
		mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
	} else {
		set_param_l(out_param, index);
	}

	return err;
}

static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			   u64 in_param, u64 *out_param)
{
	u32 xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	err = __mlx4_xrcd_alloc(dev, &xrcdn);
	if (err)
		return err;

	err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		__mlx4_xrcd_free(dev, xrcdn);
	else
		set_param_l(out_param, xrcdn);

	return err;
}

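/* ALLOC_RES dispatcher: the low byte of the in_modifier selects the resource
 * type and, for MACs and VLANs, bits 8-15 carry the port number.
 */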
int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MTT:
		err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_CQ:
		err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param,
				     (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
					vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
				      vhcr->in_param, &vhcr->out_param);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

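/* Freeing mirrors allocation: RES_OP_RESERVE releases a whole reserved QP
 * range back to the allocator and the quota, RES_OP_MAP_ICM unmaps a single
 * QP's ICM (unless FW-owned) and moves it back to the RESERVED state.
 */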
static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param)
{
	int err;
	int count;
	int base;
	int qpn;

	switch (op) {
	case RES_OP_RESERVE:
		base = get_param_l(&in_param) & 0x7fffff;
		count = get_param_h(&in_param);
		err = rem_res_range(dev, slave, base, count, RES_QP, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_QP, count, 0);
		__mlx4_qp_release_range(dev, base, count);
		break;
	case RES_OP_MAP_ICM:
		qpn = get_param_l(&in_param) & 0x7fffff;
		err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
					   NULL, 0);
		if (err)
			return err;

		if (!fw_reserved(dev, qpn))
			__mlx4_qp_free_icm(dev, qpn);

		res_end_move(dev, slave, RES_QP, qpn);

		if (valid_reserved(dev, slave, qpn))
			err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
		break;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int err = -EINVAL;
	int base;
	int order;

	if (op != RES_OP_RESERVE_AND_MAP)
		return err;

	base = get_param_l(&in_param);
	order = get_param_h(&in_param);
	err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
	if (!err) {
		mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
		__mlx4_free_mtt_range(dev, base, order);
	}
	return err;
}

static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param)
{
	int err = -EINVAL;
	int index;
	int id;
	struct res_mpt *mpt;

	switch (op) {
	case RES_OP_RESERVE:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = get_res(dev, slave, id, RES_MPT, &mpt);
		if (err)
			break;
		index = mpt->key;
		put_res(dev, slave, id, RES_MPT);

		err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
		if (err)
			break;
		mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
		__mlx4_mpt_release(dev, index);
		break;
	case RES_OP_MAP_ICM:
		index = get_param_l(&in_param);
		id = index & mpt_mask(dev);
		err = mr_res_start_move_to(dev, slave, id,
					   RES_MPT_RESERVED, &mpt);
		if (err)
			return err;

		__mlx4_mpt_free_icm(dev, mpt->key);
		res_end_move(dev, slave, RES_MPT, id);
		return err;
	default:
		err = -EINVAL;
		break;
	}
	return err;
}

static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
		       u64 in_param, u64 *out_param)
{
	int cqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		cqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
		__mlx4_cq_free_icm(dev, cqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param)
{
	int srqn;
	int err;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		srqn = get_param_l(&in_param);
		err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
		if (err)
			break;

		mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
		__mlx4_srq_free_icm(dev, srqn);
		break;

	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			u64 in_param, u64 *out_param, int in_port)
{
	int port;
	int err = 0;

	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		port = !in_port ? get_param_l(out_param) : in_port;
		port = mlx4_slave_convert_port(
				dev, slave, port);

		if (port < 0)
			return -EINVAL;
		mac_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_mac(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			 u64 in_param, u64 *out_param, int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
	int err = 0;

	port = mlx4_slave_convert_port(
			dev, slave, port);

	if (port < 0)
		return -EINVAL;
	switch (op) {
	case RES_OP_RESERVE_AND_MAP:
		if (slave_state[slave].old_vlan_api)
			return 0;
		if (!port)
			return -EINVAL;
		vlan_del_from_slave(dev, slave, in_param, port);
		__mlx4_unregister_vlan(dev, port, in_param);
		break;
	default:
		err = -EINVAL;
		break;
	}

	return err;
}

static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			    u64 in_param, u64 *out_param)
{
	int index;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	index = get_param_l(&in_param);
	if (index == MLX4_SINK_COUNTER_INDEX(dev))
		return 0;

	err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
	if (err)
		return err;

	__mlx4_counter_free(dev, index);
	mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);

	return err;
}

static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			  u64 in_param, u64 *out_param)
{
	int xrcdn;
	int err;

	if (op != RES_OP_RESERVE)
		return -EINVAL;

	xrcdn = get_param_l(&in_param);
	err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
	if (err)
		return err;

	__mlx4_xrcd_free(dev, xrcdn);

	return err;
}

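/* FREE_RES dispatcher: mirrors mlx4_ALLOC_RES_wrapper, routing by the
 * resource type in the low byte of the in_modifier.
 */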
int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err = -EINVAL;
	int alop = vhcr->op_modifier;

	switch (vhcr->in_modifier & 0xFF) {
	case RES_QP:
		err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param);
		break;

	case RES_MTT:
		err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MPT:
		err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param);
		break;

	case RES_CQ:
		err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
				  vhcr->in_param, &vhcr->out_param);
		break;

	case RES_SRQ:
		err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param);
		break;

	case RES_MAC:
		err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
				   vhcr->in_param, &vhcr->out_param,
				   (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_VLAN:
		err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
				    vhcr->in_param, &vhcr->out_param,
				    (vhcr->in_modifier >> 8) & 0xFF);
		break;

	case RES_COUNTER:
		err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
				       vhcr->in_param, &vhcr->out_param);
		break;

	case RES_XRCD:
		err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
				     vhcr->in_param, &vhcr->out_param);
		break;

	default:
		break;
	}
	return err;
}

/* ugly but other choices are uglier */
static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
{
	return (be32_to_cpu(mpt->flags) >> 9) & 1;
}

static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
{
	return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
}

static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->mtt_sz);
}

static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
}

static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
}

static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
}

static int mr_is_region(struct mlx4_mpt_entry *mpt)
{
	return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
}

static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
}

static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
{
	return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
}

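/* The number of MTT entries a QP needs is derived from its context: total
 * SQ+RQ memory (the RQ counts as zero for SRQ/RSS/XRC QPs), shifted by the
 * page offset and rounded up to a power-of-two number of pages.
 */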
static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
{
	int page_shift = (qpc->log_page_size & 0x3f) + 12;
	int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
	int log_sq_stride = qpc->sq_size_stride & 7;
	int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
	int log_rq_stride = qpc->rq_size_stride & 7;
	int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
	int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
	u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
	int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
	int sq_size;
	int rq_size;
	int total_pages;
	int total_mem;
	int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;

	sq_size = 1 << (log_sq_size + log_sq_stride + 4);
	rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
	total_mem = sq_size + rq_size;
	total_pages =
		roundup_pow_of_two((total_mem + (page_offset << 6)) >>
				   page_shift);

	return total_pages;
}

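/* A slave may only reference MTT entries that fall entirely inside one of
 * the MTT ranges it owns.
 */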
static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
			   int size, struct res_mtt *mtt)
{
	int res_start = mtt->com.res_id;
	int res_size = (1 << mtt->order);

	if (start < res_start || start + size > res_start + res_size)
		return -EPERM;
	return 0;
}

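/* SW2HW_MPT validation: VFs may not create memory windows, bind-enabled MRs,
 * or FMRs, and the PD bits that encode the slave id must match the calling
 * slave before the MPT is handed to the HW.
 */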
int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mtt *mtt;
	struct res_mpt *mpt;
	int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
	int phys;
	int id;
	u32 pd;
	int pd_slave;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
	if (err)
		return err;

	/* Disable memory windows for VFs. */
	if (!mr_is_region(inbox->buf)) {
		err = -EPERM;
		goto ex_abort;
	}

	/* Make sure that the PD bits related to the slave id are zeros. */
	pd = mr_get_pd(inbox->buf);
	pd_slave = (pd >> 17) & 0x7f;
	if (pd_slave != 0 && --pd_slave != slave) {
		err = -EPERM;
		goto ex_abort;
	}

	if (mr_is_fmr(inbox->buf)) {
		/* FMR and Bind Enable are forbidden in slave devices. */
		if (mr_is_bind_enabled(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
		/* FMR and Memory Windows are also forbidden. */
		if (!mr_is_region(inbox->buf)) {
			err = -EPERM;
			goto ex_abort;
		}
	}

	phys = mr_phys_mpt(inbox->buf);
	if (!phys) {
		err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
		if (err)
			goto ex_abort;

		err = check_mtt_range(dev, slave, mtt_base,
				      mr_get_mtt_size(inbox->buf), mtt);
		if (err)
			goto ex_put;

		mpt->mtt = mtt;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	if (!phys) {
		atomic_inc(&mtt->ref_count);
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
	}

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_put:
	if (!phys)
		put_res(dev, slave, mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
	if (err)
		return err;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_abort;

	if (mpt->mtt)
		atomic_dec(&mpt->mtt->ref_count);

	res_end_move(dev, slave, RES_MPT, id);
	return 0;

ex_abort:
	res_abort_move(dev, slave, RES_MPT, id);

	return err;
}

int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	int err;
	int index = vhcr->in_modifier;
	struct res_mpt *mpt;
	int id;

	id = index & mpt_mask(dev);
	err = get_res(dev, slave, id, RES_MPT, &mpt);
	if (err)
		return err;

	if (mpt->com.from_state == RES_MPT_MAPPED) {
		/* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
		 * that, the VF must read the MPT. But since the MPT entry memory is not
		 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
		 * entry contents. To guarantee that the MPT cannot be changed, the driver
		 * must perform HW2SW_MPT before this query and return the MPT entry to HW
		 * ownership following the change. The change here allows the VF to
		 * perform QUERY_MPT also when the entry is in SW ownership.
		 */
		struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
					&mlx4_priv(dev)->mr_table.dmpt_table,
					mpt->key, NULL);

		if (NULL == mpt_entry || NULL == outbox->buf) {
			err = -EINVAL;
			goto out;
		}

		memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));

		err = 0;
	} else if (mpt->com.from_state == RES_MPT_HW) {
		err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	} else {
		err = -EBUSY;
		goto out;
	}

out:
	put_res(dev, slave, id, RES_MPT);
	return err;
}

static int qp_get_rcqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
}

static int qp_get_scqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->cqn_send) & 0xffffff;
}

static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
{
	return be32_to_cpu(qpc->srqn) & 0x1ffffff;
}

static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
				  struct mlx4_qp_context *context)
{
	u32 qpn = vhcr->in_modifier & 0xffffff;
	u32 qkey = 0;

	if (mlx4_get_parav_qkey(dev, qpn, &qkey))
		return;

	/* adjust qkey in qp context */
	context->qkey = cpu_to_be32(qkey);
}

static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
				 struct mlx4_qp_context *qpc,
				 struct mlx4_cmd_mailbox *inbox);

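/* RST2INIT pins every object the QP context references - the MTT range, the
 * receive and send CQs and an optional SRQ - bumping their ref counts before
 * the QP is handed to FW, so none of them can be destroyed under an active QP.
 */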
int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
			     struct mlx4_vhcr *vhcr,
			     struct mlx4_cmd_mailbox *inbox,
			     struct mlx4_cmd_mailbox *outbox,
			     struct mlx4_cmd_info *cmd)
{
	int err;
	int qpn = vhcr->in_modifier & 0x7fffff;
	struct res_mtt *mtt;
	struct res_qp *qp;
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
	int mtt_size = qp_get_mtt_size(qpc);
	struct res_cq *rcq;
	struct res_cq *scq;
	int rcqn = qp_get_rcqn(qpc);
	int scqn = qp_get_scqn(qpc);
	u32 srqn = qp_get_srqn(qpc) & 0xffffff;
	int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
	struct res_srq *srq;
	int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;

	err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
	if (err)
		return err;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
	if (err)
		return err;
	qp->local_qpn = local_qpn;
	qp->sched_queue = 0;
	qp->param3 = 0;
	qp->vlan_control = 0;
	qp->fvl_rx = 0;
	qp->pri_path_fl = 0;
	qp->vlan_index = 0;
	qp->feup = 0;
	qp->qpc_flags = be32_to_cpu(qpc->flags);

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto ex_abort;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto ex_put_mtt;

	err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
	if (err)
		goto ex_put_mtt;

	if (scqn != rcqn) {
		err = get_res(dev, slave, scqn, RES_CQ, &scq);
		if (err)
			goto ex_put_rcq;
	} else
		scq = rcq;

	if (use_srq) {
		err = get_res(dev, slave, srqn, RES_SRQ, &srq);
		if (err)
			goto ex_put_scq;
	}

	adjust_proxy_tun_qkey(dev, vhcr, qpc);
	update_pkey_index(dev, slave, inbox);
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put_srq;
	atomic_inc(&mtt->ref_count);
	qp->mtt = mtt;
	atomic_inc(&rcq->ref_count);
	qp->rcq = rcq;
	atomic_inc(&scq->ref_count);
	qp->scq = scq;

	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);

	if (use_srq) {
		atomic_inc(&srq->ref_count);
		put_res(dev, slave, srqn, RES_SRQ);
		qp->srq = srq;
	}
	put_res(dev, slave, rcqn, RES_CQ);
	put_res(dev, slave, mtt_base, RES_MTT);
	res_end_move(dev, slave, RES_QP, qpn);

	return 0;

ex_put_srq:
	if (use_srq)
		put_res(dev, slave, srqn, RES_SRQ);
ex_put_scq:
	if (scqn != rcqn)
		put_res(dev, slave, scqn, RES_CQ);
ex_put_rcq:
	put_res(dev, slave, rcqn, RES_CQ);
ex_put_mtt:
	put_res(dev, slave, mtt_base, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_QP, qpn);

	return err;
}

static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
{
	return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
}

static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
{
	int log_eq_size = eqc->log_eq_size & 0x1f;
	int page_shift = (eqc->log_page_size & 0x3f) + 12;

	if (log_eq_size + 5 < page_shift)
		return 1;

	return 1 << (log_eq_size + 5 - page_shift);
}

static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
{
	return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
}

static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
{
	int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
	int page_shift = (cqc->log_page_size & 0x3f) + 12;

	if (log_cq_size + 5 < page_shift)
		return 1;

	return 1 << (log_cq_size + 5 - page_shift);
}

int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int eqn = vhcr->in_modifier;
	int res_id = (slave << 10) | eqn;
	struct mlx4_eq_context *eqc = inbox->buf;
	int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
	int mtt_size = eq_get_mtt_size(eqc);
	struct res_eq *eq;
	struct res_mtt *mtt;

	err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	if (err)
		return err;
	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
	if (err)
		goto out_add;

	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;

	err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
	if (err)
		goto out_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;

	atomic_inc(&mtt->ref_count);
	eq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_EQ, res_id);
out_add:
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
	return err;
}

int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
			    struct mlx4_vhcr *vhcr,
			    struct mlx4_cmd_mailbox *inbox,
			    struct mlx4_cmd_mailbox *outbox,
			    struct mlx4_cmd_info *cmd)
{
	int err;
	u8 get = vhcr->op_modifier;

	if (get != 1)
		return -EPERM;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

	return err;
}

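/* Find the tracked MTT range owned by this slave that fully contains
 * [start, start + len) and mark it busy while the caller uses it.
 */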
static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
			      int len, struct res_mtt **res)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mtt *mtt;
	int err = -EINVAL;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
			    com.list) {
		if (!check_mtt_range(dev, slave, start, len, mtt)) {
			*res = mtt;
			mtt->com.from_state = mtt->com.state;
			mtt->com.state = RES_MTT_BUSY;
			err = 0;
			break;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

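/* Sanity-check a QP state transition requested by a slave: GID indexes in
 * the primary and alternate paths must fall within the slave's GID range,
 * VFs may not set rate-limit parameters, and only SMI-enabled VFs may
 * create MLX proxy special QPs.
 */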
static int verify_qp_parameters(struct mlx4_dev *dev,
				struct mlx4_vhcr *vhcr,
				struct mlx4_cmd_mailbox *inbox,
				enum qp_transition transition, u8 slave)
{
	u32 qp_type;
	u32 qpn;
	struct mlx4_qp_context *qp_ctx;
	enum mlx4_qp_optpar optpar;
	int port;
	int num_gids;

	qp_ctx = inbox->buf + 8;
	qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	optpar = be32_to_cpu(*(__be32 *) inbox->buf);

	if (slave != mlx4_master_func_num(dev)) {
		qp_ctx->params2 &= ~MLX4_QP_BIT_FPP;
		/* setting QP rate-limit is disallowed for VFs */
		if (qp_ctx->rate_limit_params)
			return -EPERM;
	}

	switch (qp_type) {
	case MLX4_QP_ST_RC:
	case MLX4_QP_ST_XRC:
	case MLX4_QP_ST_UC:
		switch (transition) {
		case QP_TRANS_INIT2RTR:
		case QP_TRANS_RTR2RTS:
		case QP_TRANS_RTS2RTS:
		case QP_TRANS_SQD2SQD:
		case QP_TRANS_SQD2RTS:
			if (slave != mlx4_master_func_num(dev)) {
				if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
					port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->pri_path.mgid_index >= num_gids)
						return -EINVAL;
				}
				if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
					port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
					if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
						num_gids = mlx4_get_slave_num_gids(dev, slave, port);
					else
						num_gids = 1;
					if (qp_ctx->alt_path.mgid_index >= num_gids)
						return -EINVAL;
				}
			}
			break;
		default:
			break;
		}
		break;

	case MLX4_QP_ST_MLX:
		qpn = vhcr->in_modifier & 0x7fffff;
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (transition == QP_TRANS_INIT2RTR &&
		    slave != mlx4_master_func_num(dev) &&
		    mlx4_is_qp_reserved(dev, qpn) &&
		    !mlx4_vf_smi_enabled(dev, slave, port)) {
			/* only enabled VFs may create MLX proxy QPs */
			mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
				 __func__, slave, port);
			return -EPERM;
		}
		break;

	default:
		break;
	}

	return 0;
}

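/* WRITE_MTT from a slave is validated against the slave's MTT ownership and
 * then executed through the SW write_mtt path, with the inbox page list
 * converted to host-endian addresses in place.
 */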
int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
			   struct mlx4_vhcr *vhcr,
			   struct mlx4_cmd_mailbox *inbox,
			   struct mlx4_cmd_mailbox *outbox,
			   struct mlx4_cmd_info *cmd)
{
	struct mlx4_mtt mtt;
	__be64 *page_list = inbox->buf;
	u64 *pg_list = (u64 *)page_list;
	int i;
	struct res_mtt *rmtt = NULL;
	int start = be64_to_cpu(page_list[0]);
	int npages = vhcr->in_modifier;
	int err;

	err = get_containing_mtt(dev, slave, start, npages, &rmtt);
	if (err)
		return err;

	/* Call the SW implementation of write_mtt:
	 * - Prepare a dummy mtt struct
	 * - Translate inbox contents to simple addresses in host endianness */
	mtt.offset = 0;  /* TBD this is broken but I don't handle it since
			    we don't really use it */
	mtt.order = 0;
	mtt.page_shift = 0;
	for (i = 0; i < npages; ++i)
		pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);

	err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
			       ((u64 *)page_list + 2));

	if (rmtt)
		put_res(dev, slave, rmtt->com.res_id, RES_MTT);

	return err;
}

int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
	if (err)
		return err;

	err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
	if (err)
		goto ex_abort;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto ex_put;

	atomic_dec(&eq->mtt->ref_count);
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_EQ, res_id);
	rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);

	return 0;

ex_put:
	put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
ex_abort:
	res_abort_move(dev, slave, RES_EQ, res_id);

	return err;
}

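/* Generate an EQE on behalf of a slave: the event is delivered through the
 * event EQ the slave registered for this event type, tracked under a res_id
 * that combines the slave number and the EQN.
 */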
int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_slave_event_eq_info *event_eq;
	struct mlx4_cmd_mailbox *mailbox;
	u32 in_modifier = 0;
	int err;
	int res_id;
	struct res_eq *req;

	if (!priv->mfunc.master.slave_state)
		return -EINVAL;

	/* check for slave valid, slave not PF, and slave active */
	if (slave < 0 || slave > dev->persist->num_vfs ||
	    slave == dev->caps.function ||
	    !priv->mfunc.master.slave_state[slave].active)
		return 0;

	event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];

	/* Create the event only if the slave is registered */
	if (event_eq->eqn < 0)
		return 0;

	mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	res_id = (slave << 10) | event_eq->eqn;
	err = get_res(dev, slave, res_id, RES_EQ, &req);
	if (err)
		goto unlock;

	if (req->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto put;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox)) {
		err = PTR_ERR(mailbox);
		goto put;
	}

	if (eqe->type == MLX4_EVENT_TYPE_CMD) {
		++event_eq->token;
		eqe->event.cmd.token = cpu_to_be16(event_eq->token);
	}

	memcpy(mailbox->buf, (u8 *) eqe, 28);

	in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);

	err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
		       MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
		       MLX4_CMD_NATIVE);

	put_res(dev, slave, res_id, RES_EQ);
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	mlx4_free_cmd_mailbox(dev, mailbox);
	return err;

put:
	put_res(dev, slave, res_id, RES_EQ);

unlock:
	mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
	return err;
}

int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int eqn = vhcr->in_modifier;
	int res_id = eqn | (slave << 10);
	struct res_eq *eq;
	int err;

	err = get_res(dev, slave, res_id, RES_EQ, &eq);
	if (err)
		return err;

	if (eq->com.from_state != RES_EQ_HW) {
		err = -EINVAL;
		goto ex_put;
	}

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);

ex_put:
	put_res(dev, slave, res_id, RES_EQ);
	return err;
}

int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct mlx4_cq_context *cqc = inbox->buf;
	int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
	struct res_cq *cq = NULL;
	struct res_mtt *mtt;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
	if (err)
		return err;
	err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
	if (err)
		goto out_move;
	err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
	if (err)
		goto out_put;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_put;
	atomic_inc(&mtt->ref_count);
	cq->mtt = mtt;
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_put:
	put_res(dev, slave, mtt->com.res_id, RES_MTT);
out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int err;
	int cqn = vhcr->in_modifier;
	struct res_cq *cq = NULL;

	err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
	if (err)
		return err;
	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
	if (err)
		goto out_move;
	atomic_dec(&cq->mtt->ref_count);
	res_end_move(dev, slave, RES_CQ, cqn);
	return 0;

out_move:
	res_abort_move(dev, slave, RES_CQ, cqn);
	return err;
}

int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
			  struct mlx4_vhcr *vhcr,
			  struct mlx4_cmd_mailbox *inbox,
			  struct mlx4_cmd_mailbox *outbox,
			  struct mlx4_cmd_info *cmd)
{
	int cqn = vhcr->in_modifier;
	struct res_cq *cq;
	int err;

	err = get_res(dev, slave, cqn, RES_CQ, &cq);
	if (err)
		return err;

	if (cq->com.from_state != RES_CQ_HW)
		goto ex_put;

	err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
ex_put:
	put_res(dev, slave, cqn, RES_CQ);

	return err;
}

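/* CQ resize: swap the CQ's MTT reference from the original range to the new
 * one described in the mailbox, keeping both pinned until the FW command
 * completes.
 */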
3302static int handle_resize(struct mlx4_dev *dev, int slave,
3303 struct mlx4_vhcr *vhcr,
3304 struct mlx4_cmd_mailbox *inbox,
3305 struct mlx4_cmd_mailbox *outbox,
3306 struct mlx4_cmd_info *cmd,
3307 struct res_cq *cq)
3308{
3309 int err;
3310 struct res_mtt *orig_mtt;
3311 struct res_mtt *mtt;
3312 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003313 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003314
3315 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3316 if (err)
3317 return err;
3318
3319 if (orig_mtt != cq->mtt) {
3320 err = -EINVAL;
3321 goto ex_put;
3322 }
3323
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003324 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003325 if (err)
3326 goto ex_put;
3327
3328 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3329 if (err)
3330 goto ex_put1;
3331 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3332 if (err)
3333 goto ex_put1;
3334 atomic_dec(&orig_mtt->ref_count);
3335 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3336 atomic_inc(&mtt->ref_count);
3337 cq->mtt = mtt;
3338 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3339 return 0;
3340
3341ex_put1:
3342 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3343ex_put:
3344 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3345
3346 return err;
3347
3348}
3349
3350int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3351 struct mlx4_vhcr *vhcr,
3352 struct mlx4_cmd_mailbox *inbox,
3353 struct mlx4_cmd_mailbox *outbox,
3354 struct mlx4_cmd_info *cmd)
3355{
3356 int cqn = vhcr->in_modifier;
3357 struct res_cq *cq;
3358 int err;
3359
3360 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3361 if (err)
3362 return err;
3363
3364	if (cq->com.from_state != RES_CQ_HW) {
		err = -EBUSY;
3365		goto ex_put;
	}
3366
3367 if (vhcr->op_modifier == 0) {
3368 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
Jack Morgensteindcf353b2012-03-07 05:56:35 +00003369 goto ex_put;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003370 }
3371
3372 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3373ex_put:
3374 put_res(dev, slave, cqn, RES_CQ);
3375
3376 return err;
3377}
3378
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003379static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3380{
3381 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3382 int log_rq_stride = srqc->logstride & 7;
3383 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3384
3385 if (log_srq_size + log_rq_stride + 4 < page_shift)
3386 return 1;
3387
3388 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3389}
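/*
 * Illustrative arithmetic (assumed values, not from the original file):
 * log_srq_size = 10 (1024 WQEs) with log_rq_stride = 2 gives strides of
 * 1 << (2 + 4) = 64 bytes, i.e. 1 << (10 + 2 + 4) = 64KB of queue
 * memory.  With 4KB pages (page_shift = 12) that is
 * 1 << (16 - 12) = 16 MTT entries; a queue smaller than one page
 * rounds up to a single entry.
 */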
3390
3391int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3392 struct mlx4_vhcr *vhcr,
3393 struct mlx4_cmd_mailbox *inbox,
3394 struct mlx4_cmd_mailbox *outbox,
3395 struct mlx4_cmd_info *cmd)
3396{
3397 int err;
3398 int srqn = vhcr->in_modifier;
3399 struct res_mtt *mtt;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003400 struct res_srq *srq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003401 struct mlx4_srq_context *srqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003402 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003403
3404 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3405 return -EINVAL;
3406
3407 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3408 if (err)
3409 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003410 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003411 if (err)
3412 goto ex_abort;
3413 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3414 mtt);
3415 if (err)
3416 goto ex_put_mtt;
3417
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003418 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3419 if (err)
3420 goto ex_put_mtt;
3421
3422 atomic_inc(&mtt->ref_count);
3423 srq->mtt = mtt;
3424 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3425 res_end_move(dev, slave, RES_SRQ, srqn);
3426 return 0;
3427
3428ex_put_mtt:
3429 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3430ex_abort:
3431 res_abort_move(dev, slave, RES_SRQ, srqn);
3432
3433 return err;
3434}
3435
3436int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3437 struct mlx4_vhcr *vhcr,
3438 struct mlx4_cmd_mailbox *inbox,
3439 struct mlx4_cmd_mailbox *outbox,
3440 struct mlx4_cmd_info *cmd)
3441{
3442 int err;
3443 int srqn = vhcr->in_modifier;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003444 struct res_srq *srq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003445
3446 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3447 if (err)
3448 return err;
3449 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3450 if (err)
3451 goto ex_abort;
3452 atomic_dec(&srq->mtt->ref_count);
3453 if (srq->cq)
3454 atomic_dec(&srq->cq->ref_count);
3455 res_end_move(dev, slave, RES_SRQ, srqn);
3456
3457 return 0;
3458
3459ex_abort:
3460 res_abort_move(dev, slave, RES_SRQ, srqn);
3461
3462 return err;
3463}
3464
3465int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3466 struct mlx4_vhcr *vhcr,
3467 struct mlx4_cmd_mailbox *inbox,
3468 struct mlx4_cmd_mailbox *outbox,
3469 struct mlx4_cmd_info *cmd)
3470{
3471 int err;
3472 int srqn = vhcr->in_modifier;
3473 struct res_srq *srq;
3474
3475 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3476 if (err)
3477 return err;
3478 if (srq->com.from_state != RES_SRQ_HW) {
3479 err = -EBUSY;
3480 goto out;
3481 }
3482 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3483out:
3484 put_res(dev, slave, srqn, RES_SRQ);
3485 return err;
3486}
3487
3488int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3489 struct mlx4_vhcr *vhcr,
3490 struct mlx4_cmd_mailbox *inbox,
3491 struct mlx4_cmd_mailbox *outbox,
3492 struct mlx4_cmd_info *cmd)
3493{
3494 int err;
3495 int srqn = vhcr->in_modifier;
3496 struct res_srq *srq;
3497
3498 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3499 if (err)
3500 return err;
3501
3502 if (srq->com.from_state != RES_SRQ_HW) {
3503 err = -EBUSY;
3504 goto out;
3505 }
3506
3507 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3508out:
3509 put_res(dev, slave, srqn, RES_SRQ);
3510 return err;
3511}
3512
3513int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3514 struct mlx4_vhcr *vhcr,
3515 struct mlx4_cmd_mailbox *inbox,
3516 struct mlx4_cmd_mailbox *outbox,
3517 struct mlx4_cmd_info *cmd)
3518{
3519 int err;
3520 int qpn = vhcr->in_modifier & 0x7fffff;
3521 struct res_qp *qp;
3522
3523 err = get_res(dev, slave, qpn, RES_QP, &qp);
3524 if (err)
3525 return err;
3526 if (qp->com.from_state != RES_QP_HW) {
3527 err = -EBUSY;
3528 goto out;
3529 }
3530
3531 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3532out:
3533 put_res(dev, slave, qpn, RES_QP);
3534 return err;
3535}
3536
Jack Morgenstein54679e12012-08-03 08:40:43 +00003537int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3538 struct mlx4_vhcr *vhcr,
3539 struct mlx4_cmd_mailbox *inbox,
3540 struct mlx4_cmd_mailbox *outbox,
3541 struct mlx4_cmd_info *cmd)
3542{
3543 struct mlx4_qp_context *context = inbox->buf + 8;
3544 adjust_proxy_tun_qkey(dev, vhcr, context);
3545 update_pkey_index(dev, slave, inbox);
3546 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3547}
3548
Matan Barak449fc482014-03-19 18:11:52 +02003549static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3550 struct mlx4_qp_context *qpc,
3551 struct mlx4_cmd_mailbox *inbox)
3552{
3553 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3554 u8 pri_sched_queue;
3555 int port = mlx4_slave_convert_port(
3556 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3557
3558 if (port < 0)
3559 return -EINVAL;
3560
3561 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3562 ((port & 1) << 6);
3563
Or Gerlitzf40e99e2015-05-21 15:14:08 +03003564 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3565 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
Matan Barak449fc482014-03-19 18:11:52 +02003566 qpc->pri_path.sched_queue = pri_sched_queue;
3567 }
3568
3569 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3570 port = mlx4_slave_convert_port(
3571 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3572 + 1) - 1;
3573 if (port < 0)
3574 return -EINVAL;
3575 qpc->alt_path.sched_queue =
3576 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3577 (port & 1) << 6;
3578 }
3579 return 0;
3580}
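/*
 * Hedged sketch of the bit-6 rewrite above (values are assumed): for a
 * VF whose virtual port 1 is backed by physical port 2,
 *
 *	sq = qpc->pri_path.sched_queue;		bit 6 clear, port 1
 *	phys = mlx4_slave_convert_port(...);	returns 2 here
 *	sq = (sq & ~(1 << 6)) | (((phys - 1) & 1) << 6);
 *
 * which flips only the port-select bit and leaves the priority bits of
 * sched_queue untouched.
 */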
3581
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003582static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3583 struct mlx4_qp_context *qpc,
3584 struct mlx4_cmd_mailbox *inbox)
3585{
3586 u64 mac;
3587 int port;
3588 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3589 u8 sched = *(u8 *)(inbox->buf + 64);
3590 u8 smac_ix;
3591
3592 port = (sched >> 6 & 1) + 1;
3593 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3594 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3595 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3596 return -ENOENT;
3597 }
3598 return 0;
3599}
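/*
 * Decoding notes for the check above (editorial): bits 16..23 of
 * qpc->flags carry the transport service type, so MLX4_QP_ST_MLX QPs
 * are exempt; for Ethernet ports the low 7 bits of pri_path.grh_mylmc
 * index the slave's SMAC table, and an index with no registered MAC
 * fails INIT2RTR with -ENOENT before the command reaches firmware.
 */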
3600
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003601int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3602 struct mlx4_vhcr *vhcr,
3603 struct mlx4_cmd_mailbox *inbox,
3604 struct mlx4_cmd_mailbox *outbox,
3605 struct mlx4_cmd_info *cmd)
3606{
Jack Morgenstein54679e12012-08-03 08:40:43 +00003607 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003608 struct mlx4_qp_context *qpc = inbox->buf + 8;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003609 int qpn = vhcr->in_modifier & 0x7fffff;
3610 struct res_qp *qp;
3611 u8 orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003612 __be32 orig_param3 = qpc->param3;
3613 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3614 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3615 u8 orig_pri_path_fl = qpc->pri_path.fl;
3616 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3617 u8 orig_feup = qpc->pri_path.feup;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003618
Matan Barak449fc482014-03-19 18:11:52 +02003619 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3620 if (err)
3621 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003622 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003623 if (err)
3624 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003625
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003626 if (roce_verify_mac(dev, slave, qpc, inbox))
3627 return -EINVAL;
3628
Jack Morgenstein54679e12012-08-03 08:40:43 +00003629 update_pkey_index(dev, slave, inbox);
3630 update_gid(dev, inbox, (u8)slave);
3631 adjust_proxy_tun_qkey(dev, vhcr, qpc);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003632 orig_sched_queue = qpc->pri_path.sched_queue;
3633 err = update_vport_qp_param(dev, inbox, slave, qpn);
Rony Efraim3f7fb022013-04-25 05:22:28 +00003634 if (err)
3635 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003636
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003637 err = get_res(dev, slave, qpn, RES_QP, &qp);
3638 if (err)
3639 return err;
3640 if (qp->com.from_state != RES_QP_HW) {
3641 err = -EBUSY;
3642 goto out;
3643 }
3644
3645 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3646out:
3647 /* if no error, save sched queue value passed in by VF. This is
3648	 * essentially the QoS value provided by the VF. This will be useful
3649 * if we allow dynamic changes from VST back to VGT
3650 */
Rony Efraimf0f829b2013-11-07 12:19:51 +02003651 if (!err) {
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003652 qp->sched_queue = orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003653 qp->param3 = orig_param3;
3654 qp->vlan_control = orig_vlan_control;
3655 qp->fvl_rx = orig_fvl_rx;
3656 qp->pri_path_fl = orig_pri_path_fl;
3657 qp->vlan_index = orig_vlan_index;
3658 qp->feup = orig_feup;
3659 }
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003660 put_res(dev, slave, qpn, RES_QP);
3661 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003662}
3663
3664int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3665 struct mlx4_vhcr *vhcr,
3666 struct mlx4_cmd_mailbox *inbox,
3667 struct mlx4_cmd_mailbox *outbox,
3668 struct mlx4_cmd_info *cmd)
3669{
3670 int err;
3671 struct mlx4_qp_context *context = inbox->buf + 8;
3672
Matan Barak449fc482014-03-19 18:11:52 +02003673 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3674 if (err)
3675 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003676 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003677 if (err)
3678 return err;
3679
3680 update_pkey_index(dev, slave, inbox);
3681 update_gid(dev, inbox, (u8)slave);
3682 adjust_proxy_tun_qkey(dev, vhcr, context);
3683 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3684}
3685
3686int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3687 struct mlx4_vhcr *vhcr,
3688 struct mlx4_cmd_mailbox *inbox,
3689 struct mlx4_cmd_mailbox *outbox,
3690 struct mlx4_cmd_info *cmd)
3691{
3692 int err;
3693 struct mlx4_qp_context *context = inbox->buf + 8;
3694
Matan Barak449fc482014-03-19 18:11:52 +02003695 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3696 if (err)
3697 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003698 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003699 if (err)
3700 return err;
3701
3702 update_pkey_index(dev, slave, inbox);
3703 update_gid(dev, inbox, (u8)slave);
3704 adjust_proxy_tun_qkey(dev, vhcr, context);
3705 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3706}
3707
3709int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3710 struct mlx4_vhcr *vhcr,
3711 struct mlx4_cmd_mailbox *inbox,
3712 struct mlx4_cmd_mailbox *outbox,
3713 struct mlx4_cmd_info *cmd)
3714{
3715 struct mlx4_qp_context *context = inbox->buf + 8;
Matan Barak449fc482014-03-19 18:11:52 +02003716 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3717 if (err)
3718 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003719 adjust_proxy_tun_qkey(dev, vhcr, context);
3720 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3721}
3722
3723int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3724 struct mlx4_vhcr *vhcr,
3725 struct mlx4_cmd_mailbox *inbox,
3726 struct mlx4_cmd_mailbox *outbox,
3727 struct mlx4_cmd_info *cmd)
3728{
3729 int err;
3730 struct mlx4_qp_context *context = inbox->buf + 8;
3731
Matan Barak449fc482014-03-19 18:11:52 +02003732 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3733 if (err)
3734 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003735 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003736 if (err)
3737 return err;
3738
3739 adjust_proxy_tun_qkey(dev, vhcr, context);
3740 update_gid(dev, inbox, (u8)slave);
3741 update_pkey_index(dev, slave, inbox);
3742 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3743}
3744
3745int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3746 struct mlx4_vhcr *vhcr,
3747 struct mlx4_cmd_mailbox *inbox,
3748 struct mlx4_cmd_mailbox *outbox,
3749 struct mlx4_cmd_info *cmd)
3750{
3751 int err;
3752 struct mlx4_qp_context *context = inbox->buf + 8;
3753
Matan Barak449fc482014-03-19 18:11:52 +02003754 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3755 if (err)
3756 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003757 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003758 if (err)
3759 return err;
3760
3761 adjust_proxy_tun_qkey(dev, vhcr, context);
3762 update_gid(dev, inbox, (u8)slave);
3763 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003764 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3765}
3766
3767int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3768 struct mlx4_vhcr *vhcr,
3769 struct mlx4_cmd_mailbox *inbox,
3770 struct mlx4_cmd_mailbox *outbox,
3771 struct mlx4_cmd_info *cmd)
3772{
3773 int err;
3774 int qpn = vhcr->in_modifier & 0x7fffff;
3775 struct res_qp *qp;
3776
3777 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3778 if (err)
3779 return err;
3780 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3781 if (err)
3782 goto ex_abort;
3783
3784 atomic_dec(&qp->mtt->ref_count);
3785 atomic_dec(&qp->rcq->ref_count);
3786 atomic_dec(&qp->scq->ref_count);
3787 if (qp->srq)
3788 atomic_dec(&qp->srq->ref_count);
3789 res_end_move(dev, slave, RES_QP, qpn);
3790 return 0;
3791
3792ex_abort:
3793 res_abort_move(dev, slave, RES_QP, qpn);
3794
3795 return err;
3796}
3797
3798static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
3799 struct res_qp *rqp, u8 *gid)
3800{
3801 struct res_gid *res;
3802
3803 list_for_each_entry(res, &rqp->mcg_list, list) {
3804 if (!memcmp(res->gid, gid, 16))
3805 return res;
3806 }
3807 return NULL;
3808}
3809
3810static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003811 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003812 enum mlx4_steer_type steer, u64 reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003813{
3814 struct res_gid *res;
3815 int err;
3816
3817	res = kzalloc(sizeof(*res), GFP_KERNEL);
3818 if (!res)
3819 return -ENOMEM;
3820
3821 spin_lock_irq(&rqp->mcg_spl);
3822 if (find_gid(dev, slave, rqp, gid)) {
3823 kfree(res);
3824 err = -EEXIST;
3825 } else {
3826 memcpy(res->gid, gid, 16);
3827 res->prot = prot;
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003828 res->steer = steer;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003829 res->reg_id = reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003830 list_add_tail(&res->list, &rqp->mcg_list);
3831 err = 0;
3832 }
3833 spin_unlock_irq(&rqp->mcg_spl);
3834
3835 return err;
3836}
3837
3838static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003839 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003840 enum mlx4_steer_type steer, u64 *reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003841{
3842 struct res_gid *res;
3843 int err;
3844
3845 spin_lock_irq(&rqp->mcg_spl);
3846 res = find_gid(dev, slave, rqp, gid);
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00003847 if (!res || res->prot != prot || res->steer != steer)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003848 err = -EINVAL;
3849 else {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003850 *reg_id = res->reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003851 list_del(&res->list);
3852 kfree(res);
3853 err = 0;
3854 }
3855 spin_unlock_irq(&rqp->mcg_spl);
3856
3857 return err;
3858}
3859
Matan Barak449fc482014-03-19 18:11:52 +02003860static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
3861 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003862 enum mlx4_steer_type type, u64 *reg_id)
3863{
3864 switch (dev->caps.steering_mode) {
Matan Barak449fc482014-03-19 18:11:52 +02003865 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
3866 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3867 if (port < 0)
3868 return port;
3869 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003870 block_loopback, prot,
3871 reg_id);
Matan Barak449fc482014-03-19 18:11:52 +02003872 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003873 case MLX4_STEERING_MODE_B0:
Matan Barak449fc482014-03-19 18:11:52 +02003874 if (prot == MLX4_PROT_ETH) {
3875 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
3876 if (port < 0)
3877 return port;
3878 gid[5] = port;
3879 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003880 return mlx4_qp_attach_common(dev, qp, gid,
3881 block_loopback, prot, type);
3882 default:
3883 return -EINVAL;
3884 }
3885}
3886
Matan Barak449fc482014-03-19 18:11:52 +02003887static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
3888 u8 gid[16], enum mlx4_protocol prot,
3889 enum mlx4_steer_type type, u64 reg_id)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003890{
3891 switch (dev->caps.steering_mode) {
3892 case MLX4_STEERING_MODE_DEVICE_MANAGED:
3893 return mlx4_flow_detach(dev, reg_id);
3894 case MLX4_STEERING_MODE_B0:
3895 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
3896 default:
3897 return -EINVAL;
3898 }
3899}
3900
Jack Morgenstein531d9012014-05-04 17:07:22 +03003901static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
3902 u8 *gid, enum mlx4_protocol prot)
3903{
3904 int real_port;
3905
3906 if (prot != MLX4_PROT_ETH)
3907 return 0;
3908
3909 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
3910 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
3911 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
3912 if (real_port < 0)
3913 return -EINVAL;
3914 gid[5] = real_port;
3915 }
3916
3917 return 0;
3918}
3919
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003920int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
3921 struct mlx4_vhcr *vhcr,
3922 struct mlx4_cmd_mailbox *inbox,
3923 struct mlx4_cmd_mailbox *outbox,
3924 struct mlx4_cmd_info *cmd)
3925{
3926 struct mlx4_qp qp; /* dummy for calling attach/detach */
3927 u8 *gid = inbox->buf;
3928 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
Or Gerlitz162344e2012-05-15 10:34:57 +00003929 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003930 int qpn;
3931 struct res_qp *rqp;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003932 u64 reg_id = 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003933 int attach = vhcr->op_modifier;
3934 int block_loopback = vhcr->in_modifier >> 31;
3935 u8 steer_type_mask = 2;
Eugenia Emantayev75c60622012-02-15 06:22:49 +00003936 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003937
3938 qpn = vhcr->in_modifier & 0xffffff;
3939 err = get_res(dev, slave, qpn, RES_QP, &rqp);
3940 if (err)
3941 return err;
3942
3943 qp.qpn = qpn;
3944 if (attach) {
Matan Barak449fc482014-03-19 18:11:52 +02003945 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003946 type, &reg_id);
3947 if (err) {
3948			pr_err("Failed to attach rule to qp 0x%x\n", qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003949 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003950 }
3951 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003952 if (err)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003953 goto ex_detach;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003954 } else {
Jack Morgenstein531d9012014-05-04 17:07:22 +03003955 err = mlx4_adjust_port(dev, slave, gid, prot);
3956 if (err)
3957 goto ex_put;
3958
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003959 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003960 if (err)
3961 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003962
3963 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
3964 if (err)
3965			pr_err("Failed to detach rule from qp 0x%x reg_id = 0x%llx\n",
3966 qpn, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003967 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003968 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003969 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003970
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00003971ex_detach:
3972 qp_detach(dev, &qp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003973ex_put:
3974 put_res(dev, slave, qpn, RES_QP);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003975 return err;
3976}
3977
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003978/*
3979 * MAC validation for Flow Steering rules.
3980 * VF can attach rules only with a mac address which is assigned to it.
3981 */
3982static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
3983 struct list_head *rlist)
3984{
3985 struct mac_res *res, *tmp;
3986 __be64 be_mac;
3987
3988	/* make sure it isn't a multicast or broadcast mac */
3989 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
3990 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
3991 list_for_each_entry_safe(res, tmp, rlist, list) {
3992 be_mac = cpu_to_be64(res->mac << 16);
dingtianhongc0623e52013-12-30 15:40:55 +08003993 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00003994 return 0;
3995 }
3996 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
3997 eth_header->eth.dst_mac, slave);
3998 return -EINVAL;
3999 }
4000 return 0;
4001}
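/*
 * Byte-layout illustration (assumed MAC, not from the original file):
 * a MAC tracked as the u64 0x0000112233445566 becomes, after "<< 16"
 * and cpu_to_be64(), the in-memory bytes 11 22 33 44 55 66 00 00, so
 * its first ETH_ALEN bytes line up with eth_header->eth.dst_mac for
 * ether_addr_equal().
 */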
4002
Matan Barak48564132015-05-31 09:30:15 +03004003static void handle_eth_header_mcast_prio(struct mlx4_net_trans_rule_hw_ctrl *ctrl,
4004 struct _rule_hw *eth_header)
4005{
4006 if (is_multicast_ether_addr(eth_header->eth.dst_mac) ||
4007 is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4008 struct mlx4_net_trans_rule_hw_eth *eth =
4009 (struct mlx4_net_trans_rule_hw_eth *)eth_header;
4010 struct _rule_hw *next_rule = (struct _rule_hw *)(eth + 1);
4011 bool last_rule = next_rule->size == 0 && next_rule->id == 0 &&
4012 next_rule->rsvd == 0;
4013
4014 if (last_rule)
4015 ctrl->prio = cpu_to_be16(MLX4_DOMAIN_NIC);
4016 }
4017}
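/*
 * Editorial reading of the test above: a rule is "last" when the spec
 * following the L2 header is all zeroes, i.e. the rule matches on L2
 * only.  Such plain multicast/broadcast rules appear to be demoted to
 * MLX4_DOMAIN_NIC priority so a slave's rule cannot outrank the PF's
 * own steering entries.
 */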
4018
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004019/*
4020 * In case of missing eth header, append eth header with a MAC address
4021 * assigned to the VF.
4022 */
4023static int add_eth_header(struct mlx4_dev *dev, int slave,
4024 struct mlx4_cmd_mailbox *inbox,
4025 struct list_head *rlist, int header_id)
4026{
4027 struct mac_res *res, *tmp;
4028 u8 port;
4029 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4030 struct mlx4_net_trans_rule_hw_eth *eth_header;
4031 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4032 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4033 __be64 be_mac = 0;
4034 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4035
4036 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Hadar Hen Zion015465f2013-01-30 23:07:02 +00004037 port = ctrl->port;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004038 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4039
4040 /* Clear a space in the inbox for eth header */
4041 switch (header_id) {
4042 case MLX4_NET_TRANS_RULE_ID_IPV4:
4043 ip_header =
4044 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4045 memmove(ip_header, eth_header,
4046 sizeof(*ip_header) + sizeof(*l4_header));
4047 break;
4048 case MLX4_NET_TRANS_RULE_ID_TCP:
4049 case MLX4_NET_TRANS_RULE_ID_UDP:
4050 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4051 (eth_header + 1);
4052 memmove(l4_header, eth_header, sizeof(*l4_header));
4053 break;
4054 default:
4055 return -EINVAL;
4056 }
4057 list_for_each_entry_safe(res, tmp, rlist, list) {
4058 if (port == res->port) {
4059 be_mac = cpu_to_be64(res->mac << 16);
4060 break;
4061 }
4062 }
4063 if (!be_mac) {
Joe Perches1a91de22014-05-07 12:52:57 -07004064		pr_err("Failed adding eth header to FS rule, can't find matching MAC for port %d\n",
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004065 port);
4066 return -EINVAL;
4067 }
4068
4069 memset(eth_header, 0, sizeof(*eth_header));
4070 eth_header->size = sizeof(*eth_header) >> 2;
4071 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4072 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4073 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4074
4075 return 0;
4076
4077}
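/*
 * Rough before/after of the inbox rewrite above (illustrative only),
 * for an IPv4 rule that arrived without an L2 spec:
 *
 *	before:	[ctrl][ipv4][tcp/udp]
 *	after:	[ctrl][eth ][ipv4][tcp/udp]
 *
 * memmove() first shifts the existing specs forward by one eth spec,
 * then the freed slot is filled with the VF's own MAC and an all-ones
 * MAC mask.
 */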
4078
Matan Barakce8d9e02014-05-15 15:29:27 +03004079#define MLX4_UPD_QP_PATH_MASK_SUPPORTED (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)
4080int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4081 struct mlx4_vhcr *vhcr,
4082 struct mlx4_cmd_mailbox *inbox,
4083 struct mlx4_cmd_mailbox *outbox,
4084 struct mlx4_cmd_info *cmd_info)
4085{
4086 int err;
4087 u32 qpn = vhcr->in_modifier & 0xffffff;
4088 struct res_qp *rqp;
4089 u64 mac;
4090	unsigned int port;
4091 u64 pri_addr_path_mask;
4092 struct mlx4_update_qp_context *cmd;
4093 int smac_index;
4094
4095 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4096
4097 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4098 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4099 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4100 return -EPERM;
4101
4102 /* Just change the smac for the QP */
4103 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4104 if (err) {
4105 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4106 return err;
4107 }
4108
4109 port = (rqp->sched_queue >> 6 & 1) + 1;
Matan Barakb7834752014-09-10 16:41:55 +03004110
4111 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4112 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4113 err = mac_find_smac_ix_in_slave(dev, slave, port,
4114 smac_index, &mac);
4115
4116 if (err) {
4117 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4118 qpn, smac_index);
4119 goto err_mac;
4120 }
Matan Barakce8d9e02014-05-15 15:29:27 +03004121 }
4122
4123 err = mlx4_cmd(dev, inbox->dma,
4124 vhcr->in_modifier, 0,
4125 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4126 MLX4_CMD_NATIVE);
4127 if (err) {
4128		mlx4_err(dev, "Failed to update qpn 0x%x, command failed\n", qpn);
4129 goto err_mac;
4130 }
4131
4132err_mac:
4133 put_res(dev, slave, qpn, RES_QP);
4134 return err;
4135}
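/*
 * Example of the mask policing above (hypothetical request): a VF that
 * sets any bit outside MLX4_UPD_QP_PATH_MASK_SUPPORTED -- say
 * MLX4_UPD_QP_PATH_MASK_VLAN_INDEX -- or that touches qp_mask or the
 * secondary address path is refused with -EPERM before any firmware
 * command is issued; only the MAC-index update is forwarded.
 */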
4136
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004137int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4138 struct mlx4_vhcr *vhcr,
4139 struct mlx4_cmd_mailbox *inbox,
4140 struct mlx4_cmd_mailbox *outbox,
4141 struct mlx4_cmd_info *cmd)
4142{
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004143
4144 struct mlx4_priv *priv = mlx4_priv(dev);
4145 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4146 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004147 int err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004148 int qpn;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004149 struct res_qp *rqp;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004150 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4151 struct _rule_hw *rule_header;
4152 int header_id;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004153
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004154 if (dev->caps.steering_mode !=
4155 MLX4_STEERING_MODE_DEVICE_MANAGED)
4156 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004157
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004158 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Matan Barak449fc482014-03-19 18:11:52 +02004159	err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4160	if (err <= 0)
4161		return -EINVAL;
	ctrl->port = err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004162 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004163 err = get_res(dev, slave, qpn, RES_QP, &rqp);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004164 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004165 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004166 return err;
4167 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004168 rule_header = (struct _rule_hw *)(ctrl + 1);
4169 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4170
Matan Barak48564132015-05-31 09:30:15 +03004171 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
4172 handle_eth_header_mcast_prio(ctrl, rule_header);
4173
4174 if (slave == dev->caps.function)
4175 goto execute;
4176
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004177 switch (header_id) {
4178 case MLX4_NET_TRANS_RULE_ID_ETH:
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004179 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4180 err = -EINVAL;
4181 goto err_put;
4182 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004183 break;
Jack Morgenstein60396682012-10-03 15:38:48 +00004184 case MLX4_NET_TRANS_RULE_ID_IB:
4185 break;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004186 case MLX4_NET_TRANS_RULE_ID_IPV4:
4187 case MLX4_NET_TRANS_RULE_ID_TCP:
4188 case MLX4_NET_TRANS_RULE_ID_UDP:
Joe Perches1a91de22014-05-07 12:52:57 -07004189 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004190 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4191 err = -EINVAL;
4192 goto err_put;
4193 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004194 vhcr->in_modifier +=
4195 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4196 break;
4197 default:
Joe Perches1a91de22014-05-07 12:52:57 -07004198 pr_err("Corrupted mailbox\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004199 err = -EINVAL;
4200 goto err_put;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004201 }
4202
Matan Barak48564132015-05-31 09:30:15 +03004203execute:
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004204 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4205 vhcr->in_modifier, 0,
4206 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4207 MLX4_CMD_NATIVE);
4208 if (err)
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004209 goto err_put;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004210
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004211 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004212 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004213		mlx4_err(dev, "Failed to add flow steering resources\n");
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004214		/* detach rule */
4215 mlx4_cmd(dev, vhcr->out_param, 0, 0,
Hadar Hen Zion2065b382012-12-06 17:11:58 +00004216 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004217 MLX4_CMD_NATIVE);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004218 goto err_put;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004219 }
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004220 atomic_inc(&rqp->ref_count);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004221err_put:
4222 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004223 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004224}
4225
4226int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4227 struct mlx4_vhcr *vhcr,
4228 struct mlx4_cmd_mailbox *inbox,
4229 struct mlx4_cmd_mailbox *outbox,
4230 struct mlx4_cmd_info *cmd)
4231{
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004232 int err;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004233 struct res_qp *rqp;
4234 struct res_fs_rule *rrule;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004235
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004236 if (dev->caps.steering_mode !=
4237 MLX4_STEERING_MODE_DEVICE_MANAGED)
4238 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004239
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004240 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4241 if (err)
4242 return err;
4243	/* Release the rule from busy state before removal */
4244 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4245 err = get_res(dev, slave, rrule->qpn, RES_QP, &rqp);
4246 if (err)
4247 return err;
4248
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004249 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4250 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004251		mlx4_err(dev, "Failed to remove flow steering resources\n");
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004252 goto out;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004253 }
4254
4255 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4256 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4257 MLX4_CMD_NATIVE);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004258 if (!err)
4259 atomic_dec(&rqp->ref_count);
4260out:
4261 put_res(dev, slave, rrule->qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004262 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004263}
4264
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004265enum {
4266 BUSY_MAX_RETRIES = 10
4267};
4268
4269int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4270 struct mlx4_vhcr *vhcr,
4271 struct mlx4_cmd_mailbox *inbox,
4272 struct mlx4_cmd_mailbox *outbox,
4273 struct mlx4_cmd_info *cmd)
4274{
4275 int err;
4276 int index = vhcr->in_modifier & 0xffff;
4277
4278 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4279 if (err)
4280 return err;
4281
4282 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4283 put_res(dev, slave, index, RES_COUNTER);
4284 return err;
4285}
4286
4287static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4288{
4289 struct res_gid *rgid;
4290 struct res_gid *tmp;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004291 struct mlx4_qp qp; /* dummy for calling attach/detach */
4292
4293 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004294 switch (dev->caps.steering_mode) {
4295 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4296 mlx4_flow_detach(dev, rgid->reg_id);
4297 break;
4298 case MLX4_STEERING_MODE_B0:
4299 qp.qpn = rqp->local_qpn;
4300 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4301 rgid->prot, rgid->steer);
4302 break;
4303 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004304 list_del(&rgid->list);
4305 kfree(rgid);
4306 }
4307}
4308
4309static int _move_all_busy(struct mlx4_dev *dev, int slave,
4310 enum mlx4_resource type, int print)
4311{
4312 struct mlx4_priv *priv = mlx4_priv(dev);
4313 struct mlx4_resource_tracker *tracker =
4314 &priv->mfunc.master.res_tracker;
4315 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4316 struct res_common *r;
4317 struct res_common *tmp;
4318 int busy;
4319
4320 busy = 0;
4321 spin_lock_irq(mlx4_tlock(dev));
4322 list_for_each_entry_safe(r, tmp, rlist, list) {
4323 if (r->owner == slave) {
4324 if (!r->removing) {
4325 if (r->state == RES_ANY_BUSY) {
4326 if (print)
4327 mlx4_dbg(dev,
Hadar Hen Zionaa1ec3d2012-07-05 04:03:42 +00004328 "%s id 0x%llx is busy\n",
Jack Morgenstein956463732014-06-08 13:49:45 +03004329 resource_str(type),
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004330 r->res_id);
4331 ++busy;
4332 } else {
4333 r->from_state = r->state;
4334 r->state = RES_ANY_BUSY;
4335 r->removing = 1;
4336 }
4337 }
4338 }
4339 }
4340 spin_unlock_irq(mlx4_tlock(dev));
4341
4342 return busy;
4343}
4344
4345static int move_all_busy(struct mlx4_dev *dev, int slave,
4346 enum mlx4_resource type)
4347{
4348 unsigned long begin;
4349 int busy;
4350
4351 begin = jiffies;
4352 do {
4353 busy = _move_all_busy(dev, slave, type, 0);
4354 if (time_after(jiffies, begin + 5 * HZ))
4355 break;
4356 if (busy)
4357 cond_resched();
4358 } while (busy);
4359
4360 if (busy)
4361 busy = _move_all_busy(dev, slave, type, 1);
4362
4363 return busy;
4364}
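/*
 * Timing note (editorial): the loop above polls _move_all_busy() with
 * cond_resched() for up to 5 * HZ jiffies (about five seconds); only
 * after the deadline does it run once more with print == 1 so the
 * still-busy resources are logged exactly once.
 */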
4365static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4366{
4367 struct mlx4_priv *priv = mlx4_priv(dev);
4368 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4369 struct list_head *qp_list =
4370 &tracker->slave_list[slave].res_list[RES_QP];
4371 struct res_qp *qp;
4372 struct res_qp *tmp;
4373 int state;
4374 u64 in_param;
4375 int qpn;
4376 int err;
4377
4378 err = move_all_busy(dev, slave, RES_QP);
4379 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004380 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4381 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004382
4383 spin_lock_irq(mlx4_tlock(dev));
4384 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4385 spin_unlock_irq(mlx4_tlock(dev));
4386 if (qp->com.owner == slave) {
4387 qpn = qp->com.res_id;
4388 detach_qp(dev, slave, qp);
4389 state = qp->com.from_state;
4390 while (state != 0) {
4391 switch (state) {
4392 case RES_QP_RESERVED:
4393 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004394 rb_erase(&qp->com.node,
4395 &tracker->res_tree[RES_QP]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004396 list_del(&qp->com.list);
4397 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004398 if (!valid_reserved(dev, slave, qpn)) {
4399 __mlx4_qp_release_range(dev, qpn, 1);
4400 mlx4_release_resource(dev, slave,
4401 RES_QP, 1, 0);
4402 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004403 kfree(qp);
4404 state = 0;
4405 break;
4406 case RES_QP_MAPPED:
4407 if (!valid_reserved(dev, slave, qpn))
4408 __mlx4_qp_free_icm(dev, qpn);
4409 state = RES_QP_RESERVED;
4410 break;
4411 case RES_QP_HW:
4412 in_param = slave;
4413 err = mlx4_cmd(dev, in_param,
4414 qp->local_qpn, 2,
4415 MLX4_CMD_2RST_QP,
4416 MLX4_CMD_TIME_CLASS_A,
4417 MLX4_CMD_NATIVE);
4418 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004419 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4420 slave, qp->local_qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004421 atomic_dec(&qp->rcq->ref_count);
4422 atomic_dec(&qp->scq->ref_count);
4423 atomic_dec(&qp->mtt->ref_count);
4424 if (qp->srq)
4425 atomic_dec(&qp->srq->ref_count);
4426 state = RES_QP_MAPPED;
4427 break;
4428 default:
4429 state = 0;
4430 }
4431 }
4432 }
4433 spin_lock_irq(mlx4_tlock(dev));
4434 }
4435 spin_unlock_irq(mlx4_tlock(dev));
4436}
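/*
 * Illustrative walk (editorial) of the state ladder above for a slave
 * QP found in RES_QP_HW:
 *
 *	RES_QP_HW       -> 2RST_QP command, drop rcq/scq/mtt/srq refs
 *	RES_QP_MAPPED   -> __mlx4_qp_free_icm()
 *	RES_QP_RESERVED -> erase from tree/list, release the QP range
 *
 * each case sets "state" to the next rung until it reaches 0.
 */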
4437
4438static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4439{
4440 struct mlx4_priv *priv = mlx4_priv(dev);
4441 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4442 struct list_head *srq_list =
4443 &tracker->slave_list[slave].res_list[RES_SRQ];
4444 struct res_srq *srq;
4445 struct res_srq *tmp;
4446 int state;
4447 u64 in_param;
4448 LIST_HEAD(tlist);
4449 int srqn;
4450 int err;
4451
4452 err = move_all_busy(dev, slave, RES_SRQ);
4453 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004454 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4455 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004456
4457 spin_lock_irq(mlx4_tlock(dev));
4458 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4459 spin_unlock_irq(mlx4_tlock(dev));
4460 if (srq->com.owner == slave) {
4461 srqn = srq->com.res_id;
4462 state = srq->com.from_state;
4463 while (state != 0) {
4464 switch (state) {
4465 case RES_SRQ_ALLOCATED:
4466 __mlx4_srq_free_icm(dev, srqn);
4467 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004468 rb_erase(&srq->com.node,
4469 &tracker->res_tree[RES_SRQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004470 list_del(&srq->com.list);
4471 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004472 mlx4_release_resource(dev, slave,
4473 RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004474 kfree(srq);
4475 state = 0;
4476 break;
4477
4478 case RES_SRQ_HW:
4479 in_param = slave;
4480 err = mlx4_cmd(dev, in_param, srqn, 1,
4481 MLX4_CMD_HW2SW_SRQ,
4482 MLX4_CMD_TIME_CLASS_A,
4483 MLX4_CMD_NATIVE);
4484 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004485 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004486 slave, srqn);
4487
4488 atomic_dec(&srq->mtt->ref_count);
4489 if (srq->cq)
4490 atomic_dec(&srq->cq->ref_count);
4491 state = RES_SRQ_ALLOCATED;
4492 break;
4493
4494 default:
4495 state = 0;
4496 }
4497 }
4498 }
4499 spin_lock_irq(mlx4_tlock(dev));
4500 }
4501 spin_unlock_irq(mlx4_tlock(dev));
4502}
4503
4504static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4505{
4506 struct mlx4_priv *priv = mlx4_priv(dev);
4507 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4508 struct list_head *cq_list =
4509 &tracker->slave_list[slave].res_list[RES_CQ];
4510 struct res_cq *cq;
4511 struct res_cq *tmp;
4512 int state;
4513 u64 in_param;
4514 LIST_HEAD(tlist);
4515 int cqn;
4516 int err;
4517
4518 err = move_all_busy(dev, slave, RES_CQ);
4519 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004520 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4521 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004522
4523 spin_lock_irq(mlx4_tlock(dev));
4524 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4525 spin_unlock_irq(mlx4_tlock(dev));
4526 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4527 cqn = cq->com.res_id;
4528 state = cq->com.from_state;
4529 while (state != 0) {
4530 switch (state) {
4531 case RES_CQ_ALLOCATED:
4532 __mlx4_cq_free_icm(dev, cqn);
4533 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004534 rb_erase(&cq->com.node,
4535 &tracker->res_tree[RES_CQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004536 list_del(&cq->com.list);
4537 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004538 mlx4_release_resource(dev, slave,
4539 RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004540 kfree(cq);
4541 state = 0;
4542 break;
4543
4544 case RES_CQ_HW:
4545 in_param = slave;
4546 err = mlx4_cmd(dev, in_param, cqn, 1,
4547 MLX4_CMD_HW2SW_CQ,
4548 MLX4_CMD_TIME_CLASS_A,
4549 MLX4_CMD_NATIVE);
4550 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004551 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004552 slave, cqn);
4553 atomic_dec(&cq->mtt->ref_count);
4554 state = RES_CQ_ALLOCATED;
4555 break;
4556
4557 default:
4558 state = 0;
4559 }
4560 }
4561 }
4562 spin_lock_irq(mlx4_tlock(dev));
4563 }
4564 spin_unlock_irq(mlx4_tlock(dev));
4565}
4566
4567static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
4568{
4569 struct mlx4_priv *priv = mlx4_priv(dev);
4570 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4571 struct list_head *mpt_list =
4572 &tracker->slave_list[slave].res_list[RES_MPT];
4573 struct res_mpt *mpt;
4574 struct res_mpt *tmp;
4575 int state;
4576 u64 in_param;
4577 LIST_HEAD(tlist);
4578 int mptn;
4579 int err;
4580
4581 err = move_all_busy(dev, slave, RES_MPT);
4582 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004583 mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
4584 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004585
4586 spin_lock_irq(mlx4_tlock(dev));
4587 list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
4588 spin_unlock_irq(mlx4_tlock(dev));
4589 if (mpt->com.owner == slave) {
4590 mptn = mpt->com.res_id;
4591 state = mpt->com.from_state;
4592 while (state != 0) {
4593 switch (state) {
4594 case RES_MPT_RESERVED:
Shani Michaelib20e5192013-02-06 16:19:08 +00004595 __mlx4_mpt_release(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004596 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004597 rb_erase(&mpt->com.node,
4598 &tracker->res_tree[RES_MPT]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004599 list_del(&mpt->com.list);
4600 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004601 mlx4_release_resource(dev, slave,
4602 RES_MPT, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004603 kfree(mpt);
4604 state = 0;
4605 break;
4606
4607 case RES_MPT_MAPPED:
Shani Michaelib20e5192013-02-06 16:19:08 +00004608 __mlx4_mpt_free_icm(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004609 state = RES_MPT_RESERVED;
4610 break;
4611
4612 case RES_MPT_HW:
4613 in_param = slave;
4614 err = mlx4_cmd(dev, in_param, mptn, 0,
4615 MLX4_CMD_HW2SW_MPT,
4616 MLX4_CMD_TIME_CLASS_A,
4617 MLX4_CMD_NATIVE);
4618 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004619 mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004620 slave, mptn);
4621 if (mpt->mtt)
4622 atomic_dec(&mpt->mtt->ref_count);
4623 state = RES_MPT_MAPPED;
4624 break;
4625 default:
4626 state = 0;
4627 }
4628 }
4629 }
4630 spin_lock_irq(mlx4_tlock(dev));
4631 }
4632 spin_unlock_irq(mlx4_tlock(dev));
4633}
4634
4635static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
4636{
4637 struct mlx4_priv *priv = mlx4_priv(dev);
4638 struct mlx4_resource_tracker *tracker =
4639 &priv->mfunc.master.res_tracker;
4640 struct list_head *mtt_list =
4641 &tracker->slave_list[slave].res_list[RES_MTT];
4642 struct res_mtt *mtt;
4643 struct res_mtt *tmp;
4644 int state;
4645 LIST_HEAD(tlist);
4646 int base;
4647 int err;
4648
4649 err = move_all_busy(dev, slave, RES_MTT);
4650 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004651 mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
4652 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004653
4654 spin_lock_irq(mlx4_tlock(dev));
4655 list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
4656 spin_unlock_irq(mlx4_tlock(dev));
4657 if (mtt->com.owner == slave) {
4658 base = mtt->com.res_id;
4659 state = mtt->com.from_state;
4660 while (state != 0) {
4661 switch (state) {
4662 case RES_MTT_ALLOCATED:
4663 __mlx4_free_mtt_range(dev, base,
4664 mtt->order);
4665 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004666 rb_erase(&mtt->com.node,
4667 &tracker->res_tree[RES_MTT]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004668 list_del(&mtt->com.list);
4669 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004670 mlx4_release_resource(dev, slave, RES_MTT,
4671 1 << mtt->order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004672 kfree(mtt);
4673 state = 0;
4674 break;
4675
4676 default:
4677 state = 0;
4678 }
4679 }
4680 }
4681 spin_lock_irq(mlx4_tlock(dev));
4682 }
4683 spin_unlock_irq(mlx4_tlock(dev));
4684}
4685
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004686static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
4687{
4688 struct mlx4_priv *priv = mlx4_priv(dev);
4689 struct mlx4_resource_tracker *tracker =
4690 &priv->mfunc.master.res_tracker;
4691 struct list_head *fs_rule_list =
4692 &tracker->slave_list[slave].res_list[RES_FS_RULE];
4693 struct res_fs_rule *fs_rule;
4694 struct res_fs_rule *tmp;
4695 int state;
4696 u64 base;
4697 int err;
4698
4699 err = move_all_busy(dev, slave, RES_FS_RULE);
4700 if (err)
4701		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules to busy for slave %d\n",
4702 slave);
4703
4704 spin_lock_irq(mlx4_tlock(dev));
4705 list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
4706 spin_unlock_irq(mlx4_tlock(dev));
4707 if (fs_rule->com.owner == slave) {
4708 base = fs_rule->com.res_id;
4709 state = fs_rule->com.from_state;
4710 while (state != 0) {
4711 switch (state) {
4712 case RES_FS_RULE_ALLOCATED:
4713 /* detach rule */
4714 err = mlx4_cmd(dev, base, 0, 0,
4715 MLX4_QP_FLOW_STEERING_DETACH,
4716 MLX4_CMD_TIME_CLASS_A,
4717 MLX4_CMD_NATIVE);
4718
4719 spin_lock_irq(mlx4_tlock(dev));
4720 rb_erase(&fs_rule->com.node,
4721 &tracker->res_tree[RES_FS_RULE]);
4722 list_del(&fs_rule->com.list);
4723 spin_unlock_irq(mlx4_tlock(dev));
4724 kfree(fs_rule);
4725 state = 0;
4726 break;
4727
4728 default:
4729 state = 0;
4730 }
4731 }
4732 }
4733 spin_lock_irq(mlx4_tlock(dev));
4734 }
4735 spin_unlock_irq(mlx4_tlock(dev));
4736}
4737
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004738static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
4739{
4740 struct mlx4_priv *priv = mlx4_priv(dev);
4741 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4742 struct list_head *eq_list =
4743 &tracker->slave_list[slave].res_list[RES_EQ];
4744 struct res_eq *eq;
4745 struct res_eq *tmp;
4746 int err;
4747 int state;
4748 LIST_HEAD(tlist);
4749 int eqn;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004750
4751 err = move_all_busy(dev, slave, RES_EQ);
4752 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004753 mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
4754 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004755
4756 spin_lock_irq(mlx4_tlock(dev));
4757 list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
4758 spin_unlock_irq(mlx4_tlock(dev));
4759 if (eq->com.owner == slave) {
4760 eqn = eq->com.res_id;
4761 state = eq->com.from_state;
4762 while (state != 0) {
4763 switch (state) {
4764 case RES_EQ_RESERVED:
4765 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004766 rb_erase(&eq->com.node,
4767 &tracker->res_tree[RES_EQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004768 list_del(&eq->com.list);
4769 spin_unlock_irq(mlx4_tlock(dev));
4770 kfree(eq);
4771 state = 0;
4772 break;
4773
4774 case RES_EQ_HW:
Yishai Hadas2d3c7392015-05-05 17:07:12 +03004775 err = mlx4_cmd(dev, slave, eqn & 0x3ff,
Jack Morgenstein30a5da52015-01-27 15:58:03 +02004776 1, MLX4_CMD_HW2SW_EQ,
4777 MLX4_CMD_TIME_CLASS_A,
4778 MLX4_CMD_NATIVE);
Jack Morgensteineb71d0d2012-05-15 10:35:04 +00004779 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004780 mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
Yishai Hadas2d3c7392015-05-05 17:07:12 +03004781 slave, eqn & 0x3ff);
Jack Morgensteineb71d0d2012-05-15 10:35:04 +00004782 atomic_dec(&eq->mtt->ref_count);
4783 state = RES_EQ_RESERVED;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004784 break;
4785
4786 default:
4787 state = 0;
4788 }
4789 }
4790 }
4791 spin_lock_irq(mlx4_tlock(dev));
4792 }
4793 spin_unlock_irq(mlx4_tlock(dev));
4794}
4795
Jack Morgensteinba062d52012-05-15 10:35:03 +00004796static void rem_slave_counters(struct mlx4_dev *dev, int slave)
4797{
4798 struct mlx4_priv *priv = mlx4_priv(dev);
4799 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4800 struct list_head *counter_list =
4801 &tracker->slave_list[slave].res_list[RES_COUNTER];
4802 struct res_counter *counter;
4803 struct res_counter *tmp;
4804 int err;
4805 int index;
4806
4807 err = move_all_busy(dev, slave, RES_COUNTER);
4808 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004809 mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
4810 slave);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004811
4812 spin_lock_irq(mlx4_tlock(dev));
4813 list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
4814 if (counter->com.owner == slave) {
4815 index = counter->com.res_id;
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004816 rb_erase(&counter->com.node,
4817 &tracker->res_tree[RES_COUNTER]);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004818 list_del(&counter->com.list);
4819 kfree(counter);
4820 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004821 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004822 }
4823 }
4824 spin_unlock_irq(mlx4_tlock(dev));
4825}
4826
4827static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
4828{
4829 struct mlx4_priv *priv = mlx4_priv(dev);
4830 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4831 struct list_head *xrcdn_list =
4832 &tracker->slave_list[slave].res_list[RES_XRCD];
4833 struct res_xrcdn *xrcd;
4834 struct res_xrcdn *tmp;
4835 int err;
4836 int xrcdn;
4837
4838 err = move_all_busy(dev, slave, RES_XRCD);
4839 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004840 mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
4841 slave);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004842
4843 spin_lock_irq(mlx4_tlock(dev));
4844 list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
4845 if (xrcd->com.owner == slave) {
4846 xrcdn = xrcd->com.res_id;
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004847 rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004848 list_del(&xrcd->com.list);
4849 kfree(xrcd);
4850 __mlx4_xrcd_free(dev, xrcdn);
4851 }
4852 }
4853 spin_unlock_irq(mlx4_tlock(dev));
4854}
4855
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004856void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
4857{
4858 struct mlx4_priv *priv = mlx4_priv(dev);
Jack Morgenstein111c6092014-05-27 09:26:38 +03004859 mlx4_reset_roce_gids(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004860 mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
Jack Morgenstein48740802013-11-03 10:03:20 +02004861 rem_slave_vlans(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004862 rem_slave_macs(dev, slave);
Hadar Hen Zion80cb0022013-03-21 05:55:52 +00004863 rem_slave_fs_rule(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004864 rem_slave_qps(dev, slave);
4865 rem_slave_srqs(dev, slave);
4866 rem_slave_cqs(dev, slave);
4867 rem_slave_mrs(dev, slave);
4868 rem_slave_eqs(dev, slave);
4869 rem_slave_mtts(dev, slave);
Jack Morgensteinba062d52012-05-15 10:35:03 +00004870 rem_slave_counters(dev, slave);
4871 rem_slave_xrcdns(dev, slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004872 mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
4873}
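/*
 * Ordering note (editorial): MACs, VLANs and flow-steering rules are
 * removed before the QPs that reference them, and MTTs are torn down
 * only after the QP/SRQ/CQ/MR/EQ stages above have dropped their
 * mtt->ref_count references.
 */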
Jack Morgensteinb01978c2013-06-27 19:05:21 +03004874
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
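	/* Path fields UPDATE_QP may modify; the VLAN filter (vlan_ctrl)
	 * bits are kept in a separate mask so they can be left untouched
	 * for RC QPs below.
	 */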
	u64 qp_path_mask_vlan_ctrl =
		((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		(1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
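	/* Pick the VLAN filtering policy to force on the QPs: block
	 * everything while the VF link is administratively disabled;
	 * with no VLAN configured, block tagged traffic in both
	 * directions; otherwise (VST) block tagged TX and
	 * untagged/priority-tagged RX so only the enforced VLAN passes.
	 */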
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

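	/* One mailbox is reused for every QP; qp_mask tells firmware
	 * which context fields (here the VSD bit) to update.
	 */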
	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

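	/* mlx4_cmd() sleeps, so the tracker lock is dropped inside the
	 * loop around each update and retaken before advancing.
	 */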
	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
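			/* Skip QPs with no hardware context yet, reserved
			 * QPs, and RSS QPs.
			 */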
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
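			/* Bit 6 of sched_queue holds the port; only QPs on
			 * the port being reconfigured are touched.
			 */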
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
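			/* The service type lives in bits 23:16 of
			 * qpc_flags; RC QPs keep their current VLAN
			 * filter settings.
			 */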
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
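			/* MLX4_VGT restores the values saved for the QP
			 * (guest-controlled tagging); any other vlan_id
			 * forces the new VLAN, QoS and scheduling setup.
			 */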
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_CV | MLX4_FL_ETH_HIDE_CQE_VLAN;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);
				upd_context->qp_mask |=
					cpu_to_be64(1ULL <<
						    MLX4_UPD_QP_MASK_QOS_VPP);
				upd_context->qp_context.qos_vport =
					work->qos_vport;
			}

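			/* Push the patched context to firmware; failures
			 * are counted but do not stop the scan.
			 */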
			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}