/*
 * Copyright (c) 2004, 2005 Topspin Communications. All rights reserved.
 * Copyright (c) 2005, 2006, 2007, 2008 Mellanox Technologies.
 * All rights reserved.
 * Copyright (c) 2005, 2006, 2007 Cisco Systems, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/sched.h>
#include <linux/pci.h>
#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/mlx4/cmd.h>
#include <linux/mlx4/qp.h>
#include <linux/if_ether.h>
#include <linux/etherdevice.h>

#include "mlx4.h"
#include "fw.h"
#include "mlx4_stats.h"

#define MLX4_MAC_VALID			(1ull << 63)
#define MLX4_PF_COUNTERS_PER_PORT	2
#define MLX4_VF_COUNTERS_PER_PORT	1

struct mac_res {
	struct list_head list;
	u64 mac;
	int ref_count;
	u8 smac_index;
	u8 port;
};

struct vlan_res {
	struct list_head list;
	u16 vlan;
	int ref_count;
	int vlan_index;
	u8 port;
};

struct res_common {
	struct list_head list;
	struct rb_node node;
	u64 res_id;
	int owner;
	int state;
	int from_state;
	int to_state;
	int removing;
	const char *func_name;
};

enum {
	RES_ANY_BUSY = 1
};

struct res_gid {
	struct list_head list;
	u8 gid[16];
	enum mlx4_protocol prot;
	enum mlx4_steer_type steer;
	u64 reg_id;
};

enum res_qp_states {
	RES_QP_BUSY = RES_ANY_BUSY,

	/* QP number was allocated */
	RES_QP_RESERVED,

	/* ICM memory for QP context was mapped */
	RES_QP_MAPPED,

	/* QP is in hw ownership */
	RES_QP_HW
};

struct res_qp {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *rcq;
	struct res_cq *scq;
	struct res_srq *srq;
	struct list_head mcg_list;
	spinlock_t mcg_spl;
	int local_qpn;
	atomic_t ref_count;
	u32 qpc_flags;
	/* saved qp params before VST enforcement in order to restore on VGT */
	u8 sched_queue;
	__be32 param3;
	u8 vlan_control;
	u8 fvl_rx;
	u8 pri_path_fl;
	u8 vlan_index;
	u8 feup;
};

enum res_mtt_states {
	RES_MTT_BUSY = RES_ANY_BUSY,
	RES_MTT_ALLOCATED,
};

static inline const char *mtt_states_str(enum res_mtt_states state)
{
	switch (state) {
	case RES_MTT_BUSY: return "RES_MTT_BUSY";
	case RES_MTT_ALLOCATED: return "RES_MTT_ALLOCATED";
	default: return "Unknown";
	}
}

struct res_mtt {
	struct res_common com;
	int order;
	atomic_t ref_count;
};

enum res_mpt_states {
	RES_MPT_BUSY = RES_ANY_BUSY,
	RES_MPT_RESERVED,
	RES_MPT_MAPPED,
	RES_MPT_HW,
};

struct res_mpt {
	struct res_common com;
	struct res_mtt *mtt;
	int key;
};

enum res_eq_states {
	RES_EQ_BUSY = RES_ANY_BUSY,
	RES_EQ_RESERVED,
	RES_EQ_HW,
};

struct res_eq {
	struct res_common com;
	struct res_mtt *mtt;
};

enum res_cq_states {
	RES_CQ_BUSY = RES_ANY_BUSY,
	RES_CQ_ALLOCATED,
	RES_CQ_HW,
};

struct res_cq {
	struct res_common com;
	struct res_mtt *mtt;
	atomic_t ref_count;
};

enum res_srq_states {
	RES_SRQ_BUSY = RES_ANY_BUSY,
	RES_SRQ_ALLOCATED,
	RES_SRQ_HW,
};

struct res_srq {
	struct res_common com;
	struct res_mtt *mtt;
	struct res_cq *cq;
	atomic_t ref_count;
};

enum res_counter_states {
	RES_COUNTER_BUSY = RES_ANY_BUSY,
	RES_COUNTER_ALLOCATED,
};

struct res_counter {
	struct res_common com;
	int port;
};

enum res_xrcdn_states {
	RES_XRCD_BUSY = RES_ANY_BUSY,
	RES_XRCD_ALLOCATED,
};

struct res_xrcdn {
	struct res_common com;
	int port;
};

enum res_fs_rule_states {
	RES_FS_RULE_BUSY = RES_ANY_BUSY,
	RES_FS_RULE_ALLOCATED,
};

struct res_fs_rule {
	struct res_common com;
	int qpn;
	/* VF DMFS mbox with port flipped */
	void *mirr_mbox;
	/* > 0 --> apply mirror when getting into HA mode      */
	/* = 0 --> un-apply mirror when getting out of HA mode */
	u32 mirr_mbox_size;
	struct list_head mirr_list;
	u64 mirr_rule_id;
};

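/* Look up a tracked resource by id in the per-type red-black tree.
 * Returns the embedded res_common on a hit, or NULL if the id is not
 * tracked. Callers serialize access through mlx4_tlock().
 */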
static void *res_tracker_lookup(struct rb_root *root, u64 res_id)
{
	struct rb_node *node = root->rb_node;

	while (node) {
		struct res_common *res = rb_entry(node, struct res_common,
						  node);

		if (res_id < res->res_id)
			node = node->rb_left;
		else if (res_id > res->res_id)
			node = node->rb_right;
		else
			return res;
	}
	return NULL;
}

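/* Insert a tracked resource into the per-type red-black tree, keyed by
 * res_id. Returns -EEXIST if an entry with the same id is already present.
 */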
static int res_tracker_insert(struct rb_root *root, struct res_common *res)
{
	struct rb_node **new = &(root->rb_node), *parent = NULL;

	/* Figure out where to put new node */
	while (*new) {
		struct res_common *this = rb_entry(*new, struct res_common,
						   node);

		parent = *new;
		if (res->res_id < this->res_id)
			new = &((*new)->rb_left);
		else if (res->res_id > this->res_id)
			new = &((*new)->rb_right);
		else
			return -EEXIST;
	}

	/* Add new node and rebalance tree. */
	rb_link_node(&res->node, parent, new);
	rb_insert_color(&res->node, root);

	return 0;
}

enum qp_transition {
	QP_TRANS_INIT2RTR,
	QP_TRANS_RTR2RTS,
	QP_TRANS_RTS2RTS,
	QP_TRANS_SQERR2RTS,
	QP_TRANS_SQD2SQD,
	QP_TRANS_SQD2RTS
};

/* For debug use */
static const char *resource_str(enum mlx4_resource rt)
{
	switch (rt) {
	case RES_QP: return "RES_QP";
	case RES_CQ: return "RES_CQ";
	case RES_SRQ: return "RES_SRQ";
	case RES_MPT: return "RES_MPT";
	case RES_MTT: return "RES_MTT";
	case RES_MAC: return "RES_MAC";
	case RES_VLAN: return "RES_VLAN";
	case RES_EQ: return "RES_EQ";
	case RES_COUNTER: return "RES_COUNTER";
	case RES_FS_RULE: return "RES_FS_RULE";
	case RES_XRCD: return "RES_XRCD";
	default: return "Unknown resource type !!!";
	}
}

static void rem_slave_vlans(struct mlx4_dev *dev, int slave);
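
/* Charge @count instances of @res_type to @slave's allocation, honoring
 * the per-slave quota and guaranteed minimum. Requests beyond the
 * guarantee are served from the shared free pool only while it stays
 * above the reserved watermark; otherwise -EDQUOT is returned.
 */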
static inline int mlx4_grant_resource(struct mlx4_dev *dev, int slave,
				      enum mlx4_resource res_type, int count,
				      int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int err = -EDQUOT;
	int allocated, free, reserved, guaranteed, from_free;
	int from_rsvd;

	if (slave > dev->persist->num_vfs)
		return -EINVAL;

	spin_lock(&res_alloc->alloc_lock);
	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	free = (port > 0) ? res_alloc->res_port_free[port - 1] :
		res_alloc->res_free;
	reserved = (port > 0) ? res_alloc->res_port_rsvd[port - 1] :
		res_alloc->res_reserved;
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated + count > res_alloc->quota[slave]) {
		mlx4_warn(dev, "VF %d port %d res %s: quota exceeded, count %d alloc %d quota %d\n",
			  slave, port, resource_str(res_type), count,
			  allocated, res_alloc->quota[slave]);
		goto out;
	}

	if (allocated + count <= guaranteed) {
		err = 0;
		from_rsvd = count;
	} else {
		/* portion may need to be obtained from free area */
		if (guaranteed - allocated > 0)
			from_free = count - (guaranteed - allocated);
		else
			from_free = count;

		from_rsvd = count - from_free;

		if (free - from_free >= reserved)
			err = 0;
		else
			mlx4_warn(dev, "VF %d port %d res %s: free pool empty, free %d from_free %d rsvd %d\n",
				  slave, port, resource_str(res_type), free,
				  from_free, reserved);
	}

	if (!err) {
		/* grant the request */
		if (port > 0) {
			res_alloc->allocated[(port - 1) *
				(dev->persist->num_vfs + 1) + slave] += count;
			res_alloc->res_port_free[port - 1] -= count;
			res_alloc->res_port_rsvd[port - 1] -= from_rsvd;
		} else {
			res_alloc->allocated[slave] += count;
			res_alloc->res_free -= count;
			res_alloc->res_reserved -= from_rsvd;
		}
	}

out:
	spin_unlock(&res_alloc->alloc_lock);
	return err;
}

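/* Return @count instances of @res_type from @slave back to the pools,
 * crediting the reserved area for any portion that falls back under
 * the slave's guaranteed minimum.
 */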
static inline void mlx4_release_resource(struct mlx4_dev *dev, int slave,
					 enum mlx4_resource res_type, int count,
					 int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct resource_allocator *res_alloc =
		&priv->mfunc.master.res_tracker.res_alloc[res_type];
	int allocated, guaranteed, from_rsvd;

	if (slave > dev->persist->num_vfs)
		return;

	spin_lock(&res_alloc->alloc_lock);

	allocated = (port > 0) ?
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] :
		res_alloc->allocated[slave];
	guaranteed = res_alloc->guaranteed[slave];

	if (allocated - count >= guaranteed) {
		from_rsvd = 0;
	} else {
		/* portion may need to be returned to reserved area */
		if (allocated - guaranteed > 0)
			from_rsvd = count - (allocated - guaranteed);
		else
			from_rsvd = count;
	}

	if (port > 0) {
		res_alloc->allocated[(port - 1) *
			(dev->persist->num_vfs + 1) + slave] -= count;
		res_alloc->res_port_free[port - 1] += count;
		res_alloc->res_port_rsvd[port - 1] += from_rsvd;
	} else {
		res_alloc->allocated[slave] -= count;
		res_alloc->res_free += count;
		res_alloc->res_reserved += from_rsvd;
	}

	spin_unlock(&res_alloc->alloc_lock);
}

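/* Split @num_instances between quota and guarantee for one function:
 * each of the (num_vfs + 1) functions is guaranteed an equal share of
 * half the pool and may allocate up to half the pool on top of its
 * guarantee. The PF additionally absorbs the reserved MTTs.
 */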
static inline void initialize_res_quotas(struct mlx4_dev *dev,
					 struct resource_allocator *res_alloc,
					 enum mlx4_resource res_type,
					 int vf, int num_instances)
{
	res_alloc->guaranteed[vf] = num_instances /
				    (2 * (dev->persist->num_vfs + 1));
	res_alloc->quota[vf] = (num_instances / 2) + res_alloc->guaranteed[vf];
	if (vf == mlx4_master_func_num(dev)) {
		res_alloc->res_free = num_instances;
		if (res_type == RES_MTT) {
			/* reserved mtts will be taken out of the PF allocation */
			res_alloc->res_free += dev->caps.reserved_mtts;
			res_alloc->guaranteed[vf] += dev->caps.reserved_mtts;
			res_alloc->quota[vf] += dev->caps.reserved_mtts;
		}
	}
}

void mlx4_init_quotas(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int pf;

	/* quotas for VFs are initialized in mlx4_slave_cap */
	if (mlx4_is_slave(dev))
		return;

	if (!mlx4_is_mfunc(dev)) {
		dev->quotas.qp = dev->caps.num_qps - dev->caps.reserved_qps -
			mlx4_num_reserved_sqps(dev);
		dev->quotas.cq = dev->caps.num_cqs - dev->caps.reserved_cqs;
		dev->quotas.srq = dev->caps.num_srqs - dev->caps.reserved_srqs;
		dev->quotas.mtt = dev->caps.num_mtts - dev->caps.reserved_mtts;
		dev->quotas.mpt = dev->caps.num_mpts - dev->caps.reserved_mrws;
		return;
	}

	pf = mlx4_master_func_num(dev);
	dev->quotas.qp =
		priv->mfunc.master.res_tracker.res_alloc[RES_QP].quota[pf];
	dev->quotas.cq =
		priv->mfunc.master.res_tracker.res_alloc[RES_CQ].quota[pf];
	dev->quotas.srq =
		priv->mfunc.master.res_tracker.res_alloc[RES_SRQ].quota[pf];
	dev->quotas.mtt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MTT].quota[pf];
	dev->quotas.mpt =
		priv->mfunc.master.res_tracker.res_alloc[RES_MPT].quota[pf];
}

static int get_max_gauranteed_vfs_counter(struct mlx4_dev *dev)
{
	/* reduce the sink counter */
	return (dev->caps.max_counters - 1 -
		(MLX4_PF_COUNTERS_PER_PORT * MLX4_MAX_PORTS))
		/ MLX4_MAX_PORTS;
}

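/* Build the master's resource tracker: per-slave resource lists, the
 * per-type rb-trees, and the quota/guarantee tables for every resource
 * type and every function (PF + VFs).
 */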
int mlx4_init_resource_tracker(struct mlx4_dev *dev)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i, j;
	int t;
	int max_vfs_guarantee_counter = get_max_gauranteed_vfs_counter(dev);

	priv->mfunc.master.res_tracker.slave_list =
		kzalloc(dev->num_slaves * sizeof(struct slave_list),
			GFP_KERNEL);
	if (!priv->mfunc.master.res_tracker.slave_list)
		return -ENOMEM;

	for (i = 0; i < dev->num_slaves; i++) {
		for (t = 0; t < MLX4_NUM_OF_RESOURCE_TYPE; ++t)
			INIT_LIST_HEAD(&priv->mfunc.master.res_tracker.
				       slave_list[i].res_list[t]);
		mutex_init(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
	}

	mlx4_dbg(dev, "Started init_resource_tracker: %ld slaves\n",
		 dev->num_slaves);
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++)
		priv->mfunc.master.res_tracker.res_tree[i] = RB_ROOT;

	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		struct resource_allocator *res_alloc =
			&priv->mfunc.master.res_tracker.res_alloc[i];
		res_alloc->quota = kmalloc((dev->persist->num_vfs + 1) *
					   sizeof(int), GFP_KERNEL);
		res_alloc->guaranteed = kmalloc((dev->persist->num_vfs + 1) *
						sizeof(int), GFP_KERNEL);
		if (i == RES_MAC || i == RES_VLAN)
			res_alloc->allocated = kzalloc(MLX4_MAX_PORTS *
						       (dev->persist->num_vfs
						       + 1) *
						       sizeof(int), GFP_KERNEL);
		else
			res_alloc->allocated = kzalloc((dev->persist->
							num_vfs + 1) *
						       sizeof(int), GFP_KERNEL);
		/* Reduce the sink counter */
		if (i == RES_COUNTER)
			res_alloc->res_free = dev->caps.max_counters - 1;

		if (!res_alloc->quota || !res_alloc->guaranteed ||
		    !res_alloc->allocated)
			goto no_mem_err;

		spin_lock_init(&res_alloc->alloc_lock);
		for (t = 0; t < dev->persist->num_vfs + 1; t++) {
			struct mlx4_active_ports actv_ports =
				mlx4_get_active_ports(dev, t);
			switch (i) {
			case RES_QP:
				initialize_res_quotas(dev, res_alloc, RES_QP,
						      t, dev->caps.num_qps -
						      dev->caps.reserved_qps -
						      mlx4_num_reserved_sqps(dev));
				break;
			case RES_CQ:
				initialize_res_quotas(dev, res_alloc, RES_CQ,
						      t, dev->caps.num_cqs -
						      dev->caps.reserved_cqs);
				break;
			case RES_SRQ:
				initialize_res_quotas(dev, res_alloc, RES_SRQ,
						      t, dev->caps.num_srqs -
						      dev->caps.reserved_srqs);
				break;
			case RES_MPT:
				initialize_res_quotas(dev, res_alloc, RES_MPT,
						      t, dev->caps.num_mpts -
						      dev->caps.reserved_mrws);
				break;
			case RES_MTT:
				initialize_res_quotas(dev, res_alloc, RES_MTT,
						      t, dev->caps.num_mtts -
						      dev->caps.reserved_mtts);
				break;
			case RES_MAC:
				if (t == mlx4_master_func_num(dev)) {
					int max_vfs_pport = 0;
					/* Calculate the max vfs per port for
					 * both ports.
					 */
					for (j = 0; j < dev->caps.num_ports;
					     j++) {
						struct mlx4_slaves_pport slaves_pport =
							mlx4_phys_to_slaves_pport(dev, j + 1);
						unsigned current_slaves =
							bitmap_weight(slaves_pport.slaves,
								      dev->caps.num_ports) - 1;
						if (max_vfs_pport < current_slaves)
							max_vfs_pport =
								current_slaves;
					}
					res_alloc->quota[t] =
						MLX4_MAX_MAC_NUM -
						2 * max_vfs_pport;
					res_alloc->guaranteed[t] = 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							MLX4_MAX_MAC_NUM;
				} else {
					res_alloc->quota[t] = MLX4_MAX_MAC_NUM;
					res_alloc->guaranteed[t] = 2;
				}
				break;
			case RES_VLAN:
				if (t == mlx4_master_func_num(dev)) {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM;
					res_alloc->guaranteed[t] = MLX4_MAX_VLAN_NUM / 2;
					for (j = 0; j < MLX4_MAX_PORTS; j++)
						res_alloc->res_port_free[j] =
							res_alloc->quota[t];
				} else {
					res_alloc->quota[t] = MLX4_MAX_VLAN_NUM / 2;
					res_alloc->guaranteed[t] = 0;
				}
				break;
			case RES_COUNTER:
				res_alloc->quota[t] = dev->caps.max_counters;
				if (t == mlx4_master_func_num(dev))
					res_alloc->guaranteed[t] =
						MLX4_PF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else if (t <= max_vfs_guarantee_counter)
					res_alloc->guaranteed[t] =
						MLX4_VF_COUNTERS_PER_PORT *
						MLX4_MAX_PORTS;
				else
					res_alloc->guaranteed[t] = 0;
				break;
			default:
				break;
			}
			if (i == RES_MAC || i == RES_VLAN) {
				for (j = 0; j < dev->caps.num_ports; j++)
					if (test_bit(j, actv_ports.ports))
						res_alloc->res_port_rsvd[j] +=
							res_alloc->guaranteed[t];
			} else {
				res_alloc->res_reserved += res_alloc->guaranteed[t];
			}
		}
	}
	spin_lock_init(&priv->mfunc.master.res_tracker.lock);
	return 0;

no_mem_err:
	for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
		priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
		priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
		kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
		priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
	}
	return -ENOMEM;
}

void mlx4_free_resource_tracker(struct mlx4_dev *dev,
				enum mlx4_res_tracker_free_type type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	int i;

	if (priv->mfunc.master.res_tracker.slave_list) {
		if (type != RES_TR_FREE_STRUCTS_ONLY) {
			for (i = 0; i < dev->num_slaves; i++) {
				if (type == RES_TR_FREE_ALL ||
				    dev->caps.function != i)
					mlx4_delete_all_resources_for_slave(dev, i);
			}
			/* free master's vlans */
			i = dev->caps.function;
			mlx4_reset_roce_gids(dev, i);
			mutex_lock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
			rem_slave_vlans(dev, i);
			mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[i].mutex);
		}

		if (type != RES_TR_FREE_SLAVES_ONLY) {
			for (i = 0; i < MLX4_NUM_OF_RESOURCE_TYPE; i++) {
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].allocated);
				priv->mfunc.master.res_tracker.res_alloc[i].allocated = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].guaranteed);
				priv->mfunc.master.res_tracker.res_alloc[i].guaranteed = NULL;
				kfree(priv->mfunc.master.res_tracker.res_alloc[i].quota);
				priv->mfunc.master.res_tracker.res_alloc[i].quota = NULL;
			}
			kfree(priv->mfunc.master.res_tracker.slave_list);
			priv->mfunc.master.res_tracker.slave_list = NULL;
		}
	}
}

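/* Rewrite the pkey index in a wrapped QP modify mailbox from the
 * slave's virtual index to the physical one (offset 35 in the mailbox,
 * with the port taken from the sched_queue byte at offset 64).
 */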
static void update_pkey_index(struct mlx4_dev *dev, int slave,
			      struct mlx4_cmd_mailbox *inbox)
{
	u8 sched = *(u8 *)(inbox->buf + 64);
	u8 orig_index = *(u8 *)(inbox->buf + 35);
	u8 new_index;
	struct mlx4_priv *priv = mlx4_priv(dev);
	int port;

	port = (sched >> 6 & 1) + 1;

	new_index = priv->virt2phys_pkey[slave][port - 1][orig_index];
	*(u8 *)(inbox->buf + 35) = new_index;
}

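/* Adjust the mgid_index in a wrapped QP context so each slave sees its
 * own GID range: UD QPs get the slave's base index, while RC/UC/XRC
 * QPs have their primary and alternate paths offset per the optpar mask.
 */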
static void update_gid(struct mlx4_dev *dev, struct mlx4_cmd_mailbox *inbox,
		       u8 slave)
{
	struct mlx4_qp_context *qp_ctx = inbox->buf + 8;
	enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *) inbox->buf);
	u32 ts = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
	int port;

	if (MLX4_QP_ST_UD == ts) {
		port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
		if (mlx4_is_eth(dev, port))
			qp_ctx->pri_path.mgid_index =
				mlx4_get_base_gid_ix(dev, slave, port) | 0x80;
		else
			qp_ctx->pri_path.mgid_index = slave | 0x80;

	} else if (MLX4_QP_ST_RC == ts || MLX4_QP_ST_XRC == ts || MLX4_QP_ST_UC == ts) {
		if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
			port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->pri_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->pri_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->pri_path.mgid_index = slave & 0x7F;
			}
		}
		if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
			port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
			if (mlx4_is_eth(dev, port)) {
				qp_ctx->alt_path.mgid_index +=
					mlx4_get_base_gid_ix(dev, slave, port);
				qp_ctx->alt_path.mgid_index &= 0x7f;
			} else {
				qp_ctx->alt_path.mgid_index = slave & 0x7F;
			}
		}
	}
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port);

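/* Apply the PF-administered vport state (VST vlan, QinQ, spoof check,
 * link state, QoS) to a QP context that a VF is about to program.
 */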
static int update_vport_qp_param(struct mlx4_dev *dev,
				 struct mlx4_cmd_mailbox *inbox,
				 u8 slave, u32 qpn)
{
	struct mlx4_qp_context *qpc = inbox->buf + 8;
	struct mlx4_vport_oper_state *vp_oper;
	struct mlx4_priv *priv;
	u32 qp_type;
	int port, err = 0;

	port = (qpc->pri_path.sched_queue & 0x40) ? 2 : 1;
	priv = mlx4_priv(dev);
	vp_oper = &priv->mfunc.master.vf_oper[slave].vport[port];
	qp_type	= (be32_to_cpu(qpc->flags) >> 16) & 0xff;

	err = handle_counter(dev, qpc, slave, port);
	if (err)
		goto out;

	if (MLX4_VGT != vp_oper->state.default_vlan) {
		/* the reserved QPs (special, proxy, tunnel)
		 * do not operate over vlans
		 */
		if (mlx4_is_qp_reserved(dev, qpn))
			return 0;

		/* force strip vlan by clear vsd, MLX QP refers to Raw Ethernet */
		if (qp_type == MLX4_QP_ST_UD ||
		    (qp_type == MLX4_QP_ST_MLX && mlx4_is_eth(dev, port))) {
			if (dev->caps.bmme_flags & MLX4_BMME_FLAG_VSD_INIT2RTR) {
				*(__be32 *)inbox->buf =
					cpu_to_be32(be32_to_cpu(*(__be32 *)inbox->buf) |
					MLX4_QP_OPTPAR_VLAN_STRIPPING);
				qpc->param3 &= ~cpu_to_be32(MLX4_STRIP_VLAN);
			} else {
				struct mlx4_update_qp_params params = {.flags = 0};

				err = mlx4_update_qp(dev, qpn, MLX4_UPDATE_QP_VSD, &params);
				if (err)
					goto out;
			}
		}

		/* preserve IF_COUNTER flag */
		qpc->pri_path.vlan_control &=
			MLX4_CTRL_ETH_SRC_CHECK_IF_COUNTER;
		if (vp_oper->state.link_state == IFLA_VF_LINK_STATE_DISABLE &&
		    dev->caps.flags2 & MLX4_DEV_CAP_FLAG2_UPDATE_QP) {
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		} else if (0 != vp_oper->state.default_vlan) {
			if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD)) {
				/* vst QinQ should block untagged on TX,
				 * but cvlan is in payload and phv is set so
				 * hw sees it as untagged. Block tagged instead.
				 */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			} else { /* vst 802.1Q */
				qpc->pri_path.vlan_control |=
					MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
					MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
			}
		} else { /* priority tagged */
			qpc->pri_path.vlan_control |=
				MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
				MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
		}

		qpc->pri_path.fvl_rx |= MLX4_FVL_RX_FORCE_ETH_VLAN;
		qpc->pri_path.vlan_index = vp_oper->vlan_idx;
		qpc->pri_path.fl |= MLX4_FL_ETH_HIDE_CQE_VLAN;
		if (vp_oper->state.vlan_proto == htons(ETH_P_8021AD))
			qpc->pri_path.fl |= MLX4_FL_SV;
		else
			qpc->pri_path.fl |= MLX4_FL_CV;
		qpc->pri_path.feup |= MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
		qpc->pri_path.sched_queue &= 0xC7;
		qpc->pri_path.sched_queue |= (vp_oper->state.default_qos) << 3;
		qpc->qos_vport = vp_oper->state.qos_vport;
	}
	if (vp_oper->state.spoofchk) {
		qpc->pri_path.feup |= MLX4_FSM_FORCE_ETH_SRC_MAC;
		qpc->pri_path.grh_mylmc = (0x80 & qpc->pri_path.grh_mylmc) + vp_oper->mac_idx;
	}
out:
	return err;
}

static int mpt_mask(struct mlx4_dev *dev)
{
	return dev->caps.num_mpts - 1;
}

static const char *mlx4_resource_type_to_str(enum mlx4_resource t)
{
	switch (t) {
	case RES_QP:
		return "QP";
	case RES_CQ:
		return "CQ";
	case RES_SRQ:
		return "SRQ";
	case RES_XRCD:
		return "XRCD";
	case RES_MPT:
		return "MPT";
	case RES_MTT:
		return "MTT";
	case RES_MAC:
		return "MAC";
	case RES_VLAN:
		return "VLAN";
	case RES_COUNTER:
		return "COUNTER";
	case RES_FS_RULE:
		return "FS_RULE";
	case RES_EQ:
		return "EQ";
	default:
		return "INVALID RESOURCE";
	}
}

static void *find_res(struct mlx4_dev *dev, u64 res_id,
		      enum mlx4_resource type)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	return res_tracker_lookup(&priv->mfunc.master.res_tracker.res_tree[type],
				  res_id);
}

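/* Take exclusive hold of a tracked resource on behalf of @slave: the
 * entry moves to RES_ANY_BUSY and records its previous state plus the
 * caller's name (for contention diagnostics) until put_res() releases it.
 */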
static int _get_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type,
		    void *res, const char *func_name)
{
	struct res_common *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (!r) {
		err = -ENONET;
		goto exit;
	}

	if (r->state == RES_ANY_BUSY) {
		mlx4_warn(dev,
			  "%s(%d) trying to get resource %llx of type %s, but it's already taken by %s\n",
			  func_name, slave, res_id, mlx4_resource_type_to_str(type),
			  r->func_name);
		err = -EBUSY;
		goto exit;
	}

	if (r->owner != slave) {
		err = -EPERM;
		goto exit;
	}

	r->from_state = r->state;
	r->state = RES_ANY_BUSY;
	r->func_name = func_name;

	if (res)
		*((struct res_common **)res) = r;

exit:
	spin_unlock_irq(mlx4_tlock(dev));
	return err;
}

#define get_res(dev, slave, res_id, type, res) \
	_get_res((dev), (slave), (res_id), (type), (res), __func__)

int mlx4_get_slave_from_resource_id(struct mlx4_dev *dev,
				    enum mlx4_resource type,
				    u64 res_id, int *slave)
{
	struct res_common *r;
	int err = -ENOENT;
	int id = res_id;

	if (type == RES_QP)
		id &= 0x7fffff;
	spin_lock(mlx4_tlock(dev));

	r = find_res(dev, id, type);
	if (r) {
		*slave = r->owner;
		err = 0;
	}
	spin_unlock(mlx4_tlock(dev));

	return err;
}

static void put_res(struct mlx4_dev *dev, int slave, u64 res_id,
		    enum mlx4_resource type)
{
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, res_id, type);
	if (r) {
		r->state = r->from_state;
		r->func_name = "";
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
			     u64 in_param, u64 *out_param, int port);

static int handle_existing_counter(struct mlx4_dev *dev, u8 slave, int port,
				   int counter_index)
{
	struct res_common *r;
	struct res_counter *counter;
	int ret = 0;

	if (counter_index == MLX4_SINK_COUNTER_INDEX(dev))
		return ret;

	spin_lock_irq(mlx4_tlock(dev));
	r = find_res(dev, counter_index, RES_COUNTER);
	if (!r || r->owner != slave) {
		ret = -EINVAL;
	} else {
		counter = container_of(r, struct res_counter, com);
		if (!counter->port)
			counter->port = port;
	}

	spin_unlock_irq(mlx4_tlock(dev));
	return ret;
}

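/* A QP was given the sink counter index: reuse a counter this slave
 * already owns on @port, or reserve a fresh one. If no counter can be
 * had (-ENOENT/-ENOSPC) the QP simply keeps the sink counter.
 */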
static int handle_unexisting_counter(struct mlx4_dev *dev,
				     struct mlx4_qp_context *qpc, u8 slave,
				     int port)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	u64 counter_idx = MLX4_SINK_COUNTER_INDEX(dev);
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (port == counter->port) {
			qpc->pri_path.counter_index = counter->com.res_id;
			spin_unlock_irq(mlx4_tlock(dev));
			return 0;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));

	/* No existing counter, need to allocate a new counter */
	err = counter_alloc_res(dev, slave, RES_OP_RESERVE, 0, 0, &counter_idx,
				port);
	if (err == -ENOENT) {
		err = 0;
	} else if (err && err != -ENOSPC) {
		mlx4_err(dev, "%s: failed to create new counter for slave %d err %d\n",
			 __func__, slave, err);
	} else {
		qpc->pri_path.counter_index = counter_idx;
		mlx4_dbg(dev, "%s: alloc new counter for slave %d index %d\n",
			 __func__, slave, qpc->pri_path.counter_index);
		err = 0;
	}

	return err;
}

static int handle_counter(struct mlx4_dev *dev, struct mlx4_qp_context *qpc,
			  u8 slave, int port)
{
	if (qpc->pri_path.counter_index != MLX4_SINK_COUNTER_INDEX(dev))
		return handle_existing_counter(dev, slave, port,
					       qpc->pri_path.counter_index);

	return handle_unexisting_counter(dev, qpc, slave, port);
}

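/* alloc_*_tr() helpers: allocate and initialize one tracker entry per
 * resource type, in its initial (reserved/allocated) state; alloc_tr()
 * below dispatches on the type.
 */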
static struct res_common *alloc_qp_tr(int id)
{
	struct res_qp *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_QP_RESERVED;
	ret->local_qpn = id;
	INIT_LIST_HEAD(&ret->mcg_list);
	spin_lock_init(&ret->mcg_spl);
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mtt_tr(int id, int order)
{
	struct res_mtt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->order = order;
	ret->com.state = RES_MTT_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_mpt_tr(int id, int key)
{
	struct res_mpt *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_MPT_RESERVED;
	ret->key = key;

	return &ret->com;
}

static struct res_common *alloc_eq_tr(int id)
{
	struct res_eq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_EQ_RESERVED;

	return &ret->com;
}

static struct res_common *alloc_cq_tr(int id)
{
	struct res_cq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_CQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_srq_tr(int id)
{
	struct res_srq *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_SRQ_ALLOCATED;
	atomic_set(&ret->ref_count, 0);

	return &ret->com;
}

static struct res_common *alloc_counter_tr(int id, int port)
{
	struct res_counter *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_COUNTER_ALLOCATED;
	ret->port = port;

	return &ret->com;
}

static struct res_common *alloc_xrcdn_tr(int id)
{
	struct res_xrcdn *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_XRCD_ALLOCATED;

	return &ret->com;
}

static struct res_common *alloc_fs_rule_tr(u64 id, int qpn)
{
	struct res_fs_rule *ret;

	ret = kzalloc(sizeof(*ret), GFP_KERNEL);
	if (!ret)
		return NULL;

	ret->com.res_id = id;
	ret->com.state = RES_FS_RULE_ALLOCATED;
	ret->qpn = qpn;
	return &ret->com;
}

static struct res_common *alloc_tr(u64 id, enum mlx4_resource type, int slave,
				   int extra)
{
	struct res_common *ret;

	switch (type) {
	case RES_QP:
		ret = alloc_qp_tr(id);
		break;
	case RES_MPT:
		ret = alloc_mpt_tr(id, extra);
		break;
	case RES_MTT:
		ret = alloc_mtt_tr(id, extra);
		break;
	case RES_EQ:
		ret = alloc_eq_tr(id);
		break;
	case RES_CQ:
		ret = alloc_cq_tr(id);
		break;
	case RES_SRQ:
		ret = alloc_srq_tr(id);
		break;
	case RES_MAC:
		pr_err("implementation missing\n");
		return NULL;
	case RES_COUNTER:
		ret = alloc_counter_tr(id, extra);
		break;
	case RES_XRCD:
		ret = alloc_xrcdn_tr(id);
		break;
	case RES_FS_RULE:
		ret = alloc_fs_rule_tr(id, extra);
		break;
	default:
		return NULL;
	}
	if (ret)
		ret->owner = slave;

	return ret;
}

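/* Sum the stats of every counter this VF owns on @port into @data.
 * The counter ids are snapshotted under the tracker lock first, since
 * mlx4_get_counter_stats() issues a firmware command and cannot be
 * called with the spinlock held.
 */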
int mlx4_calc_vf_counters(struct mlx4_dev *dev, int slave, int port,
			  struct mlx4_counter *data)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *tmp;
	struct res_counter *counter;
	int *counters_arr;
	int i = 0, err = 0;

	memset(data, 0, sizeof(*data));

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return -ENOMEM;

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry(tmp,
			    &tracker->slave_list[slave].res_list[RES_COUNTER],
			    list) {
		counter = container_of(tmp, struct res_counter, com);
		if (counter->port == port) {
			counters_arr[i] = (int)tmp->res_id;
			i++;
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
	counters_arr[i] = -1;

	i = 0;

	while (counters_arr[i] != -1) {
		err = mlx4_get_counter_stats(dev, counters_arr[i], data,
					     0);
		if (err) {
			memset(data, 0, sizeof(*data));
			goto table_changed;
		}
		i++;
	}

table_changed:
	kfree(counters_arr);
	return 0;
}

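/* Register @count consecutive resource ids starting at @base to @slave:
 * allocate tracker entries up front, then insert them into the rb-tree
 * and the slave's list atomically, unwinding on any collision.
 */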
static int add_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	int i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct res_common **res_arr;
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[type];

	res_arr = kcalloc(count, sizeof(*res_arr), GFP_KERNEL);
	if (!res_arr)
		return -ENOMEM;

	for (i = 0; i < count; ++i) {
		res_arr[i] = alloc_tr(base + i, type, slave, extra);
		if (!res_arr[i]) {
			for (--i; i >= 0; --i)
				kfree(res_arr[i]);

			kfree(res_arr);
			return -ENOMEM;
		}
	}

	spin_lock_irq(mlx4_tlock(dev));
	for (i = 0; i < count; ++i) {
		if (find_res(dev, base + i, type)) {
			err = -EEXIST;
			goto undo;
		}
		err = res_tracker_insert(root, res_arr[i]);
		if (err)
			goto undo;
		list_add_tail(&res_arr[i]->list,
			      &tracker->slave_list[slave].res_list[type]);
	}
	spin_unlock_irq(mlx4_tlock(dev));
	kfree(res_arr);

	return 0;

undo:
	for (--i; i >= 0; --i) {
		rb_erase(&res_arr[i]->node, root);
		list_del_init(&res_arr[i]->list);
	}

	spin_unlock_irq(mlx4_tlock(dev));

	for (i = 0; i < count; ++i)
		kfree(res_arr[i]);

	kfree(res_arr);

	return err;
}

static int remove_qp_ok(struct res_qp *res)
{
	if (res->com.state == RES_QP_BUSY || atomic_read(&res->ref_count) ||
	    !list_empty(&res->mcg_list)) {
		pr_err("resource tracker: fail to remove qp, state %d, ref_count %d\n",
		       res->com.state, atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_QP_RESERVED) {
		return -EPERM;
	}

	return 0;
}

static int remove_mtt_ok(struct res_mtt *res, int order)
{
	if (res->com.state == RES_MTT_BUSY ||
	    atomic_read(&res->ref_count)) {
		pr_devel("%s-%d: state %s, ref_count %d\n",
			 __func__, __LINE__,
			 mtt_states_str(res->com.state),
			 atomic_read(&res->ref_count));
		return -EBUSY;
	} else if (res->com.state != RES_MTT_ALLOCATED)
		return -EPERM;
	else if (res->order != order)
		return -EINVAL;

	return 0;
}

static int remove_mpt_ok(struct res_mpt *res)
{
	if (res->com.state == RES_MPT_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_MPT_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_eq_ok(struct res_eq *res)
{
	if (res->com.state == RES_EQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_EQ_RESERVED)
		return -EPERM;

	return 0;
}

static int remove_counter_ok(struct res_counter *res)
{
	if (res->com.state == RES_COUNTER_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_COUNTER_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_xrcdn_ok(struct res_xrcdn *res)
{
	if (res->com.state == RES_XRCD_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_XRCD_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_fs_rule_ok(struct res_fs_rule *res)
{
	if (res->com.state == RES_FS_RULE_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_FS_RULE_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_cq_ok(struct res_cq *res)
{
	if (res->com.state == RES_CQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_CQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_srq_ok(struct res_srq *res)
{
	if (res->com.state == RES_SRQ_BUSY)
		return -EBUSY;
	else if (res->com.state != RES_SRQ_ALLOCATED)
		return -EPERM;

	return 0;
}

static int remove_ok(struct res_common *res, enum mlx4_resource type, int extra)
{
	switch (type) {
	case RES_QP:
		return remove_qp_ok((struct res_qp *)res);
	case RES_CQ:
		return remove_cq_ok((struct res_cq *)res);
	case RES_SRQ:
		return remove_srq_ok((struct res_srq *)res);
	case RES_MPT:
		return remove_mpt_ok((struct res_mpt *)res);
	case RES_MTT:
		return remove_mtt_ok((struct res_mtt *)res, extra);
	case RES_MAC:
		return -EOPNOTSUPP;
	case RES_EQ:
		return remove_eq_ok((struct res_eq *)res);
	case RES_COUNTER:
		return remove_counter_ok((struct res_counter *)res);
	case RES_XRCD:
		return remove_xrcdn_ok((struct res_xrcdn *)res);
	case RES_FS_RULE:
		return remove_fs_rule_ok((struct res_fs_rule *)res);
	default:
		return -EINVAL;
	}
}

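/* Unregister @count consecutive resource ids starting at @base from
 * @slave: first validate ownership and removability of the whole range,
 * then erase and free the entries, all under a single hold of the lock.
 */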
static int rem_res_range(struct mlx4_dev *dev, int slave, u64 base, int count,
			 enum mlx4_resource type, int extra)
{
	u64 i;
	int err;
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_common *r;

	spin_lock_irq(mlx4_tlock(dev));
	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		if (!r) {
			err = -ENOENT;
			goto out;
		}
		if (r->owner != slave) {
			err = -EPERM;
			goto out;
		}
		err = remove_ok(r, type, extra);
		if (err)
			goto out;
	}

	for (i = base; i < base + count; ++i) {
		r = res_tracker_lookup(&tracker->res_tree[type], i);
		rb_erase(&r->node, &tracker->res_tree[type]);
		list_del(&r->list);
		kfree(r);
	}
	err = 0;

out:
	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

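/* The *_res_start_move_to() helpers begin a guarded state transition:
 * they validate the requested transition against the resource's current
 * state, park the entry in its BUSY state, and record from/to so the
 * move can be committed or aborted once the firmware command completes.
 */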
static int qp_res_start_move_to(struct mlx4_dev *dev, int slave, int qpn,
				enum res_qp_states state, struct res_qp **qp,
				int alloc)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_qp *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_QP], qpn);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_QP_BUSY:
			mlx4_dbg(dev, "%s: failed RES_QP, 0x%llx\n",
				 __func__, r->com.res_id);
			err = -EBUSY;
			break;

		case RES_QP_RESERVED:
			if (r->com.state == RES_QP_MAPPED && !alloc)
				break;

			mlx4_dbg(dev, "failed RES_QP, 0x%llx\n", r->com.res_id);
			err = -EINVAL;
			break;

		case RES_QP_MAPPED:
			if ((r->com.state == RES_QP_RESERVED && alloc) ||
			    r->com.state == RES_QP_HW)
				break;
			else {
				mlx4_dbg(dev, "failed RES_QP, 0x%llx\n",
					 r->com.res_id);
				err = -EINVAL;
			}

			break;

		case RES_QP_HW:
			if (r->com.state != RES_QP_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_QP_BUSY;
			if (qp)
				*qp = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int mr_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_mpt_states state, struct res_mpt **mpt)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_mpt *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_MPT], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_MPT_BUSY:
			err = -EINVAL;
			break;

		case RES_MPT_RESERVED:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;

		case RES_MPT_MAPPED:
			if (r->com.state != RES_MPT_RESERVED &&
			    r->com.state != RES_MPT_HW)
				err = -EINVAL;
			break;

		case RES_MPT_HW:
			if (r->com.state != RES_MPT_MAPPED)
				err = -EINVAL;
			break;
		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_MPT_BUSY;
			if (mpt)
				*mpt = r;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int eq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				enum res_eq_states state, struct res_eq **eq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_eq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_EQ], index);
	if (!r)
		err = -ENOENT;
	else if (r->com.owner != slave)
		err = -EPERM;
	else {
		switch (state) {
		case RES_EQ_BUSY:
			err = -EINVAL;
			break;

		case RES_EQ_RESERVED:
			if (r->com.state != RES_EQ_HW)
				err = -EINVAL;
			break;

		case RES_EQ_HW:
			if (r->com.state != RES_EQ_RESERVED)
				err = -EINVAL;
			break;

		default:
			err = -EINVAL;
		}

		if (!err) {
			r->com.from_state = r->com.state;
			r->com.to_state = state;
			r->com.state = RES_EQ_BUSY;
		}
	}

	spin_unlock_irq(mlx4_tlock(dev));

	if (!err && eq)
		*eq = r;

	return err;
}

static int cq_res_start_move_to(struct mlx4_dev *dev, int slave, int cqn,
				enum res_cq_states state, struct res_cq **cq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_cq *r;
	int err;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_CQ], cqn);
	if (!r) {
		err = -ENOENT;
	} else if (r->com.owner != slave) {
		err = -EPERM;
	} else if (state == RES_CQ_ALLOCATED) {
		if (r->com.state != RES_CQ_HW)
			err = -EINVAL;
		else if (atomic_read(&r->ref_count))
			err = -EBUSY;
		else
			err = 0;
	} else if (state != RES_CQ_HW || r->com.state != RES_CQ_ALLOCATED) {
		err = -EINVAL;
	} else {
		err = 0;
	}

	if (!err) {
		r->com.from_state = r->com.state;
		r->com.to_state = state;
		r->com.state = RES_CQ_BUSY;
		if (cq)
			*cq = r;
	}

	spin_unlock_irq(mlx4_tlock(dev));

	return err;
}

static int srq_res_start_move_to(struct mlx4_dev *dev, int slave, int index,
				 enum res_srq_states state, struct res_srq **srq)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct res_srq *r;
	int err = 0;

	spin_lock_irq(mlx4_tlock(dev));
	r = res_tracker_lookup(&tracker->res_tree[RES_SRQ], index);
Paul Bollef088cbb2014-01-14 20:46:52 +01001709 if (!r) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001710 err = -ENOENT;
Paul Bollef088cbb2014-01-14 20:46:52 +01001711 } else if (r->com.owner != slave) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001712 err = -EPERM;
Paul Bollef088cbb2014-01-14 20:46:52 +01001713 } else if (state == RES_SRQ_ALLOCATED) {
1714 if (r->com.state != RES_SRQ_HW)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001715 err = -EINVAL;
Paul Bollef088cbb2014-01-14 20:46:52 +01001716 else if (atomic_read(&r->ref_count))
1717 err = -EBUSY;
1718 } else if (state != RES_SRQ_HW || r->com.state != RES_SRQ_ALLOCATED) {
1719 err = -EINVAL;
1720 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001721
Paul Bollef088cbb2014-01-14 20:46:52 +01001722 if (!err) {
1723 r->com.from_state = r->com.state;
1724 r->com.to_state = state;
1725 r->com.state = RES_SRQ_BUSY;
1726 if (srq)
1727 *srq = r;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001728 }
1729
1730 spin_unlock_irq(mlx4_tlock(dev));
1731
1732 return err;
1733}
1734
1735static void res_abort_move(struct mlx4_dev *dev, int slave,
1736 enum mlx4_resource type, int id)
1737{
1738 struct mlx4_priv *priv = mlx4_priv(dev);
1739 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1740 struct res_common *r;
1741
1742 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001743 r = res_tracker_lookup(&tracker->res_tree[type], id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001744 if (r && (r->owner == slave))
1745 r->state = r->from_state;
1746 spin_unlock_irq(mlx4_tlock(dev));
1747}
1748
1749static void res_end_move(struct mlx4_dev *dev, int slave,
1750 enum mlx4_resource type, int id)
1751{
1752 struct mlx4_priv *priv = mlx4_priv(dev);
1753 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
1754 struct res_common *r;
1755
1756 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00001757 r = res_tracker_lookup(&tracker->res_tree[type], id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001758 if (r && (r->owner == slave))
1759 r->state = r->to_state;
1760 spin_unlock_irq(mlx4_tlock(dev));
1761}
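
/*
 * Illustrative sketch (not part of the driver): the movers above are
 * used as a three-step transaction.  *_res_start_move_to() parks the
 * resource in the transient BUSY state, the caller performs the
 * firmware work, and then either commits with res_end_move() or rolls
 * back with res_abort_move().  example_fw_step() is a hypothetical
 * stand-in for the real FW command.
 */
static int example_fw_step(struct mlx4_dev *dev, int qpn)
{
	return 0;	/* placeholder for the actual firmware command */
}

static int __maybe_unused example_qp_transition(struct mlx4_dev *dev,
						int slave, int qpn)
{
	int err;

	err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, NULL, 1);
	if (err)
		return err;	/* tracker state is unchanged */

	err = example_fw_step(dev, qpn);
	if (err)
		res_abort_move(dev, slave, RES_QP, qpn); /* restore from_state */
	else
		res_end_move(dev, slave, RES_QP, qpn);	 /* commit to_state */
	return err;
}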
1762
1763static int valid_reserved(struct mlx4_dev *dev, int slave, int qpn)
1764{
Jack Morgensteine2c76822012-08-03 08:40:41 +00001765 return mlx4_is_qp_reserved(dev, qpn) &&
1766 (mlx4_is_master(dev) || mlx4_is_guest_proxy(dev, slave, qpn));
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001767}
1768
Jack Morgenstein54679e12012-08-03 08:40:43 +00001769static int fw_reserved(struct mlx4_dev *dev, int qpn)
1770{
1771 return qpn < dev->caps.reserved_qps_cnt[MLX4_QP_REGION_FW];
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001772}
1773
1774static int qp_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1775 u64 in_param, u64 *out_param)
1776{
1777 int err;
1778 int count;
1779 int align;
1780 int base;
1781 int qpn;
Eugenia Emantayevddae0342014-12-11 10:57:54 +02001782 u8 flags;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001783
1784 switch (op) {
1785 case RES_OP_RESERVE:
Jack Morgenstein2d5c57d2014-11-25 11:54:31 +02001786 count = get_param_l(&in_param) & 0xffffff;
Eugenia Emantayevddae0342014-12-11 10:57:54 +02001787 /* Turn off all unsupported QP allocation flags that the
1788 * slave tries to set.
1789 */
1790 flags = (get_param_l(&in_param) >> 24) & dev->caps.alloc_res_qp_mask;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001791 align = get_param_h(&in_param);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001792 err = mlx4_grant_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001793 if (err)
1794 return err;
1795
Eugenia Emantayevddae0342014-12-11 10:57:54 +02001796 err = __mlx4_qp_reserve_range(dev, count, align, &base, flags);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001797 if (err) {
1798 mlx4_release_resource(dev, slave, RES_QP, count, 0);
1799 return err;
1800 }
1801
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001802 err = add_res_range(dev, slave, base, count, RES_QP, 0);
1803 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001804 mlx4_release_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001805 __mlx4_qp_release_range(dev, base, count);
1806 return err;
1807 }
1808 set_param_l(out_param, base);
1809 break;
1810 case RES_OP_MAP_ICM:
1811 qpn = get_param_l(&in_param) & 0x7fffff;
1812 if (valid_reserved(dev, slave, qpn)) {
1813 err = add_res_range(dev, slave, qpn, 1, RES_QP, 0);
1814 if (err)
1815 return err;
1816 }
1817
1818 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED,
1819 NULL, 1);
1820 if (err)
1821 return err;
1822
Jack Morgenstein54679e12012-08-03 08:40:43 +00001823 if (!fw_reserved(dev, qpn)) {
Leon Romanovsky8900b892017-05-23 14:38:15 +03001824 err = __mlx4_qp_alloc_icm(dev, qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001825 if (err) {
1826 res_abort_move(dev, slave, RES_QP, qpn);
1827 return err;
1828 }
1829 }
1830
1831 res_end_move(dev, slave, RES_QP, qpn);
1832 break;
1833
1834 default:
1835 err = -EINVAL;
1836 break;
1837 }
1838 return err;
1839}
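
/*
 * Illustrative sketch (not driver code): qp_alloc_res() above, like
 * the other allocators in this file, follows the same
 * quota-then-rollback shape.  Schematically, for a single QP:
 */
static int __maybe_unused example_alloc_pattern(struct mlx4_dev *dev,
						int slave)
{
	int base, err;

	err = mlx4_grant_resource(dev, slave, RES_QP, 1, 0);	/* quota */
	if (err)
		return err;
	err = __mlx4_qp_reserve_range(dev, 1, 1, &base, 0);	/* HW range */
	if (err)
		goto undo_grant;
	err = add_res_range(dev, slave, base, 1, RES_QP, 0);	/* track */
	if (err)
		goto undo_reserve;
	return 0;

undo_reserve:
	__mlx4_qp_release_range(dev, base, 1);
undo_grant:
	mlx4_release_resource(dev, slave, RES_QP, 1, 0);
	return err;
}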
1840
1841static int mtt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1842 u64 in_param, u64 *out_param)
1843{
1844 int err = -EINVAL;
1845 int base;
1846 int order;
1847
1848 if (op != RES_OP_RESERVE_AND_MAP)
1849 return err;
1850
1851 order = get_param_l(&in_param);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001852
1853 err = mlx4_grant_resource(dev, slave, RES_MTT, 1 << order, 0);
1854 if (err)
1855 return err;
1856
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001857 base = __mlx4_alloc_mtt_range(dev, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001858 if (base == -1) {
1859 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001860 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001861 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001862
1863 err = add_res_range(dev, slave, base, 1, RES_MTT, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001864 if (err) {
1865 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001866 __mlx4_free_mtt_range(dev, base, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001867 } else {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001868 set_param_l(out_param, base);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001869 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001870
1871 return err;
1872}
1873
1874static int mpt_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1875 u64 in_param, u64 *out_param)
1876{
1877 int err = -EINVAL;
1878 int index;
1879 int id;
1880 struct res_mpt *mpt;
1881
1882 switch (op) {
1883 case RES_OP_RESERVE:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001884 err = mlx4_grant_resource(dev, slave, RES_MPT, 1, 0);
1885 if (err)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001886 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001887
1888 index = __mlx4_mpt_reserve(dev);
1889 if (index == -1) {
1890 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
1891 break;
1892 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001893 id = index & mpt_mask(dev);
1894
1895 err = add_res_range(dev, slave, id, 1, RES_MPT, index);
1896 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001897 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00001898 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001899 break;
1900 }
1901 set_param_l(out_param, index);
1902 break;
1903 case RES_OP_MAP_ICM:
1904 index = get_param_l(&in_param);
1905 id = index & mpt_mask(dev);
1906 err = mr_res_start_move_to(dev, slave, id,
1907 RES_MPT_MAPPED, &mpt);
1908 if (err)
1909 return err;
1910
Leon Romanovsky8900b892017-05-23 14:38:15 +03001911 err = __mlx4_mpt_alloc_icm(dev, mpt->key);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001912 if (err) {
1913 res_abort_move(dev, slave, RES_MPT, id);
1914 return err;
1915 }
1916
1917 res_end_move(dev, slave, RES_MPT, id);
1918 break;
1919 }
1920 return err;
1921}
1922
1923static int cq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1924 u64 in_param, u64 *out_param)
1925{
1926 int cqn;
1927 int err;
1928
1929 switch (op) {
1930 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001931 err = mlx4_grant_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001932 if (err)
1933 break;
1934
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001935 err = __mlx4_cq_alloc_icm(dev, &cqn);
1936 if (err) {
1937 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
1938 break;
1939 }
1940
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001941 err = add_res_range(dev, slave, cqn, 1, RES_CQ, 0);
1942 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001943 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001944 __mlx4_cq_free_icm(dev, cqn);
1945 break;
1946 }
1947
1948 set_param_l(out_param, cqn);
1949 break;
1950
1951 default:
1952 err = -EINVAL;
1953 }
1954
1955 return err;
1956}
1957
1958static int srq_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
1959 u64 in_param, u64 *out_param)
1960{
1961 int srqn;
1962 int err;
1963
1964 switch (op) {
1965 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001966 err = mlx4_grant_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001967 if (err)
1968 break;
1969
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001970 err = __mlx4_srq_alloc_icm(dev, &srqn);
1971 if (err) {
1972 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
1973 break;
1974 }
1975
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001976 err = add_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
1977 if (err) {
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02001978 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001979 __mlx4_srq_free_icm(dev, srqn);
1980 break;
1981 }
1982
1983 set_param_l(out_param, srqn);
1984 break;
1985
1986 default:
1987 err = -EINVAL;
1988 }
1989
1990 return err;
1991}
1992
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001993static int mac_find_smac_ix_in_slave(struct mlx4_dev *dev, int slave, int port,
1994 u8 smac_index, u64 *mac)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00001995{
1996 struct mlx4_priv *priv = mlx4_priv(dev);
1997 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02001998 struct list_head *mac_list =
1999 &tracker->slave_list[slave].res_list[RES_MAC];
2000 struct mac_res *res, *tmp;
2001
2002 list_for_each_entry_safe(res, tmp, mac_list, list) {
2003 if (res->smac_index == smac_index && res->port == (u8) port) {
2004 *mac = res->mac;
2005 return 0;
2006 }
2007 }
2008 return -ENOENT;
2009}
2010
2011static int mac_add_to_slave(struct mlx4_dev *dev, int slave, u64 mac, int port, u8 smac_index)
2012{
2013 struct mlx4_priv *priv = mlx4_priv(dev);
2014 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2015 struct list_head *mac_list =
2016 &tracker->slave_list[slave].res_list[RES_MAC];
2017 struct mac_res *res, *tmp;
2018
2019 list_for_each_entry_safe(res, tmp, mac_list, list) {
2020 if (res->mac == mac && res->port == (u8) port) {
2021 /* mac found. update ref count */
2022 ++res->ref_count;
2023 return 0;
2024 }
2025 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002026
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002027 if (mlx4_grant_resource(dev, slave, RES_MAC, 1, port))
2028 return -EINVAL;
stephen hemminger31975e22017-08-15 10:29:19 -07002029 res = kzalloc(sizeof(*res), GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002030 if (!res) {
2031 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002032 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002033 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002034 res->mac = mac;
2035 res->port = (u8) port;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002036 res->smac_index = smac_index;
2037 res->ref_count = 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002038 list_add_tail(&res->list,
2039 &tracker->slave_list[slave].res_list[RES_MAC]);
2040 return 0;
2041}
2042
2043static void mac_del_from_slave(struct mlx4_dev *dev, int slave, u64 mac,
2044 int port)
2045{
2046 struct mlx4_priv *priv = mlx4_priv(dev);
2047 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2048 struct list_head *mac_list =
2049 &tracker->slave_list[slave].res_list[RES_MAC];
2050 struct mac_res *res, *tmp;
2051
2052 list_for_each_entry_safe(res, tmp, mac_list, list) {
2053 if (res->mac == mac && res->port == (u8) port) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002054 if (!--res->ref_count) {
2055 list_del(&res->list);
2056 mlx4_release_resource(dev, slave, RES_MAC, 1, port);
2057 kfree(res);
2058 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002059 break;
2060 }
2061 }
2062}
2063
2064static void rem_slave_macs(struct mlx4_dev *dev, int slave)
2065{
2066 struct mlx4_priv *priv = mlx4_priv(dev);
2067 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2068 struct list_head *mac_list =
2069 &tracker->slave_list[slave].res_list[RES_MAC];
2070 struct mac_res *res, *tmp;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002071 int i;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002072
2073 list_for_each_entry_safe(res, tmp, mac_list, list) {
2074 list_del(&res->list);
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002075 /* dereference the MAC as many times as the slave referenced it */
2076 for (i = 0; i < res->ref_count; i++)
2077 __mlx4_unregister_mac(dev, res->port, res->mac);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002078 mlx4_release_resource(dev, slave, RES_MAC, 1, res->port);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002079 kfree(res);
2080 }
2081}
2082
2083static int mac_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002084 u64 in_param, u64 *out_param, int in_port)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002085{
2086 int err = -EINVAL;
2087 int port;
2088 u64 mac;
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002089 u8 smac_index;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002090
2091 if (op != RES_OP_RESERVE_AND_MAP)
2092 return err;
2093
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002094 port = !in_port ? get_param_l(out_param) : in_port;
Matan Barak449fc482014-03-19 18:11:52 +02002095 port = mlx4_slave_convert_port(
2096 dev, slave, port);
2097
2098 if (port < 0)
2099 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002100 mac = in_param;
2101
2102 err = __mlx4_register_mac(dev, port, mac);
2103 if (err >= 0) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002104 smac_index = err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002105 set_param_l(out_param, err);
2106 err = 0;
2107 }
2108
2109 if (!err) {
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02002110 err = mac_add_to_slave(dev, slave, mac, port, smac_index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002111 if (err)
2112 __mlx4_unregister_mac(dev, port, mac);
2113 }
2114 return err;
2115}
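
/*
 * Note: a non-negative return from __mlx4_register_mac() is the SMAC
 * index assigned by the port table.  mac_alloc_res() both returns it
 * to the VF (via set_param_l) and records it per slave, so that
 * mac_find_smac_ix_in_slave() can later translate the index back to
 * the registered MAC.
 */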
2116
Jack Morgenstein48740802013-11-03 10:03:20 +02002117static int vlan_add_to_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2118 int port, int vlan_index)
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002119{
Jack Morgenstein48740802013-11-03 10:03:20 +02002120 struct mlx4_priv *priv = mlx4_priv(dev);
2121 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2122 struct list_head *vlan_list =
2123 &tracker->slave_list[slave].res_list[RES_VLAN];
2124 struct vlan_res *res, *tmp;
2125
2126 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2127 if (res->vlan == vlan && res->port == (u8) port) {
2128 /* vlan found. update ref count */
2129 ++res->ref_count;
2130 return 0;
2131 }
2132 }
2133
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002134 if (mlx4_grant_resource(dev, slave, RES_VLAN, 1, port))
2135 return -EINVAL;
Jack Morgenstein48740802013-11-03 10:03:20 +02002136 res = kzalloc(sizeof(*res), GFP_KERNEL);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002137 if (!res) {
2138 mlx4_release_resource(dev, slave, RES_VLAN, 1, port);
Jack Morgenstein48740802013-11-03 10:03:20 +02002139 return -ENOMEM;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002140 }
Jack Morgenstein48740802013-11-03 10:03:20 +02002141 res->vlan = vlan;
2142 res->port = (u8) port;
2143 res->vlan_index = vlan_index;
2144 res->ref_count = 1;
2145 list_add_tail(&res->list,
2146 &tracker->slave_list[slave].res_list[RES_VLAN]);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002147 return 0;
2148}
2149
Jack Morgenstein48740802013-11-03 10:03:20 +02002150
2151static void vlan_del_from_slave(struct mlx4_dev *dev, int slave, u16 vlan,
2152 int port)
2153{
2154 struct mlx4_priv *priv = mlx4_priv(dev);
2155 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2156 struct list_head *vlan_list =
2157 &tracker->slave_list[slave].res_list[RES_VLAN];
2158 struct vlan_res *res, *tmp;
2159
2160 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2161 if (res->vlan == vlan && res->port == (u8) port) {
2162 if (!--res->ref_count) {
2163 list_del(&res->list);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002164 mlx4_release_resource(dev, slave, RES_VLAN,
2165 1, port);
Jack Morgenstein48740802013-11-03 10:03:20 +02002166 kfree(res);
2167 }
2168 break;
2169 }
2170 }
2171}
2172
2173static void rem_slave_vlans(struct mlx4_dev *dev, int slave)
2174{
2175 struct mlx4_priv *priv = mlx4_priv(dev);
2176 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
2177 struct list_head *vlan_list =
2178 &tracker->slave_list[slave].res_list[RES_VLAN];
2179 struct vlan_res *res, *tmp;
2180 int i;
2181
2182 list_for_each_entry_safe(res, tmp, vlan_list, list) {
2183 list_del(&res->list);
2184 /* dereference the VLAN as many times as the slave referenced it */
2185 for (i = 0; i < res->ref_count; i++)
2186 __mlx4_unregister_vlan(dev, res->port, res->vlan);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002187 mlx4_release_resource(dev, slave, RES_VLAN, 1, res->port);
Jack Morgenstein48740802013-11-03 10:03:20 +02002188 kfree(res);
2189 }
2190}
2191
2192static int vlan_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002193 u64 in_param, u64 *out_param, int in_port)
Jack Morgenstein48740802013-11-03 10:03:20 +02002194{
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002195 struct mlx4_priv *priv = mlx4_priv(dev);
2196 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
Jack Morgenstein48740802013-11-03 10:03:20 +02002197 int err;
2198 u16 vlan;
2199 int vlan_index;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002200 int port;
2201
2202 port = !in_port ? get_param_l(out_param) : in_port;
Jack Morgenstein48740802013-11-03 10:03:20 +02002203
2204 if (!port || op != RES_OP_RESERVE_AND_MAP)
2205 return -EINVAL;
2206
Matan Barak449fc482014-03-19 18:11:52 +02002207 port = mlx4_slave_convert_port(
2208 dev, slave, port);
2209
2210 if (port < 0)
2211 return -EINVAL;
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002212 /* older upstream kernels treated vlan reg/unreg as a NOP; preserve that */
2213 if (!in_port && port > 0 && port <= dev->caps.num_ports) {
2214 slave_state[slave].old_vlan_api = true;
2215 return 0;
2216 }
2217
Jack Morgenstein48740802013-11-03 10:03:20 +02002218 vlan = (u16) in_param;
2219
2220 err = __mlx4_register_vlan(dev, port, vlan, &vlan_index);
2221 if (!err) {
2222 set_param_l(out_param, (u32) vlan_index);
2223 err = vlan_add_to_slave(dev, slave, vlan, port, vlan_index);
2224 if (err)
2225 __mlx4_unregister_vlan(dev, port, vlan);
2226 }
2227 return err;
2228}
2229
Jack Morgensteinba062d52012-05-15 10:35:03 +00002230static int counter_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Eran Ben Elisha68230242015-06-15 17:59:01 +03002231 u64 in_param, u64 *out_param, int port)
Jack Morgensteinba062d52012-05-15 10:35:03 +00002232{
2233 u32 index;
2234 int err;
2235
2236 if (op != RES_OP_RESERVE)
2237 return -EINVAL;
2238
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002239 err = mlx4_grant_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002240 if (err)
2241 return err;
2242
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002243 err = __mlx4_counter_alloc(dev, &index);
2244 if (err) {
2245 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2246 return err;
2247 }
2248
Eran Ben Elisha68230242015-06-15 17:59:01 +03002249 err = add_res_range(dev, slave, index, 1, RES_COUNTER, port);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002250 if (err) {
Jack Morgensteinba062d52012-05-15 10:35:03 +00002251 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002252 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
2253 } else {
Jack Morgensteinba062d52012-05-15 10:35:03 +00002254 set_param_l(out_param, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002255 }
Jack Morgensteinba062d52012-05-15 10:35:03 +00002256
2257 return err;
2258}
2259
2260static int xrcdn_alloc_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2261 u64 in_param, u64 *out_param)
2262{
2263 u32 xrcdn;
2264 int err;
2265
2266 if (op != RES_OP_RESERVE)
2267 return -EINVAL;
2268
2269 err = __mlx4_xrcd_alloc(dev, &xrcdn);
2270 if (err)
2271 return err;
2272
2273 err = add_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2274 if (err)
2275 __mlx4_xrcd_free(dev, xrcdn);
2276 else
2277 set_param_l(out_param, xrcdn);
2278
2279 return err;
2280}
2281
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002282int mlx4_ALLOC_RES_wrapper(struct mlx4_dev *dev, int slave,
2283 struct mlx4_vhcr *vhcr,
2284 struct mlx4_cmd_mailbox *inbox,
2285 struct mlx4_cmd_mailbox *outbox,
2286 struct mlx4_cmd_info *cmd)
2287{
2288 int err;
2289 int alop = vhcr->op_modifier;
2290
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002291 switch (vhcr->in_modifier & 0xFF) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002292 case RES_QP:
2293 err = qp_alloc_res(dev, slave, vhcr->op_modifier, alop,
2294 vhcr->in_param, &vhcr->out_param);
2295 break;
2296
2297 case RES_MTT:
2298 err = mtt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2299 vhcr->in_param, &vhcr->out_param);
2300 break;
2301
2302 case RES_MPT:
2303 err = mpt_alloc_res(dev, slave, vhcr->op_modifier, alop,
2304 vhcr->in_param, &vhcr->out_param);
2305 break;
2306
2307 case RES_CQ:
2308 err = cq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2309 vhcr->in_param, &vhcr->out_param);
2310 break;
2311
2312 case RES_SRQ:
2313 err = srq_alloc_res(dev, slave, vhcr->op_modifier, alop,
2314 vhcr->in_param, &vhcr->out_param);
2315 break;
2316
2317 case RES_MAC:
2318 err = mac_alloc_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002319 vhcr->in_param, &vhcr->out_param,
2320 (vhcr->in_modifier >> 8) & 0xFF);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002321 break;
2322
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002323 case RES_VLAN:
2324 err = vlan_alloc_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002325 vhcr->in_param, &vhcr->out_param,
2326 (vhcr->in_modifier >> 8) & 0xFF);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002327 break;
2328
Jack Morgensteinba062d52012-05-15 10:35:03 +00002329 case RES_COUNTER:
2330 err = counter_alloc_res(dev, slave, vhcr->op_modifier, alop,
Eran Ben Elisha68230242015-06-15 17:59:01 +03002331 vhcr->in_param, &vhcr->out_param, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002332 break;
2333
2334 case RES_XRCD:
2335 err = xrcdn_alloc_res(dev, slave, vhcr->op_modifier, alop,
2336 vhcr->in_param, &vhcr->out_param);
2337 break;
2338
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002339 default:
2340 err = -EINVAL;
2341 break;
2342 }
2343
2344 return err;
2345}
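
/*
 * Illustrative sketch: how mlx4_ALLOC_RES_wrapper() (and its FREE
 * counterpart below) unpack vhcr->in_modifier.  The low byte selects
 * the resource type and the next byte optionally carries a port
 * number for MAC/VLAN requests.
 */
static inline void example_decode_in_modifier(u32 in_modifier,
					      u8 *type, u8 *port)
{
	*type = in_modifier & 0xFF;		/* RES_QP, RES_MAC, ... */
	*port = (in_modifier >> 8) & 0xFF;	/* 0 when not port-scoped */
}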
2346
2347static int qp_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2348 u64 in_param)
2349{
2350 int err;
2351 int count;
2352 int base;
2353 int qpn;
2354
2355 switch (op) {
2356 case RES_OP_RESERVE:
2357 base = get_param_l(&in_param) & 0x7fffff;
2358 count = get_param_h(&in_param);
2359 err = rem_res_range(dev, slave, base, count, RES_QP, 0);
2360 if (err)
2361 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002362 mlx4_release_resource(dev, slave, RES_QP, count, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002363 __mlx4_qp_release_range(dev, base, count);
2364 break;
2365 case RES_OP_MAP_ICM:
2366 qpn = get_param_l(&in_param) & 0x7fffff;
2367 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_RESERVED,
2368 NULL, 0);
2369 if (err)
2370 return err;
2371
Jack Morgenstein54679e12012-08-03 08:40:43 +00002372 if (!fw_reserved(dev, qpn))
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002373 __mlx4_qp_free_icm(dev, qpn);
2374
2375 res_end_move(dev, slave, RES_QP, qpn);
2376
2377 if (valid_reserved(dev, slave, qpn))
2378 err = rem_res_range(dev, slave, qpn, 1, RES_QP, 0);
2379 break;
2380 default:
2381 err = -EINVAL;
2382 break;
2383 }
2384 return err;
2385}
2386
2387static int mtt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2388 u64 in_param, u64 *out_param)
2389{
2390 int err = -EINVAL;
2391 int base;
2392 int order;
2393
2394 if (op != RES_OP_RESERVE_AND_MAP)
2395 return err;
2396
2397 base = get_param_l(&in_param);
2398 order = get_param_h(&in_param);
2399 err = rem_res_range(dev, slave, base, 1, RES_MTT, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002400 if (!err) {
2401 mlx4_release_resource(dev, slave, RES_MTT, 1 << order, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002402 __mlx4_free_mtt_range(dev, base, order);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002403 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002404 return err;
2405}
2406
2407static int mpt_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2408 u64 in_param)
2409{
2410 int err = -EINVAL;
2411 int index;
2412 int id;
2413 struct res_mpt *mpt;
2414
2415 switch (op) {
2416 case RES_OP_RESERVE:
2417 index = get_param_l(&in_param);
2418 id = index & mpt_mask(dev);
2419 err = get_res(dev, slave, id, RES_MPT, &mpt);
2420 if (err)
2421 break;
2422 index = mpt->key;
2423 put_res(dev, slave, id, RES_MPT);
2424
2425 err = rem_res_range(dev, slave, id, 1, RES_MPT, 0);
2426 if (err)
2427 break;
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002428 mlx4_release_resource(dev, slave, RES_MPT, 1, 0);
Shani Michaelib20e5192013-02-06 16:19:08 +00002429 __mlx4_mpt_release(dev, index);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002430 break;
2431 case RES_OP_MAP_ICM:
Christophe Jaillet5d4de162016-07-02 14:31:05 +02002432 index = get_param_l(&in_param);
2433 id = index & mpt_mask(dev);
2434 err = mr_res_start_move_to(dev, slave, id,
2435 RES_MPT_RESERVED, &mpt);
2436 if (err)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002437 return err;
Christophe Jaillet5d4de162016-07-02 14:31:05 +02002438
2439 __mlx4_mpt_free_icm(dev, mpt->key);
2440 res_end_move(dev, slave, RES_MPT, id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002441 break;
2442 default:
2443 err = -EINVAL;
2444 break;
2445 }
2446 return err;
2447}
2448
2449static int cq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2450 u64 in_param, u64 *out_param)
2451{
2452 int cqn;
2453 int err;
2454
2455 switch (op) {
2456 case RES_OP_RESERVE_AND_MAP:
2457 cqn = get_param_l(&in_param);
2458 err = rem_res_range(dev, slave, cqn, 1, RES_CQ, 0);
2459 if (err)
2460 break;
2461
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002462 mlx4_release_resource(dev, slave, RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002463 __mlx4_cq_free_icm(dev, cqn);
2464 break;
2465
2466 default:
2467 err = -EINVAL;
2468 break;
2469 }
2470
2471 return err;
2472}
2473
2474static int srq_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2475 u64 in_param, u64 *out_param)
2476{
2477 int srqn;
2478 int err;
2479
2480 switch (op) {
2481 case RES_OP_RESERVE_AND_MAP:
2482 srqn = get_param_l(&in_param);
2483 err = rem_res_range(dev, slave, srqn, 1, RES_SRQ, 0);
2484 if (err)
2485 break;
2486
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002487 mlx4_release_resource(dev, slave, RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002488 __mlx4_srq_free_icm(dev, srqn);
2489 break;
2490
2491 default:
2492 err = -EINVAL;
2493 break;
2494 }
2495
2496 return err;
2497}
2498
2499static int mac_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002500 u64 in_param, u64 *out_param, int in_port)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002501{
2502 int port;
2503 int err = 0;
2504
2505 switch (op) {
2506 case RES_OP_RESERVE_AND_MAP:
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002507 port = !in_port ? get_param_l(out_param) : in_port;
Matan Barak449fc482014-03-19 18:11:52 +02002508 port = mlx4_slave_convert_port(
2509 dev, slave, port);
2510
2511 if (port < 0)
2512 return -EINVAL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002513 mac_del_from_slave(dev, slave, in_param, port);
2514 __mlx4_unregister_mac(dev, port, in_param);
2515 break;
2516 default:
2517 err = -EINVAL;
2518 break;
2519 }
2520
2521 return err;
2522
2523}
2524
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002525static int vlan_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002526 u64 in_param, u64 *out_param, int port)
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002527{
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002528 struct mlx4_priv *priv = mlx4_priv(dev);
2529 struct mlx4_slave_state *slave_state = priv->mfunc.master.slave_state;
Jack Morgenstein48740802013-11-03 10:03:20 +02002530 int err = 0;
2531
Matan Barak449fc482014-03-19 18:11:52 +02002532 port = mlx4_slave_convert_port(
2533 dev, slave, port);
2534
2535 if (port < 0)
2536 return -EINVAL;
Jack Morgenstein48740802013-11-03 10:03:20 +02002537 switch (op) {
2538 case RES_OP_RESERVE_AND_MAP:
Jack Morgenstein2c957ff2013-11-03 10:03:21 +02002539 if (slave_state[slave].old_vlan_api)
2540 return 0;
Jack Morgenstein48740802013-11-03 10:03:20 +02002541 if (!port)
2542 return -EINVAL;
2543 vlan_del_from_slave(dev, slave, in_param, port);
2544 __mlx4_unregister_vlan(dev, port, in_param);
2545 break;
2546 default:
2547 err = -EINVAL;
2548 break;
2549 }
2550
2551 return err;
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002552}
2553
Jack Morgensteinba062d52012-05-15 10:35:03 +00002554static int counter_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2555 u64 in_param, u64 *out_param)
2556{
2557 int index;
2558 int err;
2559
2560 if (op != RES_OP_RESERVE)
2561 return -EINVAL;
2562
2563 index = get_param_l(&in_param);
Eran Ben Elisha9de92c62015-06-15 17:59:00 +03002564 if (index == MLX4_SINK_COUNTER_INDEX(dev))
2565 return 0;
2566
Jack Morgensteinba062d52012-05-15 10:35:03 +00002567 err = rem_res_range(dev, slave, index, 1, RES_COUNTER, 0);
2568 if (err)
2569 return err;
2570
2571 __mlx4_counter_free(dev, index);
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02002572 mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
Jack Morgensteinba062d52012-05-15 10:35:03 +00002573
2574 return err;
2575}
2576
2577static int xrcdn_free_res(struct mlx4_dev *dev, int slave, int op, int cmd,
2578 u64 in_param, u64 *out_param)
2579{
2580 int xrcdn;
2581 int err;
2582
2583 if (op != RES_OP_RESERVE)
2584 return -EINVAL;
2585
2586 xrcdn = get_param_l(&in_param);
2587 err = rem_res_range(dev, slave, xrcdn, 1, RES_XRCD, 0);
2588 if (err)
2589 return err;
2590
2591 __mlx4_xrcd_free(dev, xrcdn);
2592
2593 return err;
2594}
2595
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002596int mlx4_FREE_RES_wrapper(struct mlx4_dev *dev, int slave,
2597 struct mlx4_vhcr *vhcr,
2598 struct mlx4_cmd_mailbox *inbox,
2599 struct mlx4_cmd_mailbox *outbox,
2600 struct mlx4_cmd_info *cmd)
2601{
2602 int err = -EINVAL;
2603 int alop = vhcr->op_modifier;
2604
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002605 switch (vhcr->in_modifier & 0xFF) {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002606 case RES_QP:
2607 err = qp_free_res(dev, slave, vhcr->op_modifier, alop,
2608 vhcr->in_param);
2609 break;
2610
2611 case RES_MTT:
2612 err = mtt_free_res(dev, slave, vhcr->op_modifier, alop,
2613 vhcr->in_param, &vhcr->out_param);
2614 break;
2615
2616 case RES_MPT:
2617 err = mpt_free_res(dev, slave, vhcr->op_modifier, alop,
2618 vhcr->in_param);
2619 break;
2620
2621 case RES_CQ:
2622 err = cq_free_res(dev, slave, vhcr->op_modifier, alop,
2623 vhcr->in_param, &vhcr->out_param);
2624 break;
2625
2626 case RES_SRQ:
2627 err = srq_free_res(dev, slave, vhcr->op_modifier, alop,
2628 vhcr->in_param, &vhcr->out_param);
2629 break;
2630
2631 case RES_MAC:
2632 err = mac_free_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002633 vhcr->in_param, &vhcr->out_param,
2634 (vhcr->in_modifier >> 8) & 0xFF);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002635 break;
2636
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002637 case RES_VLAN:
2638 err = vlan_free_res(dev, slave, vhcr->op_modifier, alop,
Jack Morgensteinacddd5d2013-11-03 10:03:18 +02002639 vhcr->in_param, &vhcr->out_param,
2640 (vhcr->in_modifier >> 8) & 0xFF);
Eugenia Emantayevffe455a2011-12-13 04:16:21 +00002641 break;
2642
Jack Morgensteinba062d52012-05-15 10:35:03 +00002643 case RES_COUNTER:
2644 err = counter_free_res(dev, slave, vhcr->op_modifier, alop,
2645 vhcr->in_param, &vhcr->out_param);
2646 break;
2647
2648 case RES_XRCD:
2649 err = xrcdn_free_res(dev, slave, vhcr->op_modifier, alop,
2650 vhcr->in_param, &vhcr->out_param);
		break;
2651
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002652 default:
2653 break;
2654 }
2655 return err;
2656}
2657
2658/* ugly but other choices are uglier */
2659static int mr_phys_mpt(struct mlx4_mpt_entry *mpt)
2660{
2661 return (be32_to_cpu(mpt->flags) >> 9) & 1;
2662}
2663
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002664static int mr_get_mtt_addr(struct mlx4_mpt_entry *mpt)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002665{
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002666 return (int)be64_to_cpu(mpt->mtt_addr) & 0xfffffff8;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002667}
2668
2669static int mr_get_mtt_size(struct mlx4_mpt_entry *mpt)
2670{
2671 return be32_to_cpu(mpt->mtt_sz);
2672}
2673
Shani Michaelicc1ade92013-02-06 16:19:10 +00002674static u32 mr_get_pd(struct mlx4_mpt_entry *mpt)
2675{
2676 return be32_to_cpu(mpt->pd_flags) & 0x00ffffff;
2677}
2678
2679static int mr_is_fmr(struct mlx4_mpt_entry *mpt)
2680{
2681 return be32_to_cpu(mpt->pd_flags) & MLX4_MPT_PD_FLAG_FAST_REG;
2682}
2683
2684static int mr_is_bind_enabled(struct mlx4_mpt_entry *mpt)
2685{
2686 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_BIND_ENABLE;
2687}
2688
2689static int mr_is_region(struct mlx4_mpt_entry *mpt)
2690{
2691 return be32_to_cpu(mpt->flags) & MLX4_MPT_FLAG_REGION;
2692}
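
/*
 * Summary of the MPT fields decoded by the helpers above (only the
 * bits this file cares about): flags bit 9 marks a physical MR,
 * MLX4_MPT_FLAG_REGION distinguishes memory regions from windows,
 * and the low 24 bits of pd_flags hold the PD number with the owning
 * function encoded in bits 17-23 (checked in mlx4_SW2HW_MPT_wrapper
 * below).
 */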
2693
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002694static int qp_get_mtt_addr(struct mlx4_qp_context *qpc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002695{
2696 return be32_to_cpu(qpc->mtt_base_addr_l) & 0xfffffff8;
2697}
2698
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002699static int srq_get_mtt_addr(struct mlx4_srq_context *srqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002700{
2701 return be32_to_cpu(srqc->mtt_base_addr_l) & 0xfffffff8;
2702}
2703
2704static int qp_get_mtt_size(struct mlx4_qp_context *qpc)
2705{
2706 int page_shift = (qpc->log_page_size & 0x3f) + 12;
2707 int log_sq_size = (qpc->sq_size_stride >> 3) & 0xf;
2708 int log_sq_stride = qpc->sq_size_stride & 7;
2709 int log_rq_size = (qpc->rq_size_stride >> 3) & 0xf;
2710 int log_rq_stride = qpc->rq_size_stride & 7;
2711 int srq = (be32_to_cpu(qpc->srqn) >> 24) & 1;
2712 int rss = (be32_to_cpu(qpc->flags) >> 13) & 1;
Yishai Hadas5c5f3f02013-08-01 18:49:52 +03002713 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
2714 int xrc = (ts == MLX4_QP_ST_XRC) ? 1 : 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002715 int sq_size;
2716 int rq_size;
2717 int total_pages;
2718 int total_mem;
2719 int page_offset = (be32_to_cpu(qpc->params2) >> 6) & 0x3f;
2720
2721 sq_size = 1 << (log_sq_size + log_sq_stride + 4);
2722 rq_size = (srq|rss|xrc) ? 0 : (1 << (log_rq_size + log_rq_stride + 4));
2723 total_mem = sq_size + rq_size;
2724 total_pages =
2725 roundup_pow_of_two((total_mem + (page_offset << 6)) >>
2726 page_shift);
2727
2728 return total_pages;
2729}
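
/*
 * Worked example for qp_get_mtt_size(), with illustrative values:
 * log_sq_size = 6, log_sq_stride = 2, page_shift = 12, page_offset = 0
 * and no RQ (srq/rss/xrc set):
 *   sq_size     = 1 << (6 + 2 + 4) = 4096 bytes
 *   total_mem   = 4096 + 0
 *   total_pages = roundup_pow_of_two(4096 >> 12) = 1 MTT entry
 */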
2730
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002731static int check_mtt_range(struct mlx4_dev *dev, int slave, int start,
2732 int size, struct res_mtt *mtt)
2733{
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002734 int res_start = mtt->com.res_id;
2735 int res_size = (1 << mtt->order);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002736
2737 if (start < res_start || start + size > res_start + res_size)
2738 return -EPERM;
2739 return 0;
2740}
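
/*
 * Example: an MTT reservation of order 4 starting at entry 256 covers
 * entries [256, 272); check_mtt_range() rejects any window not fully
 * inside it, e.g. start = 270 with size = 4.
 */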
2741
2742int mlx4_SW2HW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2743 struct mlx4_vhcr *vhcr,
2744 struct mlx4_cmd_mailbox *inbox,
2745 struct mlx4_cmd_mailbox *outbox,
2746 struct mlx4_cmd_info *cmd)
2747{
2748 int err;
2749 int index = vhcr->in_modifier;
2750 struct res_mtt *mtt;
Greg Thelen8dc7d112017-04-17 23:21:35 -07002751 struct res_mpt *mpt = NULL;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002752 int mtt_base = mr_get_mtt_addr(inbox->buf) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002753 int phys;
2754 int id;
Shani Michaelicc1ade92013-02-06 16:19:10 +00002755 u32 pd;
2756 int pd_slave;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002757
2758 id = index & mpt_mask(dev);
2759 err = mr_res_start_move_to(dev, slave, id, RES_MPT_HW, &mpt);
2760 if (err)
2761 return err;
2762
Shani Michaelicc1ade92013-02-06 16:19:10 +00002763 /* Disable memory windows for VFs. */
2764 if (!mr_is_region(inbox->buf)) {
2765 err = -EPERM;
2766 goto ex_abort;
2767 }
2768
2769 /* Make sure that the PD bits related to the slave id are zeros. */
2770 pd = mr_get_pd(inbox->buf);
2771 pd_slave = (pd >> 17) & 0x7f;
Maor Gottliebb3320682015-02-03 17:57:15 +02002772 if (pd_slave != 0 && --pd_slave != slave) {
Shani Michaelicc1ade92013-02-06 16:19:10 +00002773 err = -EPERM;
2774 goto ex_abort;
2775 }
2776
2777 if (mr_is_fmr(inbox->buf)) {
2778 /* FMR and Bind Enable are forbidden in slave devices. */
2779 if (mr_is_bind_enabled(inbox->buf)) {
2780 err = -EPERM;
2781 goto ex_abort;
2782 }
2783 /* FMR and Memory Windows are also forbidden. */
2784 if (!mr_is_region(inbox->buf)) {
2785 err = -EPERM;
2786 goto ex_abort;
2787 }
2788 }
2789
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002790 phys = mr_phys_mpt(inbox->buf);
2791 if (!phys) {
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002792 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002793 if (err)
2794 goto ex_abort;
2795
2796 err = check_mtt_range(dev, slave, mtt_base,
2797 mr_get_mtt_size(inbox->buf), mtt);
2798 if (err)
2799 goto ex_put;
2800
2801 mpt->mtt = mtt;
2802 }
2803
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002804 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2805 if (err)
2806 goto ex_put;
2807
2808 if (!phys) {
2809 atomic_inc(&mtt->ref_count);
2810 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2811 }
2812
2813 res_end_move(dev, slave, RES_MPT, id);
2814 return 0;
2815
2816ex_put:
2817 if (!phys)
2818 put_res(dev, slave, mtt->com.res_id, RES_MTT);
2819ex_abort:
2820 res_abort_move(dev, slave, RES_MPT, id);
2821
2822 return err;
2823}
2824
2825int mlx4_HW2SW_MPT_wrapper(struct mlx4_dev *dev, int slave,
2826 struct mlx4_vhcr *vhcr,
2827 struct mlx4_cmd_mailbox *inbox,
2828 struct mlx4_cmd_mailbox *outbox,
2829 struct mlx4_cmd_info *cmd)
2830{
2831 int err;
2832 int index = vhcr->in_modifier;
2833 struct res_mpt *mpt;
2834 int id;
2835
2836 id = index & mpt_mask(dev);
2837 err = mr_res_start_move_to(dev, slave, id, RES_MPT_MAPPED, &mpt);
2838 if (err)
2839 return err;
2840
2841 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2842 if (err)
2843 goto ex_abort;
2844
2845 if (mpt->mtt)
2846 atomic_dec(&mpt->mtt->ref_count);
2847
2848 res_end_move(dev, slave, RES_MPT, id);
2849 return 0;
2850
2851ex_abort:
2852 res_abort_move(dev, slave, RES_MPT, id);
2853
2854 return err;
2855}
2856
2857int mlx4_QUERY_MPT_wrapper(struct mlx4_dev *dev, int slave,
2858 struct mlx4_vhcr *vhcr,
2859 struct mlx4_cmd_mailbox *inbox,
2860 struct mlx4_cmd_mailbox *outbox,
2861 struct mlx4_cmd_info *cmd)
2862{
2863 int err;
2864 int index = vhcr->in_modifier;
2865 struct res_mpt *mpt;
2866 int id;
2867
2868 id = index & mpt_mask(dev);
2869 err = get_res(dev, slave, id, RES_MPT, &mpt);
2870 if (err)
2871 return err;
2872
Matan Barake6306642014-07-31 11:01:29 +03002873 if (mpt->com.from_state == RES_MPT_MAPPED) {
2874 /* In order to allow rereg in SRIOV, we need to alter the MPT entry. To do
2875 * that, the VF must read the MPT. But since the MPT entry memory is not
2876 * in the VF's virtual memory space, it must use QUERY_MPT to obtain the
2877 * entry contents. To guarantee that the MPT cannot be changed, the driver
2878 * must perform HW2SW_MPT before this query and return the MPT entry to HW
2879 * ownership fofollowing the change. The change here allows the VF to
2880 * perform QUERY_MPT also when the entry is in SW ownership.
2881 */
2882 struct mlx4_mpt_entry *mpt_entry = mlx4_table_find(
2883 &mlx4_priv(dev)->mr_table.dmpt_table,
2884 mpt->key, NULL);
2885
2886 if (!mpt_entry || !outbox->buf) {
2887 err = -EINVAL;
2888 goto out;
2889 }
2890
2891 memcpy(outbox->buf, mpt_entry, sizeof(*mpt_entry));
2892
2893 err = 0;
2894 } else if (mpt->com.from_state == RES_MPT_HW) {
2895 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
2896 } else {
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002897 err = -EBUSY;
2898 goto out;
2899 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002901
2902out:
2903 put_res(dev, slave, id, RES_MPT);
2904 return err;
2905}
2906
2907static int qp_get_rcqn(struct mlx4_qp_context *qpc)
2908{
2909 return be32_to_cpu(qpc->cqn_recv) & 0xffffff;
2910}
2911
2912static int qp_get_scqn(struct mlx4_qp_context *qpc)
2913{
2914 return be32_to_cpu(qpc->cqn_send) & 0xffffff;
2915}
2916
2917static u32 qp_get_srqn(struct mlx4_qp_context *qpc)
2918{
2919 return be32_to_cpu(qpc->srqn) & 0x1ffffff;
2920}
2921
Jack Morgenstein54679e12012-08-03 08:40:43 +00002922static void adjust_proxy_tun_qkey(struct mlx4_dev *dev, struct mlx4_vhcr *vhcr,
2923 struct mlx4_qp_context *context)
2924{
2925 u32 qpn = vhcr->in_modifier & 0xffffff;
2926 u32 qkey = 0;
2927
2928 if (mlx4_get_parav_qkey(dev, qpn, &qkey))
2929 return;
2930
2931 /* adjust qkey in qp context */
2932 context->qkey = cpu_to_be32(qkey);
2933}
2934
Or Gerlitze5dfbf92015-05-21 15:14:09 +03002935static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
2936 struct mlx4_qp_context *qpc,
2937 struct mlx4_cmd_mailbox *inbox);
2938
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002939int mlx4_RST2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
2940 struct mlx4_vhcr *vhcr,
2941 struct mlx4_cmd_mailbox *inbox,
2942 struct mlx4_cmd_mailbox *outbox,
2943 struct mlx4_cmd_info *cmd)
2944{
2945 int err;
2946 int qpn = vhcr->in_modifier & 0x7fffff;
2947 struct res_mtt *mtt;
2948 struct res_qp *qp;
2949 struct mlx4_qp_context *qpc = inbox->buf + 8;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002950 int mtt_base = qp_get_mtt_addr(qpc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002951 int mtt_size = qp_get_mtt_size(qpc);
2952 struct res_cq *rcq;
2953 struct res_cq *scq;
2954 int rcqn = qp_get_rcqn(qpc);
2955 int scqn = qp_get_scqn(qpc);
2956 u32 srqn = qp_get_srqn(qpc) & 0xffffff;
2957 int use_srq = (qp_get_srqn(qpc) >> 24) & 1;
2958 struct res_srq *srq;
2959 int local_qpn = be32_to_cpu(qpc->local_qpn) & 0xffffff;
2960
Or Gerlitze5dfbf92015-05-21 15:14:09 +03002961 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
2962 if (err)
2963 return err;
2964
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002965 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_HW, &qp, 0);
2966 if (err)
2967 return err;
2968 qp->local_qpn = local_qpn;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002969 qp->sched_queue = 0;
Rony Efraimf0f829b2013-11-07 12:19:51 +02002970 qp->param3 = 0;
2971 qp->vlan_control = 0;
2972 qp->fvl_rx = 0;
2973 qp->pri_path_fl = 0;
2974 qp->vlan_index = 0;
2975 qp->feup = 0;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03002976 qp->qpc_flags = be32_to_cpu(qpc->flags);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002977
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00002978 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002979 if (err)
2980 goto ex_abort;
2981
2982 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
2983 if (err)
2984 goto ex_put_mtt;
2985
Eli Cohenc82e9aa2011-12-13 04:15:24 +00002986 err = get_res(dev, slave, rcqn, RES_CQ, &rcq);
2987 if (err)
2988 goto ex_put_mtt;
2989
2990 if (scqn != rcqn) {
2991 err = get_res(dev, slave, scqn, RES_CQ, &scq);
2992 if (err)
2993 goto ex_put_rcq;
2994 } else
2995 scq = rcq;
2996
2997 if (use_srq) {
2998 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
2999 if (err)
3000 goto ex_put_scq;
3001 }
3002
Jack Morgenstein54679e12012-08-03 08:40:43 +00003003 adjust_proxy_tun_qkey(dev, vhcr, qpc);
3004 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003005 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3006 if (err)
3007 goto ex_put_srq;
3008 atomic_inc(&mtt->ref_count);
3009 qp->mtt = mtt;
3010 atomic_inc(&rcq->ref_count);
3011 qp->rcq = rcq;
3012 atomic_inc(&scq->ref_count);
3013 qp->scq = scq;
3014
3015 if (scqn != rcqn)
3016 put_res(dev, slave, scqn, RES_CQ);
3017
3018 if (use_srq) {
3019 atomic_inc(&srq->ref_count);
3020 put_res(dev, slave, srqn, RES_SRQ);
3021 qp->srq = srq;
3022 }
Jack Morgenstein7c3945bc2017-01-16 18:31:38 +02003023
3024 /* Save param3 for dynamic changes from VST back to VGT */
3025 qp->param3 = qpc->param3;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003026 put_res(dev, slave, rcqn, RES_CQ);
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003027 put_res(dev, slave, mtt_base, RES_MTT);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003028 res_end_move(dev, slave, RES_QP, qpn);
3029
3030 return 0;
3031
3032ex_put_srq:
3033 if (use_srq)
3034 put_res(dev, slave, srqn, RES_SRQ);
3035ex_put_scq:
3036 if (scqn != rcqn)
3037 put_res(dev, slave, scqn, RES_CQ);
3038ex_put_rcq:
3039 put_res(dev, slave, rcqn, RES_CQ);
3040ex_put_mtt:
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003041 put_res(dev, slave, mtt_base, RES_MTT);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003042ex_abort:
3043 res_abort_move(dev, slave, RES_QP, qpn);
3044
3045 return err;
3046}
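
/*
 * Note: the atomic_inc() calls above pin the MTT, the CQs and the
 * optional SRQ for the lifetime of the QP; the matching atomic_dec()
 * calls are made when the QP leaves the HW state (not shown in this
 * excerpt).
 */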
3047
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003048static int eq_get_mtt_addr(struct mlx4_eq_context *eqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003049{
3050 return be32_to_cpu(eqc->mtt_base_addr_l) & 0xfffffff8;
3051}
3052
3053static int eq_get_mtt_size(struct mlx4_eq_context *eqc)
3054{
3055 int log_eq_size = eqc->log_eq_size & 0x1f;
3056 int page_shift = (eqc->log_page_size & 0x3f) + 12;
3057
3058 if (log_eq_size + 5 < page_shift)
3059 return 1;
3060
3061 return 1 << (log_eq_size + 5 - page_shift);
3062}
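
/*
 * Worked example for eq_get_mtt_size(): EQEs are 32 bytes (hence the
 * "+ 5"), so with log_eq_size = 10 (1024 entries) and page_shift = 12
 * (4K pages) the EQ spans 1 << (10 + 5 - 12) = 8 MTT entries; an EQ
 * smaller than one page still takes 1.
 */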
3063
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003064static int cq_get_mtt_addr(struct mlx4_cq_context *cqc)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003065{
3066 return be32_to_cpu(cqc->mtt_base_addr_l) & 0xfffffff8;
3067}
3068
3069static int cq_get_mtt_size(struct mlx4_cq_context *cqc)
3070{
3071 int log_cq_size = (be32_to_cpu(cqc->logsize_usrpage) >> 24) & 0x1f;
3072 int page_shift = (cqc->log_page_size & 0x3f) + 12;
3073
3074 if (log_cq_size + 5 < page_shift)
3075 return 1;
3076
3077 return 1 << (log_cq_size + 5 - page_shift);
3078}
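
/*
 * cq_get_mtt_size() is the same computation for 32-byte CQEs, with
 * the log size taken from logsize_usrpage instead.
 */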
3079
3080int mlx4_SW2HW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3081 struct mlx4_vhcr *vhcr,
3082 struct mlx4_cmd_mailbox *inbox,
3083 struct mlx4_cmd_mailbox *outbox,
3084 struct mlx4_cmd_info *cmd)
3085{
3086 int err;
3087 int eqn = vhcr->in_modifier;
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003088 int res_id = (slave << 10) | eqn;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003089 struct mlx4_eq_context *eqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003090 int mtt_base = eq_get_mtt_addr(eqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003091 int mtt_size = eq_get_mtt_size(eqc);
3092 struct res_eq *eq;
3093 struct res_mtt *mtt;
3094
3095 err = add_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3096 if (err)
3097 return err;
3098 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_HW, &eq);
3099 if (err)
3100 goto out_add;
3101
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003102 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003103 if (err)
3104 goto out_move;
3105
3106 err = check_mtt_range(dev, slave, mtt_base, mtt_size, mtt);
3107 if (err)
3108 goto out_put;
3109
3110 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3111 if (err)
3112 goto out_put;
3113
3114 atomic_inc(&mtt->ref_count);
3115 eq->mtt = mtt;
3116 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3117 res_end_move(dev, slave, RES_EQ, res_id);
3118 return 0;
3119
3120out_put:
3121 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3122out_move:
3123 res_abort_move(dev, slave, RES_EQ, res_id);
3124out_add:
3125 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3126 return err;
3127}
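
/*
 * Illustrative sketch: per-slave EQ numbers are not globally unique,
 * so the tracker keys EQs by slave as well (see res_id above and in
 * mlx4_HW2SW_EQ_wrapper below):
 */
static inline int example_eq_res_id(int slave, int eqn)
{
	return (slave << 10) | eqn;	/* assumes eqn < 1024 */
}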
3128
Matan Barakd475c952014-11-02 16:26:17 +02003129int mlx4_CONFIG_DEV_wrapper(struct mlx4_dev *dev, int slave,
3130 struct mlx4_vhcr *vhcr,
3131 struct mlx4_cmd_mailbox *inbox,
3132 struct mlx4_cmd_mailbox *outbox,
3133 struct mlx4_cmd_info *cmd)
3134{
3135 int err;
3136 u8 get = vhcr->op_modifier;
3137
3138 if (get != 1)
3139 return -EPERM;
3140
3141 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3142
3143 return err;
3144}
3145
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003146static int get_containing_mtt(struct mlx4_dev *dev, int slave, int start,
3147 int len, struct res_mtt **res)
3148{
3149 struct mlx4_priv *priv = mlx4_priv(dev);
3150 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
3151 struct res_mtt *mtt;
3152 int err = -EINVAL;
3153
3154 spin_lock_irq(mlx4_tlock(dev));
3155 list_for_each_entry(mtt, &tracker->slave_list[slave].res_list[RES_MTT],
3156 com.list) {
3157 if (!check_mtt_range(dev, slave, start, len, mtt)) {
3158 *res = mtt;
3159 mtt->com.from_state = mtt->com.state;
3160 mtt->com.state = RES_MTT_BUSY;
3161 err = 0;
3162 break;
3163 }
3164 }
3165 spin_unlock_irq(mlx4_tlock(dev));
3166
3167 return err;
3168}
3169
Jack Morgenstein54679e12012-08-03 08:40:43 +00003170static int verify_qp_parameters(struct mlx4_dev *dev,
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003171 struct mlx4_vhcr *vhcr,
Jack Morgenstein54679e12012-08-03 08:40:43 +00003172 struct mlx4_cmd_mailbox *inbox,
3173 enum qp_transition transition, u8 slave)
3174{
3175 u32 qp_type;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003176 u32 qpn;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003177 struct mlx4_qp_context *qp_ctx;
3178 enum mlx4_qp_optpar optpar;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003179 int port;
3180 int num_gids;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003181
3182 qp_ctx = inbox->buf + 8;
3183 qp_type = (be32_to_cpu(qp_ctx->flags) >> 16) & 0xff;
3184 optpar = be32_to_cpu(*(__be32 *) inbox->buf);
3185
Or Gerlitzfc31e252015-03-18 14:57:34 +02003186 if (slave != mlx4_master_func_num(dev)) {
Tariq Toukanbb428a52017-10-09 16:59:48 +03003187 qp_ctx->params2 &= ~cpu_to_be32(MLX4_QP_BIT_FPP);
Or Gerlitzfc31e252015-03-18 14:57:34 +02003188 /* setting QP rate-limit is disallowed for VFs */
3189 if (qp_ctx->rate_limit_params)
3190 return -EPERM;
3191 }
Moni Shoua53f33ae2015-02-03 16:48:33 +02003192
Jack Morgenstein54679e12012-08-03 08:40:43 +00003193 switch (qp_type) {
3194 case MLX4_QP_ST_RC:
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003195 case MLX4_QP_ST_XRC:
Jack Morgenstein54679e12012-08-03 08:40:43 +00003196 case MLX4_QP_ST_UC:
3197 switch (transition) {
3198 case QP_TRANS_INIT2RTR:
3199 case QP_TRANS_RTR2RTS:
3200 case QP_TRANS_RTS2RTS:
3201 case QP_TRANS_SQD2SQD:
3202 case QP_TRANS_SQD2RTS:
Arnd Bergmannbaefd702016-03-14 15:18:34 +01003203 if (slave != mlx4_master_func_num(dev)) {
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003204 if (optpar & MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH) {
3205 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3206 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
Matan Barak449fc482014-03-19 18:11:52 +02003207 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003208 else
3209 num_gids = 1;
3210 if (qp_ctx->pri_path.mgid_index >= num_gids)
Jack Morgenstein54679e12012-08-03 08:40:43 +00003211 return -EINVAL;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003212 }
3213 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3214 port = (qp_ctx->alt_path.sched_queue >> 6 & 1) + 1;
3215 if (dev->caps.port_mask[port] != MLX4_PORT_TYPE_IB)
Matan Barak449fc482014-03-19 18:11:52 +02003216 num_gids = mlx4_get_slave_num_gids(dev, slave, port);
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003217 else
3218 num_gids = 1;
3219 if (qp_ctx->alt_path.mgid_index >= num_gids)
Jack Morgenstein54679e12012-08-03 08:40:43 +00003220 return -EINVAL;
Jack Morgensteinb6ffaef2014-03-12 12:00:39 +02003221 }
Arnd Bergmannbaefd702016-03-14 15:18:34 +01003222 }
Jack Morgenstein54679e12012-08-03 08:40:43 +00003223 break;
3224 default:
3225 break;
3226 }
Jack Morgenstein54679e12012-08-03 08:40:43 +00003227 break;
Roland Dreier165cb462014-05-30 15:38:58 -07003228
3229 case MLX4_QP_ST_MLX:
3230 qpn = vhcr->in_modifier & 0x7fffff;
3231 port = (qp_ctx->pri_path.sched_queue >> 6 & 1) + 1;
3232 if (transition == QP_TRANS_INIT2RTR &&
3233 slave != mlx4_master_func_num(dev) &&
3234 mlx4_is_qp_reserved(dev, qpn) &&
3235 !mlx4_vf_smi_enabled(dev, slave, port)) {
3236			/* only SMI-enabled VFs may create MLX proxy special QPs */
3237 mlx4_err(dev, "%s: unprivileged slave %d attempting to create an MLX proxy special QP on port %d\n",
3238 __func__, slave, port);
3239 return -EPERM;
3240 }
3241 break;
3242
Jack Morgenstein54679e12012-08-03 08:40:43 +00003243 default:
3244 break;
3245 }
3246
3247 return 0;
3248}
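
/*
 * Note: in the address-path checks above (done only for non-master
 * functions), the port is recovered from bit 6 of sched_queue and
 * mgid_index is bounded by the number of GIDs this slave owns on that
 * port: on IB ports each function sees a single GID (num_gids = 1),
 * otherwise the per-slave GID count applies. An out-of-range
 * mgid_index would let a VF send with a GID it does not own, so the
 * transition is rejected with -EINVAL.
 */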
3249
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003250int mlx4_WRITE_MTT_wrapper(struct mlx4_dev *dev, int slave,
3251 struct mlx4_vhcr *vhcr,
3252 struct mlx4_cmd_mailbox *inbox,
3253 struct mlx4_cmd_mailbox *outbox,
3254 struct mlx4_cmd_info *cmd)
3255{
3256 struct mlx4_mtt mtt;
3257 __be64 *page_list = inbox->buf;
3258 u64 *pg_list = (u64 *)page_list;
3259 int i;
3260 struct res_mtt *rmtt = NULL;
3261 int start = be64_to_cpu(page_list[0]);
3262 int npages = vhcr->in_modifier;
3263 int err;
3264
3265 err = get_containing_mtt(dev, slave, start, npages, &rmtt);
3266 if (err)
3267 return err;
3268
3269 /* Call the SW implementation of write_mtt:
3270 * - Prepare a dummy mtt struct
Joe Perchesdbedd442015-03-06 20:49:12 -08003271 * - Translate inbox contents to simple addresses in host endianness */
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003272	mtt.offset = 0; /* TBD: the offset handling here is broken, but we
3273			   leave it as-is since we don't really use it */
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003274 mtt.order = 0;
3275 mtt.page_shift = 0;
3276 for (i = 0; i < npages; ++i)
3277 pg_list[i + 2] = (be64_to_cpu(page_list[i + 2]) & ~1ULL);
3278
3279 err = __mlx4_write_mtt(dev, &mtt, be64_to_cpu(page_list[0]), npages,
3280 ((u64 *)page_list + 2));
3281
3282 if (rmtt)
3283 put_res(dev, slave, rmtt->com.res_id, RES_MTT);
3284
3285 return err;
3286}
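
/*
 * Illustrative WRITE_MTT inbox layout as consumed above (a sketch
 * inferred from this wrapper, not a spec):
 *
 *	page_list[0]    start offset in the MTT table (big endian)
 *	page_list[1]    reserved
 *	page_list[2..]  page addresses; bit 0 (the present bit) is
 *	                cleared before handing them to __mlx4_write_mtt()
 */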
3287
3288int mlx4_HW2SW_EQ_wrapper(struct mlx4_dev *dev, int slave,
3289 struct mlx4_vhcr *vhcr,
3290 struct mlx4_cmd_mailbox *inbox,
3291 struct mlx4_cmd_mailbox *outbox,
3292 struct mlx4_cmd_info *cmd)
3293{
3294 int eqn = vhcr->in_modifier;
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003295 int res_id = eqn | (slave << 10);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003296 struct res_eq *eq;
3297 int err;
3298
3299 err = eq_res_start_move_to(dev, slave, res_id, RES_EQ_RESERVED, &eq);
3300 if (err)
3301 return err;
3302
3303 err = get_res(dev, slave, eq->mtt->com.res_id, RES_MTT, NULL);
3304 if (err)
3305 goto ex_abort;
3306
3307 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3308 if (err)
3309 goto ex_put;
3310
3311 atomic_dec(&eq->mtt->ref_count);
3312 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3313 res_end_move(dev, slave, RES_EQ, res_id);
3314 rem_res_range(dev, slave, res_id, 1, RES_EQ, 0);
3315
3316 return 0;
3317
3318ex_put:
3319 put_res(dev, slave, eq->mtt->com.res_id, RES_MTT);
3320ex_abort:
3321 res_abort_move(dev, slave, RES_EQ, res_id);
3322
3323 return err;
3324}
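
/*
 * Note: EQ resource ids are encoded as res_id = eqn | (slave << 10),
 * i.e. 10 bits of EQN per owning function. mlx4_GEN_EQE() and
 * mlx4_QUERY_EQ_wrapper() below rely on the same encoding, so EQ
 * lookups in the shared resource tracker cannot collide across slaves.
 */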
3325
3326int mlx4_GEN_EQE(struct mlx4_dev *dev, int slave, struct mlx4_eqe *eqe)
3327{
3328 struct mlx4_priv *priv = mlx4_priv(dev);
3329 struct mlx4_slave_event_eq_info *event_eq;
3330 struct mlx4_cmd_mailbox *mailbox;
3331 u32 in_modifier = 0;
3332 int err;
3333 int res_id;
3334 struct res_eq *req;
3335
3336 if (!priv->mfunc.master.slave_state)
3337 return -EINVAL;
3338
Jack Morgensteinbffb0232015-03-24 15:18:39 +02003339	/* check that the slave is valid, is not the PF, and is active */
3340 if (slave < 0 || slave > dev->persist->num_vfs ||
3341 slave == dev->caps.function ||
3342 !priv->mfunc.master.slave_state[slave].active)
3343 return 0;
3344
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00003345 event_eq = &priv->mfunc.master.slave_state[slave].event_eq[eqe->type];
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003346
3347 /* Create the event only if the slave is registered */
Marcel Apfelbaum803143f2012-01-19 09:45:46 +00003348 if (event_eq->eqn < 0)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003349 return 0;
3350
3351 mutex_lock(&priv->mfunc.master.gen_eqe_mutex[slave]);
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003352 res_id = (slave << 10) | event_eq->eqn;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003353 err = get_res(dev, slave, res_id, RES_EQ, &req);
3354 if (err)
3355 goto unlock;
3356
3357 if (req->com.from_state != RES_EQ_HW) {
3358 err = -EINVAL;
3359 goto put;
3360 }
3361
3362 mailbox = mlx4_alloc_cmd_mailbox(dev);
3363 if (IS_ERR(mailbox)) {
3364 err = PTR_ERR(mailbox);
3365 goto put;
3366 }
3367
3368 if (eqe->type == MLX4_EVENT_TYPE_CMD) {
3369 ++event_eq->token;
3370 eqe->event.cmd.token = cpu_to_be16(event_eq->token);
3371 }
3372
3373 memcpy(mailbox->buf, (u8 *) eqe, 28);
3374
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003375 in_modifier = (slave & 0xff) | ((event_eq->eqn & 0x3ff) << 16);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003376
3377 err = mlx4_cmd(dev, mailbox->dma, in_modifier, 0,
3378 MLX4_CMD_GEN_EQE, MLX4_CMD_TIME_CLASS_B,
3379 MLX4_CMD_NATIVE);
3380
3381 put_res(dev, slave, res_id, RES_EQ);
3382 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3383 mlx4_free_cmd_mailbox(dev, mailbox);
3384 return err;
3385
3386put:
3387 put_res(dev, slave, res_id, RES_EQ);
3388
3389unlock:
3390 mutex_unlock(&priv->mfunc.master.gen_eqe_mutex[slave]);
3391 return err;
3392}
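
/*
 * Note: only the first 28 bytes of the 32-byte EQE are copied into the
 * mailbox above, presumably leaving the ownership tail to the EQ
 * machinery, and the GEN_EQE in_modifier packs the target as
 * (slave & 0xff) | ((eqn & 0x3ff) << 16). The per-slave gen_eqe_mutex
 * serializes command-completion token updates for that slave.
 */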
3393
3394int mlx4_QUERY_EQ_wrapper(struct mlx4_dev *dev, int slave,
3395 struct mlx4_vhcr *vhcr,
3396 struct mlx4_cmd_mailbox *inbox,
3397 struct mlx4_cmd_mailbox *outbox,
3398 struct mlx4_cmd_info *cmd)
3399{
3400 int eqn = vhcr->in_modifier;
Yishai Hadas2d3c7392015-05-05 17:07:12 +03003401 int res_id = eqn | (slave << 10);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003402 struct res_eq *eq;
3403 int err;
3404
3405 err = get_res(dev, slave, res_id, RES_EQ, &eq);
3406 if (err)
3407 return err;
3408
3409 if (eq->com.from_state != RES_EQ_HW) {
3410 err = -EINVAL;
3411 goto ex_put;
3412 }
3413
3414 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3415
3416ex_put:
3417 put_res(dev, slave, res_id, RES_EQ);
3418 return err;
3419}
3420
3421int mlx4_SW2HW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3422 struct mlx4_vhcr *vhcr,
3423 struct mlx4_cmd_mailbox *inbox,
3424 struct mlx4_cmd_mailbox *outbox,
3425 struct mlx4_cmd_info *cmd)
3426{
3427 int err;
3428 int cqn = vhcr->in_modifier;
3429 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003430 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003431 struct res_cq *cq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003432 struct res_mtt *mtt;
3433
3434 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_HW, &cq);
3435 if (err)
3436 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003437 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003438 if (err)
3439 goto out_move;
3440 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3441 if (err)
3442 goto out_put;
3443 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3444 if (err)
3445 goto out_put;
3446 atomic_inc(&mtt->ref_count);
3447 cq->mtt = mtt;
3448 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3449 res_end_move(dev, slave, RES_CQ, cqn);
3450 return 0;
3451
3452out_put:
3453 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3454out_move:
3455 res_abort_move(dev, slave, RES_CQ, cqn);
3456 return err;
3457}
3458
3459int mlx4_HW2SW_CQ_wrapper(struct mlx4_dev *dev, int slave,
3460 struct mlx4_vhcr *vhcr,
3461 struct mlx4_cmd_mailbox *inbox,
3462 struct mlx4_cmd_mailbox *outbox,
3463 struct mlx4_cmd_info *cmd)
3464{
3465 int err;
3466 int cqn = vhcr->in_modifier;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003467 struct res_cq *cq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003468
3469 err = cq_res_start_move_to(dev, slave, cqn, RES_CQ_ALLOCATED, &cq);
3470 if (err)
3471 return err;
3472 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3473 if (err)
3474 goto out_move;
3475 atomic_dec(&cq->mtt->ref_count);
3476 res_end_move(dev, slave, RES_CQ, cqn);
3477 return 0;
3478
3479out_move:
3480 res_abort_move(dev, slave, RES_CQ, cqn);
3481 return err;
3482}
3483
3484int mlx4_QUERY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3485 struct mlx4_vhcr *vhcr,
3486 struct mlx4_cmd_mailbox *inbox,
3487 struct mlx4_cmd_mailbox *outbox,
3488 struct mlx4_cmd_info *cmd)
3489{
3490 int cqn = vhcr->in_modifier;
3491 struct res_cq *cq;
3492 int err;
3493
3494 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3495 if (err)
3496 return err;
3497
3498 if (cq->com.from_state != RES_CQ_HW)
3499 goto ex_put;
3500
3501 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3502ex_put:
3503 put_res(dev, slave, cqn, RES_CQ);
3504
3505 return err;
3506}
3507
3508static int handle_resize(struct mlx4_dev *dev, int slave,
3509 struct mlx4_vhcr *vhcr,
3510 struct mlx4_cmd_mailbox *inbox,
3511 struct mlx4_cmd_mailbox *outbox,
3512 struct mlx4_cmd_info *cmd,
3513 struct res_cq *cq)
3514{
3515 int err;
3516 struct res_mtt *orig_mtt;
3517 struct res_mtt *mtt;
3518 struct mlx4_cq_context *cqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003519 int mtt_base = cq_get_mtt_addr(cqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003520
3521 err = get_res(dev, slave, cq->mtt->com.res_id, RES_MTT, &orig_mtt);
3522 if (err)
3523 return err;
3524
3525 if (orig_mtt != cq->mtt) {
3526 err = -EINVAL;
3527 goto ex_put;
3528 }
3529
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003530 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003531 if (err)
3532 goto ex_put;
3533
3534 err = check_mtt_range(dev, slave, mtt_base, cq_get_mtt_size(cqc), mtt);
3535 if (err)
3536 goto ex_put1;
3537 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3538 if (err)
3539 goto ex_put1;
3540 atomic_dec(&orig_mtt->ref_count);
3541 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3542 atomic_inc(&mtt->ref_count);
3543 cq->mtt = mtt;
3544 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3545 return 0;
3546
3547ex_put1:
3548 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3549ex_put:
3550 put_res(dev, slave, orig_mtt->com.res_id, RES_MTT);
3551
3552 return err;
3553
3554}
3555
3556int mlx4_MODIFY_CQ_wrapper(struct mlx4_dev *dev, int slave,
3557 struct mlx4_vhcr *vhcr,
3558 struct mlx4_cmd_mailbox *inbox,
3559 struct mlx4_cmd_mailbox *outbox,
3560 struct mlx4_cmd_info *cmd)
3561{
3562 int cqn = vhcr->in_modifier;
3563 struct res_cq *cq;
3564 int err;
3565
3566 err = get_res(dev, slave, cqn, RES_CQ, &cq);
3567 if (err)
3568 return err;
3569
3570 if (cq->com.from_state != RES_CQ_HW)
3571 goto ex_put;
3572
3573 if (vhcr->op_modifier == 0) {
3574 err = handle_resize(dev, slave, vhcr, inbox, outbox, cmd, cq);
Jack Morgensteindcf353b2012-03-07 05:56:35 +00003575 goto ex_put;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003576 }
3577
3578 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3579ex_put:
3580 put_res(dev, slave, cqn, RES_CQ);
3581
3582 return err;
3583}
3584
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003585static int srq_get_mtt_size(struct mlx4_srq_context *srqc)
3586{
3587 int log_srq_size = (be32_to_cpu(srqc->state_logsize_srqn) >> 24) & 0xf;
3588 int log_rq_stride = srqc->logstride & 7;
3589 int page_shift = (srqc->log_page_size & 0x3f) + 12;
3590
3591 if (log_srq_size + log_rq_stride + 4 < page_shift)
3592 return 1;
3593
3594 return 1 << (log_srq_size + log_rq_stride + 4 - page_shift);
3595}
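
/*
 * Worked example for srq_get_mtt_size() (illustrative numbers): with
 * log_srq_size = 10, log_rq_stride = 2 and a 4K page (page_shift = 12),
 * the SRQ buffer is 2^(10 + 2 + 4) = 64KB, i.e. 2^(16 - 12) = 16
 * pages, so 16 MTT entries are needed. The "+ 4" reflects the 16-byte
 * unit in which the WQE stride is expressed.
 */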
3596
3597int mlx4_SW2HW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3598 struct mlx4_vhcr *vhcr,
3599 struct mlx4_cmd_mailbox *inbox,
3600 struct mlx4_cmd_mailbox *outbox,
3601 struct mlx4_cmd_info *cmd)
3602{
3603 int err;
3604 int srqn = vhcr->in_modifier;
3605 struct res_mtt *mtt;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003606 struct res_srq *srq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003607 struct mlx4_srq_context *srqc = inbox->buf;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003608 int mtt_base = srq_get_mtt_addr(srqc) / dev->caps.mtt_entry_sz;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003609
3610 if (srqn != (be32_to_cpu(srqc->state_logsize_srqn) & 0xffffff))
3611 return -EINVAL;
3612
3613 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_HW, &srq);
3614 if (err)
3615 return err;
Marcel Apfelbaum2b8fb282011-12-13 04:16:56 +00003616 err = get_res(dev, slave, mtt_base, RES_MTT, &mtt);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003617 if (err)
3618 goto ex_abort;
3619 err = check_mtt_range(dev, slave, mtt_base, srq_get_mtt_size(srqc),
3620 mtt);
3621 if (err)
3622 goto ex_put_mtt;
3623
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003624 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3625 if (err)
3626 goto ex_put_mtt;
3627
3628 atomic_inc(&mtt->ref_count);
3629 srq->mtt = mtt;
3630 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3631 res_end_move(dev, slave, RES_SRQ, srqn);
3632 return 0;
3633
3634ex_put_mtt:
3635 put_res(dev, slave, mtt->com.res_id, RES_MTT);
3636ex_abort:
3637 res_abort_move(dev, slave, RES_SRQ, srqn);
3638
3639 return err;
3640}
3641
3642int mlx4_HW2SW_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3643 struct mlx4_vhcr *vhcr,
3644 struct mlx4_cmd_mailbox *inbox,
3645 struct mlx4_cmd_mailbox *outbox,
3646 struct mlx4_cmd_info *cmd)
3647{
3648 int err;
3649 int srqn = vhcr->in_modifier;
Bjorn Helgaasc1c52db2015-05-14 18:17:08 -05003650 struct res_srq *srq = NULL;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003651
3652 err = srq_res_start_move_to(dev, slave, srqn, RES_SRQ_ALLOCATED, &srq);
3653 if (err)
3654 return err;
3655 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3656 if (err)
3657 goto ex_abort;
3658 atomic_dec(&srq->mtt->ref_count);
3659 if (srq->cq)
3660 atomic_dec(&srq->cq->ref_count);
3661 res_end_move(dev, slave, RES_SRQ, srqn);
3662
3663 return 0;
3664
3665ex_abort:
3666 res_abort_move(dev, slave, RES_SRQ, srqn);
3667
3668 return err;
3669}
3670
3671int mlx4_QUERY_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3672 struct mlx4_vhcr *vhcr,
3673 struct mlx4_cmd_mailbox *inbox,
3674 struct mlx4_cmd_mailbox *outbox,
3675 struct mlx4_cmd_info *cmd)
3676{
3677 int err;
3678 int srqn = vhcr->in_modifier;
3679 struct res_srq *srq;
3680
3681 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3682 if (err)
3683 return err;
3684 if (srq->com.from_state != RES_SRQ_HW) {
3685 err = -EBUSY;
3686 goto out;
3687 }
3688 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3689out:
3690 put_res(dev, slave, srqn, RES_SRQ);
3691 return err;
3692}
3693
3694int mlx4_ARM_SRQ_wrapper(struct mlx4_dev *dev, int slave,
3695 struct mlx4_vhcr *vhcr,
3696 struct mlx4_cmd_mailbox *inbox,
3697 struct mlx4_cmd_mailbox *outbox,
3698 struct mlx4_cmd_info *cmd)
3699{
3700 int err;
3701 int srqn = vhcr->in_modifier;
3702 struct res_srq *srq;
3703
3704 err = get_res(dev, slave, srqn, RES_SRQ, &srq);
3705 if (err)
3706 return err;
3707
3708 if (srq->com.from_state != RES_SRQ_HW) {
3709 err = -EBUSY;
3710 goto out;
3711 }
3712
3713 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3714out:
3715 put_res(dev, slave, srqn, RES_SRQ);
3716 return err;
3717}
3718
3719int mlx4_GEN_QP_wrapper(struct mlx4_dev *dev, int slave,
3720 struct mlx4_vhcr *vhcr,
3721 struct mlx4_cmd_mailbox *inbox,
3722 struct mlx4_cmd_mailbox *outbox,
3723 struct mlx4_cmd_info *cmd)
3724{
3725 int err;
3726 int qpn = vhcr->in_modifier & 0x7fffff;
3727 struct res_qp *qp;
3728
3729 err = get_res(dev, slave, qpn, RES_QP, &qp);
3730 if (err)
3731 return err;
3732 if (qp->com.from_state != RES_QP_HW) {
3733 err = -EBUSY;
3734 goto out;
3735 }
3736
3737 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3738out:
3739 put_res(dev, slave, qpn, RES_QP);
3740 return err;
3741}
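
/*
 * Note: mlx4_GEN_QP_wrapper() is the common backend for most of the
 * QP transition wrappers below: take the QP busy via get_res(),
 * require it to be in RES_QP_HW, forward the command through
 * mlx4_DMA_wrapper(), then release with put_res(). The individual
 * wrappers only add paravirtualization fixups (pkey index, GID,
 * proxy/tunnel qkey, sched_queue port) before delegating here;
 * INIT2RTR is the exception, since it must also record the VF's
 * original VST parameters.
 */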
3742
Jack Morgenstein54679e12012-08-03 08:40:43 +00003743int mlx4_INIT2INIT_QP_wrapper(struct mlx4_dev *dev, int slave,
3744 struct mlx4_vhcr *vhcr,
3745 struct mlx4_cmd_mailbox *inbox,
3746 struct mlx4_cmd_mailbox *outbox,
3747 struct mlx4_cmd_info *cmd)
3748{
3749 struct mlx4_qp_context *context = inbox->buf + 8;
3750 adjust_proxy_tun_qkey(dev, vhcr, context);
3751 update_pkey_index(dev, slave, inbox);
3752 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3753}
3754
Matan Barak449fc482014-03-19 18:11:52 +02003755static int adjust_qp_sched_queue(struct mlx4_dev *dev, int slave,
3756 struct mlx4_qp_context *qpc,
3757 struct mlx4_cmd_mailbox *inbox)
3758{
3759 enum mlx4_qp_optpar optpar = be32_to_cpu(*(__be32 *)inbox->buf);
3760 u8 pri_sched_queue;
3761 int port = mlx4_slave_convert_port(
3762 dev, slave, (qpc->pri_path.sched_queue >> 6 & 1) + 1) - 1;
3763
3764 if (port < 0)
3765 return -EINVAL;
3766
3767 pri_sched_queue = (qpc->pri_path.sched_queue & ~(1 << 6)) |
3768 ((port & 1) << 6);
3769
Or Gerlitzf40e99e2015-05-21 15:14:08 +03003770 if (optpar & (MLX4_QP_OPTPAR_PRIMARY_ADDR_PATH | MLX4_QP_OPTPAR_SCHED_QUEUE) ||
3771 qpc->pri_path.sched_queue || mlx4_is_eth(dev, port + 1)) {
Matan Barak449fc482014-03-19 18:11:52 +02003772 qpc->pri_path.sched_queue = pri_sched_queue;
3773 }
3774
3775 if (optpar & MLX4_QP_OPTPAR_ALT_ADDR_PATH) {
3776 port = mlx4_slave_convert_port(
3777 dev, slave, (qpc->alt_path.sched_queue >> 6 & 1)
3778 + 1) - 1;
3779 if (port < 0)
3780 return -EINVAL;
3781 qpc->alt_path.sched_queue =
3782 (qpc->alt_path.sched_queue & ~(1 << 6)) |
3783 (port & 1) << 6;
3784 }
3785 return 0;
3786}
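
/*
 * Note: bit 6 of sched_queue selects the physical port (0 = port 1,
 * 1 = port 2). adjust_qp_sched_queue() rewrites that bit through
 * mlx4_slave_convert_port() so that the slave's logical port number
 * lands on the real physical port (relevant e.g. for single-ported
 * VFs), for the primary path and, when the optpar requests it, the
 * alternate path as well.
 */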
3787
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003788static int roce_verify_mac(struct mlx4_dev *dev, int slave,
3789 struct mlx4_qp_context *qpc,
3790 struct mlx4_cmd_mailbox *inbox)
3791{
3792 u64 mac;
3793 int port;
3794 u32 ts = (be32_to_cpu(qpc->flags) >> 16) & 0xff;
3795 u8 sched = *(u8 *)(inbox->buf + 64);
3796 u8 smac_ix;
3797
3798 port = (sched >> 6 & 1) + 1;
3799 if (mlx4_is_eth(dev, port) && (ts != MLX4_QP_ST_MLX)) {
3800 smac_ix = qpc->pri_path.grh_mylmc & 0x7f;
3801 if (mac_find_smac_ix_in_slave(dev, slave, port, smac_ix, &mac))
3802 return -ENOENT;
3803 }
3804 return 0;
3805}
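
/*
 * Note: for RoCE QPs (Ethernet port, transport other than MLX),
 * roce_verify_mac() checks that the source-MAC index in the low 7
 * bits of pri_path.grh_mylmc resolves to a MAC actually registered to
 * this slave on that port, so a VF cannot transmit with a MAC it does
 * not own.
 */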
3806
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003807int mlx4_INIT2RTR_QP_wrapper(struct mlx4_dev *dev, int slave,
3808 struct mlx4_vhcr *vhcr,
3809 struct mlx4_cmd_mailbox *inbox,
3810 struct mlx4_cmd_mailbox *outbox,
3811 struct mlx4_cmd_info *cmd)
3812{
Jack Morgenstein54679e12012-08-03 08:40:43 +00003813 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003814 struct mlx4_qp_context *qpc = inbox->buf + 8;
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003815 int qpn = vhcr->in_modifier & 0x7fffff;
3816 struct res_qp *qp;
3817 u8 orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003818 u8 orig_vlan_control = qpc->pri_path.vlan_control;
3819 u8 orig_fvl_rx = qpc->pri_path.fvl_rx;
3820 u8 orig_pri_path_fl = qpc->pri_path.fl;
3821 u8 orig_vlan_index = qpc->pri_path.vlan_index;
3822 u8 orig_feup = qpc->pri_path.feup;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003823
Matan Barak449fc482014-03-19 18:11:52 +02003824 err = adjust_qp_sched_queue(dev, slave, qpc, inbox);
3825 if (err)
3826 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003827 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_INIT2RTR, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003828 if (err)
3829 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003830
Jack Morgenstein2f5bb472014-03-12 12:00:40 +02003831 if (roce_verify_mac(dev, slave, qpc, inbox))
3832 return -EINVAL;
3833
Jack Morgenstein54679e12012-08-03 08:40:43 +00003834 update_pkey_index(dev, slave, inbox);
3835 update_gid(dev, inbox, (u8)slave);
3836 adjust_proxy_tun_qkey(dev, vhcr, qpc);
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003837 orig_sched_queue = qpc->pri_path.sched_queue;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003838
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003839 err = get_res(dev, slave, qpn, RES_QP, &qp);
3840 if (err)
3841 return err;
3842 if (qp->com.from_state != RES_QP_HW) {
3843 err = -EBUSY;
3844 goto out;
3845 }
3846
Maor Gottlieb9a892832015-10-15 14:44:38 +03003847 err = update_vport_qp_param(dev, inbox, slave, qpn);
3848 if (err)
3849 goto out;
3850
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003851 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3852out:
3853 /* if no error, save sched queue value passed in by VF. This is
3854 * essentially the QOS value provided by the VF. This will be useful
3855 * if we allow dynamic changes from VST back to VGT
3856 */
Rony Efraimf0f829b2013-11-07 12:19:51 +02003857 if (!err) {
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003858 qp->sched_queue = orig_sched_queue;
Rony Efraimf0f829b2013-11-07 12:19:51 +02003859 qp->vlan_control = orig_vlan_control;
3860 qp->fvl_rx = orig_fvl_rx;
3861 qp->pri_path_fl = orig_pri_path_fl;
3862 qp->vlan_index = orig_vlan_index;
3863 qp->feup = orig_feup;
3864 }
Jack Morgensteinb01978c2013-06-27 19:05:21 +03003865 put_res(dev, slave, qpn, RES_QP);
3866 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003867}
3868
3869int mlx4_RTR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3870 struct mlx4_vhcr *vhcr,
3871 struct mlx4_cmd_mailbox *inbox,
3872 struct mlx4_cmd_mailbox *outbox,
3873 struct mlx4_cmd_info *cmd)
3874{
3875 int err;
3876 struct mlx4_qp_context *context = inbox->buf + 8;
3877
Matan Barak449fc482014-03-19 18:11:52 +02003878 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3879 if (err)
3880 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003881 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTR2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003882 if (err)
3883 return err;
3884
3885 update_pkey_index(dev, slave, inbox);
3886 update_gid(dev, inbox, (u8)slave);
3887 adjust_proxy_tun_qkey(dev, vhcr, context);
3888 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3889}
3890
3891int mlx4_RTS2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3892 struct mlx4_vhcr *vhcr,
3893 struct mlx4_cmd_mailbox *inbox,
3894 struct mlx4_cmd_mailbox *outbox,
3895 struct mlx4_cmd_info *cmd)
3896{
3897 int err;
3898 struct mlx4_qp_context *context = inbox->buf + 8;
3899
Matan Barak449fc482014-03-19 18:11:52 +02003900 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3901 if (err)
3902 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003903 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_RTS2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003904 if (err)
3905 return err;
3906
3907 update_pkey_index(dev, slave, inbox);
3908 update_gid(dev, inbox, (u8)slave);
3909 adjust_proxy_tun_qkey(dev, vhcr, context);
3910 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3911}
3912
3913
3914int mlx4_SQERR2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3915 struct mlx4_vhcr *vhcr,
3916 struct mlx4_cmd_mailbox *inbox,
3917 struct mlx4_cmd_mailbox *outbox,
3918 struct mlx4_cmd_info *cmd)
3919{
3920 struct mlx4_qp_context *context = inbox->buf + 8;
Matan Barak449fc482014-03-19 18:11:52 +02003921 int err = adjust_qp_sched_queue(dev, slave, context, inbox);
3922 if (err)
3923 return err;
Jack Morgenstein54679e12012-08-03 08:40:43 +00003924 adjust_proxy_tun_qkey(dev, vhcr, context);
3925 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3926}
3927
3928int mlx4_SQD2SQD_QP_wrapper(struct mlx4_dev *dev, int slave,
3929 struct mlx4_vhcr *vhcr,
3930 struct mlx4_cmd_mailbox *inbox,
3931 struct mlx4_cmd_mailbox *outbox,
3932 struct mlx4_cmd_info *cmd)
3933{
3934 int err;
3935 struct mlx4_qp_context *context = inbox->buf + 8;
3936
Matan Barak449fc482014-03-19 18:11:52 +02003937 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3938 if (err)
3939 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003940 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2SQD, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003941 if (err)
3942 return err;
3943
3944 adjust_proxy_tun_qkey(dev, vhcr, context);
3945 update_gid(dev, inbox, (u8)slave);
3946 update_pkey_index(dev, slave, inbox);
3947 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3948}
3949
3950int mlx4_SQD2RTS_QP_wrapper(struct mlx4_dev *dev, int slave,
3951 struct mlx4_vhcr *vhcr,
3952 struct mlx4_cmd_mailbox *inbox,
3953 struct mlx4_cmd_mailbox *outbox,
3954 struct mlx4_cmd_info *cmd)
3955{
3956 int err;
3957 struct mlx4_qp_context *context = inbox->buf + 8;
3958
Matan Barak449fc482014-03-19 18:11:52 +02003959 err = adjust_qp_sched_queue(dev, slave, context, inbox);
3960 if (err)
3961 return err;
Jack Morgenstein99ec41d2014-05-29 16:31:03 +03003962 err = verify_qp_parameters(dev, vhcr, inbox, QP_TRANS_SQD2RTS, slave);
Jack Morgenstein54679e12012-08-03 08:40:43 +00003963 if (err)
3964 return err;
3965
3966 adjust_proxy_tun_qkey(dev, vhcr, context);
3967 update_gid(dev, inbox, (u8)slave);
3968 update_pkey_index(dev, slave, inbox);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00003969 return mlx4_GEN_QP_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3970}
3971
3972int mlx4_2RST_QP_wrapper(struct mlx4_dev *dev, int slave,
3973 struct mlx4_vhcr *vhcr,
3974 struct mlx4_cmd_mailbox *inbox,
3975 struct mlx4_cmd_mailbox *outbox,
3976 struct mlx4_cmd_info *cmd)
3977{
3978 int err;
3979 int qpn = vhcr->in_modifier & 0x7fffff;
3980 struct res_qp *qp;
3981
3982 err = qp_res_start_move_to(dev, slave, qpn, RES_QP_MAPPED, &qp, 0);
3983 if (err)
3984 return err;
3985 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
3986 if (err)
3987 goto ex_abort;
3988
3989 atomic_dec(&qp->mtt->ref_count);
3990 atomic_dec(&qp->rcq->ref_count);
3991 atomic_dec(&qp->scq->ref_count);
3992 if (qp->srq)
3993 atomic_dec(&qp->srq->ref_count);
3994 res_end_move(dev, slave, RES_QP, qpn);
3995 return 0;
3996
3997ex_abort:
3998 res_abort_move(dev, slave, RES_QP, qpn);
3999
4000 return err;
4001}
4002
4003static struct res_gid *find_gid(struct mlx4_dev *dev, int slave,
4004 struct res_qp *rqp, u8 *gid)
4005{
4006 struct res_gid *res;
4007
4008 list_for_each_entry(res, &rqp->mcg_list, list) {
4009 if (!memcmp(res->gid, gid, 16))
4010 return res;
4011 }
4012 return NULL;
4013}
4014
4015static int add_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004016 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004017 enum mlx4_steer_type steer, u64 reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004018{
4019 struct res_gid *res;
4020 int err;
4021
stephen hemminger31975e22017-08-15 10:29:19 -07004022 res = kzalloc(sizeof(*res), GFP_KERNEL);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004023 if (!res)
4024 return -ENOMEM;
4025
4026 spin_lock_irq(&rqp->mcg_spl);
4027 if (find_gid(dev, slave, rqp, gid)) {
4028 kfree(res);
4029 err = -EEXIST;
4030 } else {
4031 memcpy(res->gid, gid, 16);
4032 res->prot = prot;
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004033 res->steer = steer;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004034 res->reg_id = reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004035 list_add_tail(&res->list, &rqp->mcg_list);
4036 err = 0;
4037 }
4038 spin_unlock_irq(&rqp->mcg_spl);
4039
4040 return err;
4041}
4042
4043static int rem_mcg_res(struct mlx4_dev *dev, int slave, struct res_qp *rqp,
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004044 u8 *gid, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004045 enum mlx4_steer_type steer, u64 *reg_id)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004046{
4047 struct res_gid *res;
4048 int err;
4049
4050 spin_lock_irq(&rqp->mcg_spl);
4051 res = find_gid(dev, slave, rqp, gid);
Eugenia Emantayev9f5b6c62012-02-15 06:23:16 +00004052 if (!res || res->prot != prot || res->steer != steer)
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004053 err = -EINVAL;
4054 else {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004055 *reg_id = res->reg_id;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004056 list_del(&res->list);
4057 kfree(res);
4058 err = 0;
4059 }
4060 spin_unlock_irq(&rqp->mcg_spl);
4061
4062 return err;
4063}
4064
Matan Barak449fc482014-03-19 18:11:52 +02004065static int qp_attach(struct mlx4_dev *dev, int slave, struct mlx4_qp *qp,
4066 u8 gid[16], int block_loopback, enum mlx4_protocol prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004067 enum mlx4_steer_type type, u64 *reg_id)
4068{
4069 switch (dev->caps.steering_mode) {
Matan Barak449fc482014-03-19 18:11:52 +02004070 case MLX4_STEERING_MODE_DEVICE_MANAGED: {
4071 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4072 if (port < 0)
4073 return port;
4074 return mlx4_trans_to_dmfs_attach(dev, qp, gid, port,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004075 block_loopback, prot,
4076 reg_id);
Matan Barak449fc482014-03-19 18:11:52 +02004077 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004078 case MLX4_STEERING_MODE_B0:
Matan Barak449fc482014-03-19 18:11:52 +02004079 if (prot == MLX4_PROT_ETH) {
4080 int port = mlx4_slave_convert_port(dev, slave, gid[5]);
4081 if (port < 0)
4082 return port;
4083 gid[5] = port;
4084 }
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004085 return mlx4_qp_attach_common(dev, qp, gid,
4086 block_loopback, prot, type);
4087 default:
4088 return -EINVAL;
4089 }
4090}
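
/*
 * Note: qp_attach()/qp_detach() dispatch on the steering mode. With
 * device-managed flow steering the attach becomes a flow rule and
 * reg_id records the rule handle for the later detach; with B0
 * steering the legacy attach/detach path is used and reg_id is
 * meaningless. In both modes byte 5 of the GID carries the port,
 * which is translated via mlx4_slave_convert_port() (and, for B0
 * Ethernet, rewritten in place).
 */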
4091
Matan Barak449fc482014-03-19 18:11:52 +02004092static int qp_detach(struct mlx4_dev *dev, struct mlx4_qp *qp,
4093 u8 gid[16], enum mlx4_protocol prot,
4094 enum mlx4_steer_type type, u64 reg_id)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004095{
4096 switch (dev->caps.steering_mode) {
4097 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4098 return mlx4_flow_detach(dev, reg_id);
4099 case MLX4_STEERING_MODE_B0:
4100 return mlx4_qp_detach_common(dev, qp, gid, prot, type);
4101 default:
4102 return -EINVAL;
4103 }
4104}
4105
Jack Morgenstein531d9012014-05-04 17:07:22 +03004106static int mlx4_adjust_port(struct mlx4_dev *dev, int slave,
4107 u8 *gid, enum mlx4_protocol prot)
4108{
4109 int real_port;
4110
4111 if (prot != MLX4_PROT_ETH)
4112 return 0;
4113
4114 if (dev->caps.steering_mode == MLX4_STEERING_MODE_B0 ||
4115 dev->caps.steering_mode == MLX4_STEERING_MODE_DEVICE_MANAGED) {
4116 real_port = mlx4_slave_convert_port(dev, slave, gid[5]);
4117 if (real_port < 0)
4118 return -EINVAL;
4119 gid[5] = real_port;
4120 }
4121
4122 return 0;
4123}
4124
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004125int mlx4_QP_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4126 struct mlx4_vhcr *vhcr,
4127 struct mlx4_cmd_mailbox *inbox,
4128 struct mlx4_cmd_mailbox *outbox,
4129 struct mlx4_cmd_info *cmd)
4130{
4131 struct mlx4_qp qp; /* dummy for calling attach/detach */
4132 u8 *gid = inbox->buf;
4133 enum mlx4_protocol prot = (vhcr->in_modifier >> 28) & 0x7;
Or Gerlitz162344e2012-05-15 10:34:57 +00004134 int err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004135 int qpn;
4136 struct res_qp *rqp;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004137 u64 reg_id = 0;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004138 int attach = vhcr->op_modifier;
4139 int block_loopback = vhcr->in_modifier >> 31;
4140 u8 steer_type_mask = 2;
Eugenia Emantayev75c60622012-02-15 06:22:49 +00004141 enum mlx4_steer_type type = (gid[7] & steer_type_mask) >> 1;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004142
4143 qpn = vhcr->in_modifier & 0xffffff;
4144 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4145 if (err)
4146 return err;
4147
4148 qp.qpn = qpn;
4149 if (attach) {
Matan Barak449fc482014-03-19 18:11:52 +02004150 err = qp_attach(dev, slave, &qp, gid, block_loopback, prot,
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004151 type, &reg_id);
4152 if (err) {
4153 pr_err("Fail to attach rule to qp 0x%x\n", qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004154 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004155 }
4156 err = add_mcg_res(dev, slave, rqp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004157 if (err)
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004158 goto ex_detach;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004159 } else {
Jack Morgenstein531d9012014-05-04 17:07:22 +03004160 err = mlx4_adjust_port(dev, slave, gid, prot);
4161 if (err)
4162 goto ex_put;
4163
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004164 err = rem_mcg_res(dev, slave, rqp, gid, prot, type, &reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004165 if (err)
4166 goto ex_put;
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004167
4168 err = qp_detach(dev, &qp, gid, prot, type, reg_id);
4169 if (err)
4170 pr_err("Fail to detach rule from qp 0x%x reg_id = 0x%llx\n",
4171 qpn, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004172 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004173 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004174 return err;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004175
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004176ex_detach:
4177 qp_detach(dev, &qp, gid, prot, type, reg_id);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004178ex_put:
4179 put_res(dev, slave, qpn, RES_QP);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004180 return err;
4181}
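
/*
 * Illustrative decoding of the QP_ATTACH virtual command as parsed
 * above (a sketch, not a spec):
 *
 *	vhcr->in_modifier[23:0]   QP number
 *	vhcr->in_modifier[30:28]  protocol (enum mlx4_protocol)
 *	vhcr->in_modifier[31]     block loopback
 *	vhcr->op_modifier         1 = attach, 0 = detach
 *	gid[7] bit 1              steering type
 */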
4182
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004183/*
4184 * MAC validation for Flow Steering rules.
4185 * A VF can attach rules only with a MAC address that is assigned to it.
4186 */
4187static int validate_eth_header_mac(int slave, struct _rule_hw *eth_header,
4188 struct list_head *rlist)
4189{
4190 struct mac_res *res, *tmp;
4191 __be64 be_mac;
4192
4193	/* make sure it isn't a multicast or broadcast MAC */
4194 if (!is_multicast_ether_addr(eth_header->eth.dst_mac) &&
4195 !is_broadcast_ether_addr(eth_header->eth.dst_mac)) {
4196 list_for_each_entry_safe(res, tmp, rlist, list) {
4197 be_mac = cpu_to_be64(res->mac << 16);
dingtianhongc0623e52013-12-30 15:40:55 +08004198 if (ether_addr_equal((u8 *)&be_mac, eth_header->eth.dst_mac))
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004199 return 0;
4200 }
4201 pr_err("MAC %pM doesn't belong to VF %d, Steering rule rejected\n",
4202 eth_header->eth.dst_mac, slave);
4203 return -EINVAL;
4204 }
4205 return 0;
4206}
4207
4208/*
4209 * If the eth header is missing, append an eth header with a MAC
4210 * address assigned to the VF.
4211 */
4212static int add_eth_header(struct mlx4_dev *dev, int slave,
4213 struct mlx4_cmd_mailbox *inbox,
4214 struct list_head *rlist, int header_id)
4215{
4216 struct mac_res *res, *tmp;
4217 u8 port;
4218 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4219 struct mlx4_net_trans_rule_hw_eth *eth_header;
4220 struct mlx4_net_trans_rule_hw_ipv4 *ip_header;
4221 struct mlx4_net_trans_rule_hw_tcp_udp *l4_header;
4222 __be64 be_mac = 0;
4223 __be64 mac_msk = cpu_to_be64(MLX4_MAC_MASK << 16);
4224
4225 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Hadar Hen Zion015465f2013-01-30 23:07:02 +00004226 port = ctrl->port;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004227 eth_header = (struct mlx4_net_trans_rule_hw_eth *)(ctrl + 1);
4228
4229 /* Clear a space in the inbox for eth header */
4230 switch (header_id) {
4231 case MLX4_NET_TRANS_RULE_ID_IPV4:
4232 ip_header =
4233 (struct mlx4_net_trans_rule_hw_ipv4 *)(eth_header + 1);
4234 memmove(ip_header, eth_header,
4235 sizeof(*ip_header) + sizeof(*l4_header));
4236 break;
4237 case MLX4_NET_TRANS_RULE_ID_TCP:
4238 case MLX4_NET_TRANS_RULE_ID_UDP:
4239 l4_header = (struct mlx4_net_trans_rule_hw_tcp_udp *)
4240 (eth_header + 1);
4241 memmove(l4_header, eth_header, sizeof(*l4_header));
4242 break;
4243 default:
4244 return -EINVAL;
4245 }
4246 list_for_each_entry_safe(res, tmp, rlist, list) {
4247 if (port == res->port) {
4248 be_mac = cpu_to_be64(res->mac << 16);
4249 break;
4250 }
4251 }
4252 if (!be_mac) {
Joe Perches1a91de22014-05-07 12:52:57 -07004253 pr_err("Failed adding eth header to FS rule, Can't find matching MAC for port %d\n",
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004254 port);
4255 return -EINVAL;
4256 }
4257
4258 memset(eth_header, 0, sizeof(*eth_header));
4259 eth_header->size = sizeof(*eth_header) >> 2;
4260 eth_header->id = cpu_to_be16(__sw_id_hw[MLX4_NET_TRANS_RULE_ID_ETH]);
4261 memcpy(eth_header->dst_mac, &be_mac, ETH_ALEN);
4262 memcpy(eth_header->dst_mac_msk, &mac_msk, ETH_ALEN);
4263
4264 return 0;
4265
4266}
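
/*
 * Illustrative before/after of add_eth_header() for an IPv4 rule (a
 * sketch, not a spec):
 *
 *	before:	[ctrl][ipv4][tcp/udp]
 *	after:	[ctrl][eth: VF MAC + full MAC mask][ipv4][tcp/udp]
 *
 * The L3/L4 segments are shifted up by memmove() to open a slot the
 * size of the eth segment, which is then filled with the first MAC
 * registered for the rule's port; the caller bumps in_modifier
 * accordingly.
 */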
4267
Maor Gottlieb9a892832015-10-15 14:44:38 +03004268#define MLX4_UPD_QP_PATH_MASK_SUPPORTED ( \
4269 1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX |\
4270 1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)
Matan Barakce8d9e02014-05-15 15:29:27 +03004271int mlx4_UPDATE_QP_wrapper(struct mlx4_dev *dev, int slave,
4272 struct mlx4_vhcr *vhcr,
4273 struct mlx4_cmd_mailbox *inbox,
4274 struct mlx4_cmd_mailbox *outbox,
4275 struct mlx4_cmd_info *cmd_info)
4276{
4277 int err;
4278 u32 qpn = vhcr->in_modifier & 0xffffff;
4279 struct res_qp *rqp;
4280 u64 mac;
4281 unsigned port;
4282 u64 pri_addr_path_mask;
4283 struct mlx4_update_qp_context *cmd;
4284 int smac_index;
4285
4286 cmd = (struct mlx4_update_qp_context *)inbox->buf;
4287
4288 pri_addr_path_mask = be64_to_cpu(cmd->primary_addr_path_mask);
4289 if (cmd->qp_mask || cmd->secondary_addr_path_mask ||
4290 (pri_addr_path_mask & ~MLX4_UPD_QP_PATH_MASK_SUPPORTED))
4291 return -EPERM;
4292
Maor Gottlieb9a892832015-10-15 14:44:38 +03004293 if ((pri_addr_path_mask &
4294 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_SRC_CHECK_MC_LB)) &&
4295 !(dev->caps.flags2 &
4296 MLX4_DEV_CAP_FLAG2_UPDATE_QP_SRC_CHECK_LB)) {
Christophe Jaillet5d4de162016-07-02 14:31:05 +02004297 mlx4_warn(dev, "Src check LB for slave %d isn't supported\n",
4298 slave);
Or Gerlitz423b3ae2017-02-23 12:02:41 +02004299 return -EOPNOTSUPP;
Maor Gottlieb9a892832015-10-15 14:44:38 +03004300 }
4301
Matan Barakce8d9e02014-05-15 15:29:27 +03004302 /* Just change the smac for the QP */
4303 err = get_res(dev, slave, qpn, RES_QP, &rqp);
4304 if (err) {
4305 mlx4_err(dev, "Updating qpn 0x%x for slave %d rejected\n", qpn, slave);
4306 return err;
4307 }
4308
4309 port = (rqp->sched_queue >> 6 & 1) + 1;
Matan Barakb7834752014-09-10 16:41:55 +03004310
4311 if (pri_addr_path_mask & (1ULL << MLX4_UPD_QP_PATH_MASK_MAC_INDEX)) {
4312 smac_index = cmd->qp_context.pri_path.grh_mylmc;
4313 err = mac_find_smac_ix_in_slave(dev, slave, port,
4314 smac_index, &mac);
4315
4316 if (err) {
4317 mlx4_err(dev, "Failed to update qpn 0x%x, MAC is invalid. smac_ix: %d\n",
4318 qpn, smac_index);
4319 goto err_mac;
4320 }
Matan Barakce8d9e02014-05-15 15:29:27 +03004321 }
4322
4323 err = mlx4_cmd(dev, inbox->dma,
4324 vhcr->in_modifier, 0,
4325 MLX4_CMD_UPDATE_QP, MLX4_CMD_TIME_CLASS_A,
4326 MLX4_CMD_NATIVE);
4327 if (err) {
4328 mlx4_err(dev, "Failed to update qpn on qpn 0x%x, command failed\n", qpn);
4329 goto err_mac;
4330 }
4331
4332err_mac:
4333 put_res(dev, slave, qpn, RES_QP);
4334 return err;
4335}
4336
Moni Shoua78efed22015-12-06 18:07:40 +02004337static u32 qp_attach_mbox_size(void *mbox)
4338{
4339 u32 size = sizeof(struct mlx4_net_trans_rule_hw_ctrl);
4340 struct _rule_hw *rule_header;
4341
4342 rule_header = (struct _rule_hw *)(mbox + size);
4343
4344 while (rule_header->size) {
4345 size += rule_header->size * sizeof(u32);
4346 rule_header += 1;
4347 }
4348 return size;
4349}
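
/*
 * Note: qp_attach_mbox_size() computes the total rule size: the
 * control segment plus the chained _rule_hw segments that follow it,
 * each advertising its own length in units of u32, with a zero size
 * terminating the chain. The result is how many bytes get copied into
 * mirr_mbox below so the rule can be replayed on the other port when
 * the two ports are bonded.
 */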
4350
4351static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule);
4352
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004353int mlx4_QP_FLOW_STEERING_ATTACH_wrapper(struct mlx4_dev *dev, int slave,
4354 struct mlx4_vhcr *vhcr,
4355 struct mlx4_cmd_mailbox *inbox,
4356 struct mlx4_cmd_mailbox *outbox,
4357 struct mlx4_cmd_info *cmd)
4358{
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004359
4360 struct mlx4_priv *priv = mlx4_priv(dev);
4361 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4362 struct list_head *rlist = &tracker->slave_list[slave].res_list[RES_MAC];
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004363 int err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004364 int qpn;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004365 struct res_qp *rqp;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004366 struct mlx4_net_trans_rule_hw_ctrl *ctrl;
4367 struct _rule_hw *rule_header;
4368 int header_id;
Moni Shoua78efed22015-12-06 18:07:40 +02004369 struct res_fs_rule *rrule;
4370 u32 mbox_size;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004371
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004372 if (dev->caps.steering_mode !=
4373 MLX4_STEERING_MODE_DEVICE_MANAGED)
4374 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004375
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004376 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)inbox->buf;
Andrzej Hajda2b2b31c2015-12-14 11:05:58 +01004377 err = mlx4_slave_convert_port(dev, slave, ctrl->port);
4378 if (err <= 0)
Matan Barak449fc482014-03-19 18:11:52 +02004379 return -EINVAL;
Andrzej Hajda2b2b31c2015-12-14 11:05:58 +01004380 ctrl->port = err;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004381 qpn = be32_to_cpu(ctrl->qpn) & 0xffffff;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004382 err = get_res(dev, slave, qpn, RES_QP, &rqp);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004383 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004384 pr_err("Steering rule with qpn 0x%x rejected\n", qpn);
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004385 return err;
4386 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004387 rule_header = (struct _rule_hw *)(ctrl + 1);
4388 header_id = map_hw_to_sw_id(be16_to_cpu(rule_header->id));
4389
Matan Barak48564132015-05-31 09:30:15 +03004390 if (header_id == MLX4_NET_TRANS_RULE_ID_ETH)
Jack Morgenstein10b1c042016-12-29 18:37:13 +02004391 mlx4_handle_eth_header_mcast_prio(ctrl, rule_header);
Matan Barak48564132015-05-31 09:30:15 +03004392
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004393 switch (header_id) {
4394 case MLX4_NET_TRANS_RULE_ID_ETH:
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004395 if (validate_eth_header_mac(slave, rule_header, rlist)) {
4396 err = -EINVAL;
Moni Shoua78efed22015-12-06 18:07:40 +02004397 goto err_put_qp;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004398 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004399 break;
Jack Morgenstein60396682012-10-03 15:38:48 +00004400 case MLX4_NET_TRANS_RULE_ID_IB:
4401 break;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004402 case MLX4_NET_TRANS_RULE_ID_IPV4:
4403 case MLX4_NET_TRANS_RULE_ID_TCP:
4404 case MLX4_NET_TRANS_RULE_ID_UDP:
Joe Perches1a91de22014-05-07 12:52:57 -07004405 pr_warn("Can't attach FS rule without L2 headers, adding L2 header\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004406 if (add_eth_header(dev, slave, inbox, rlist, header_id)) {
4407 err = -EINVAL;
Moni Shoua78efed22015-12-06 18:07:40 +02004408 goto err_put_qp;
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004409 }
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004410 vhcr->in_modifier +=
4411 sizeof(struct mlx4_net_trans_rule_hw_eth) >> 2;
4412 break;
4413 default:
Joe Perches1a91de22014-05-07 12:52:57 -07004414 pr_err("Corrupted mailbox\n");
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004415 err = -EINVAL;
Moni Shoua78efed22015-12-06 18:07:40 +02004416 goto err_put_qp;
Hadar Hen Zion7fb40f82012-09-05 22:50:49 +00004417 }
4418
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004419 err = mlx4_cmd_imm(dev, inbox->dma, &vhcr->out_param,
4420 vhcr->in_modifier, 0,
4421 MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
4422 MLX4_CMD_NATIVE);
4423 if (err)
Moni Shoua78efed22015-12-06 18:07:40 +02004424 goto err_put_qp;
4425
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004426
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004427 err = add_res_range(dev, slave, vhcr->out_param, 1, RES_FS_RULE, qpn);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004428 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004429 mlx4_err(dev, "Fail to add flow steering resources\n");
Moni Shoua78efed22015-12-06 18:07:40 +02004430 goto err_detach;
4431 }
4432
4433 err = get_res(dev, slave, vhcr->out_param, RES_FS_RULE, &rrule);
4434 if (err)
4435 goto err_detach;
4436
4437 mbox_size = qp_attach_mbox_size(inbox->buf);
4438 rrule->mirr_mbox = kmalloc(mbox_size, GFP_KERNEL);
4439 if (!rrule->mirr_mbox) {
4440 err = -ENOMEM;
4441 goto err_put_rule;
4442 }
4443 rrule->mirr_mbox_size = mbox_size;
4444 rrule->mirr_rule_id = 0;
4445 memcpy(rrule->mirr_mbox, inbox->buf, mbox_size);
4446
4447	/* flip to the other port for the mirror rule */
4448 ctrl = (struct mlx4_net_trans_rule_hw_ctrl *)rrule->mirr_mbox;
4449 if (ctrl->port == 1)
4450 ctrl->port = 2;
4451 else
4452 ctrl->port = 1;
4453
4454 if (mlx4_is_bonded(dev))
4455 mlx4_do_mirror_rule(dev, rrule);
4456
4457 atomic_inc(&rqp->ref_count);
4458
4459err_put_rule:
4460 put_res(dev, slave, vhcr->out_param, RES_FS_RULE);
4461err_detach:
4462 /* detach rule on error */
4463 if (err)
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004464 mlx4_cmd(dev, vhcr->out_param, 0, 0,
Hadar Hen Zion2065b382012-12-06 17:11:58 +00004465 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004466 MLX4_CMD_NATIVE);
Moni Shoua78efed22015-12-06 18:07:40 +02004467err_put_qp:
Hadar Hen Ziona9c01e72012-12-06 17:11:57 +00004468 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004469 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004470}
4471
Moni Shoua78efed22015-12-06 18:07:40 +02004472static int mlx4_undo_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
4473{
4474 int err;
4475
4476 err = rem_res_range(dev, fs_rule->com.owner, fs_rule->com.res_id, 1, RES_FS_RULE, 0);
4477 if (err) {
4478 mlx4_err(dev, "Fail to remove flow steering resources\n");
4479 return err;
4480 }
4481
4482 mlx4_cmd(dev, fs_rule->com.res_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
4483 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
4484 return 0;
4485}
4486
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004487int mlx4_QP_FLOW_STEERING_DETACH_wrapper(struct mlx4_dev *dev, int slave,
4488 struct mlx4_vhcr *vhcr,
4489 struct mlx4_cmd_mailbox *inbox,
4490 struct mlx4_cmd_mailbox *outbox,
4491 struct mlx4_cmd_info *cmd)
4492{
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004493 int err;
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004494 struct res_qp *rqp;
4495 struct res_fs_rule *rrule;
Moni Shoua78efed22015-12-06 18:07:40 +02004496 u64 mirr_reg_id;
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004497 int qpn;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004498
Hadar Hen Zion0ff1fb62012-07-05 04:03:46 +00004499 if (dev->caps.steering_mode !=
4500 MLX4_STEERING_MODE_DEVICE_MANAGED)
4501 return -EOPNOTSUPP;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004502
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004503 err = get_res(dev, slave, vhcr->in_param, RES_FS_RULE, &rrule);
4504 if (err)
4505 return err;
Moni Shoua78efed22015-12-06 18:07:40 +02004506
4507 if (!rrule->mirr_mbox) {
4508 mlx4_err(dev, "Mirror rules cannot be removed explicitly\n");
4509 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
4510 return -EINVAL;
4511 }
4512 mirr_reg_id = rrule->mirr_rule_id;
4513 kfree(rrule->mirr_mbox);
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004514 qpn = rrule->qpn;
Moni Shoua78efed22015-12-06 18:07:40 +02004515
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004516	/* Release the rule from busy state before removal */
4517 put_res(dev, slave, vhcr->in_param, RES_FS_RULE);
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004518 err = get_res(dev, slave, qpn, RES_QP, &rqp);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004519 if (err)
4520 return err;
4521
Moni Shoua78efed22015-12-06 18:07:40 +02004522 if (mirr_reg_id && mlx4_is_bonded(dev)) {
4523 err = get_res(dev, slave, mirr_reg_id, RES_FS_RULE, &rrule);
4524 if (err) {
4525 mlx4_err(dev, "Fail to get resource of mirror rule\n");
4526 } else {
4527 put_res(dev, slave, mirr_reg_id, RES_FS_RULE);
4528 mlx4_undo_mirror_rule(dev, rrule);
4529 }
4530 }
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004531 err = rem_res_range(dev, slave, vhcr->in_param, 1, RES_FS_RULE, 0);
4532 if (err) {
Joe Perches1a91de22014-05-07 12:52:57 -07004533 mlx4_err(dev, "Fail to remove flow steering resources\n");
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004534 goto out;
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004535 }
4536
4537 err = mlx4_cmd(dev, vhcr->in_param, 0, 0,
4538 MLX4_QP_FLOW_STEERING_DETACH, MLX4_CMD_TIME_CLASS_A,
4539 MLX4_CMD_NATIVE);
Hadar Hen Zion2c473ae2013-03-21 05:55:55 +00004540 if (!err)
4541 atomic_dec(&rqp->ref_count);
4542out:
Jack Morgenstein3b01fe72016-12-29 18:37:09 +02004543 put_res(dev, slave, qpn, RES_QP);
Hadar Hen Zion1b9c6b02012-07-05 04:03:47 +00004544 return err;
Hadar Hen Zion8fcfb4d2012-07-05 04:03:45 +00004545}
4546
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004547enum {
4548 BUSY_MAX_RETRIES = 10
4549};
4550
4551int mlx4_QUERY_IF_STAT_wrapper(struct mlx4_dev *dev, int slave,
4552 struct mlx4_vhcr *vhcr,
4553 struct mlx4_cmd_mailbox *inbox,
4554 struct mlx4_cmd_mailbox *outbox,
4555 struct mlx4_cmd_info *cmd)
4556{
4557 int err;
4558 int index = vhcr->in_modifier & 0xffff;
4559
4560 err = get_res(dev, slave, index, RES_COUNTER, NULL);
4561 if (err)
4562 return err;
4563
4564 err = mlx4_DMA_wrapper(dev, slave, vhcr, inbox, outbox, cmd);
4565 put_res(dev, slave, index, RES_COUNTER);
4566 return err;
4567}
4568
4569static void detach_qp(struct mlx4_dev *dev, int slave, struct res_qp *rqp)
4570{
4571 struct res_gid *rgid;
4572 struct res_gid *tmp;
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004573 struct mlx4_qp qp; /* dummy for calling attach/detach */
4574
4575 list_for_each_entry_safe(rgid, tmp, &rqp->mcg_list, list) {
Hadar Hen Zionfab1e242013-04-11 01:56:41 +00004576 switch (dev->caps.steering_mode) {
4577 case MLX4_STEERING_MODE_DEVICE_MANAGED:
4578 mlx4_flow_detach(dev, rgid->reg_id);
4579 break;
4580 case MLX4_STEERING_MODE_B0:
4581 qp.qpn = rqp->local_qpn;
4582 (void) mlx4_qp_detach_common(dev, &qp, rgid->gid,
4583 rgid->prot, rgid->steer);
4584 break;
4585 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004586 list_del(&rgid->list);
4587 kfree(rgid);
4588 }
4589}
4590
4591static int _move_all_busy(struct mlx4_dev *dev, int slave,
4592 enum mlx4_resource type, int print)
4593{
4594 struct mlx4_priv *priv = mlx4_priv(dev);
4595 struct mlx4_resource_tracker *tracker =
4596 &priv->mfunc.master.res_tracker;
4597 struct list_head *rlist = &tracker->slave_list[slave].res_list[type];
4598 struct res_common *r;
4599 struct res_common *tmp;
4600 int busy;
4601
4602 busy = 0;
4603 spin_lock_irq(mlx4_tlock(dev));
4604 list_for_each_entry_safe(r, tmp, rlist, list) {
4605 if (r->owner == slave) {
4606 if (!r->removing) {
4607 if (r->state == RES_ANY_BUSY) {
4608 if (print)
4609 mlx4_dbg(dev,
Hadar Hen Zionaa1ec3d2012-07-05 04:03:42 +00004610 "%s id 0x%llx is busy\n",
Jack Morgenstein956463732014-06-08 13:49:45 +03004611 resource_str(type),
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004612 r->res_id);
4613 ++busy;
4614 } else {
4615 r->from_state = r->state;
4616 r->state = RES_ANY_BUSY;
4617 r->removing = 1;
4618 }
4619 }
4620 }
4621 }
4622 spin_unlock_irq(mlx4_tlock(dev));
4623
4624 return busy;
4625}
4626
4627static int move_all_busy(struct mlx4_dev *dev, int slave,
4628 enum mlx4_resource type)
4629{
4630 unsigned long begin;
4631 int busy;
4632
4633 begin = jiffies;
4634 do {
4635 busy = _move_all_busy(dev, slave, type, 0);
4636 if (time_after(jiffies, begin + 5 * HZ))
4637 break;
4638 if (busy)
4639 cond_resched();
4640 } while (busy);
4641
4642 if (busy)
4643 busy = _move_all_busy(dev, slave, type, 1);
4644
4645 return busy;
4646}
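
/*
 * Note: slave cleanup is two-phase. move_all_busy() retries
 * _move_all_busy() for up to 5 seconds, atomically marking every
 * resource of the given type RES_ANY_BUSY with removing set, so no
 * new references can be taken. The rem_slave_*() walkers below then
 * unwind each resource's state machine (HW -> mapped/allocated ->
 * reserved), issuing the matching HW2SW/2RST firmware commands on
 * behalf of the dying slave.
 */
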
4647static void rem_slave_qps(struct mlx4_dev *dev, int slave)
4648{
4649 struct mlx4_priv *priv = mlx4_priv(dev);
4650 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4651 struct list_head *qp_list =
4652 &tracker->slave_list[slave].res_list[RES_QP];
4653 struct res_qp *qp;
4654 struct res_qp *tmp;
4655 int state;
4656 u64 in_param;
4657 int qpn;
4658 int err;
4659
4660 err = move_all_busy(dev, slave, RES_QP);
4661 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004662 mlx4_warn(dev, "rem_slave_qps: Could not move all qps to busy for slave %d\n",
4663 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004664
4665 spin_lock_irq(mlx4_tlock(dev));
4666 list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
4667 spin_unlock_irq(mlx4_tlock(dev));
4668 if (qp->com.owner == slave) {
4669 qpn = qp->com.res_id;
4670 detach_qp(dev, slave, qp);
4671 state = qp->com.from_state;
4672 while (state != 0) {
4673 switch (state) {
4674 case RES_QP_RESERVED:
4675 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004676 rb_erase(&qp->com.node,
4677 &tracker->res_tree[RES_QP]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004678 list_del(&qp->com.list);
4679 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004680 if (!valid_reserved(dev, slave, qpn)) {
4681 __mlx4_qp_release_range(dev, qpn, 1);
4682 mlx4_release_resource(dev, slave,
4683 RES_QP, 1, 0);
4684 }
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004685 kfree(qp);
4686 state = 0;
4687 break;
4688 case RES_QP_MAPPED:
4689 if (!valid_reserved(dev, slave, qpn))
4690 __mlx4_qp_free_icm(dev, qpn);
4691 state = RES_QP_RESERVED;
4692 break;
4693 case RES_QP_HW:
4694 in_param = slave;
4695 err = mlx4_cmd(dev, in_param,
4696 qp->local_qpn, 2,
4697 MLX4_CMD_2RST_QP,
4698 MLX4_CMD_TIME_CLASS_A,
4699 MLX4_CMD_NATIVE);
4700 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004701 mlx4_dbg(dev, "rem_slave_qps: failed to move slave %d qpn %d to reset\n",
4702 slave, qp->local_qpn);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004703 atomic_dec(&qp->rcq->ref_count);
4704 atomic_dec(&qp->scq->ref_count);
4705 atomic_dec(&qp->mtt->ref_count);
4706 if (qp->srq)
4707 atomic_dec(&qp->srq->ref_count);
4708 state = RES_QP_MAPPED;
4709 break;
4710 default:
4711 state = 0;
4712 }
4713 }
4714 }
4715 spin_lock_irq(mlx4_tlock(dev));
4716 }
4717 spin_unlock_irq(mlx4_tlock(dev));
4718}
4719
4720static void rem_slave_srqs(struct mlx4_dev *dev, int slave)
4721{
4722 struct mlx4_priv *priv = mlx4_priv(dev);
4723 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4724 struct list_head *srq_list =
4725 &tracker->slave_list[slave].res_list[RES_SRQ];
4726 struct res_srq *srq;
4727 struct res_srq *tmp;
4728 int state;
4729 u64 in_param;
4730 LIST_HEAD(tlist);
4731 int srqn;
4732 int err;
4733
4734 err = move_all_busy(dev, slave, RES_SRQ);
4735 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004736 mlx4_warn(dev, "rem_slave_srqs: Could not move all srqs - too busy for slave %d\n",
4737 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004738
4739 spin_lock_irq(mlx4_tlock(dev));
4740 list_for_each_entry_safe(srq, tmp, srq_list, com.list) {
4741 spin_unlock_irq(mlx4_tlock(dev));
4742 if (srq->com.owner == slave) {
4743 srqn = srq->com.res_id;
4744 state = srq->com.from_state;
4745 while (state != 0) {
4746 switch (state) {
4747 case RES_SRQ_ALLOCATED:
4748 __mlx4_srq_free_icm(dev, srqn);
4749 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004750 rb_erase(&srq->com.node,
4751 &tracker->res_tree[RES_SRQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004752 list_del(&srq->com.list);
4753 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004754 mlx4_release_resource(dev, slave,
4755 RES_SRQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004756 kfree(srq);
4757 state = 0;
4758 break;
4759
4760 case RES_SRQ_HW:
4761 in_param = slave;
4762 err = mlx4_cmd(dev, in_param, srqn, 1,
4763 MLX4_CMD_HW2SW_SRQ,
4764 MLX4_CMD_TIME_CLASS_A,
4765 MLX4_CMD_NATIVE);
4766 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004767 mlx4_dbg(dev, "rem_slave_srqs: failed to move slave %d srq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004768 slave, srqn);
4769
4770 atomic_dec(&srq->mtt->ref_count);
4771 if (srq->cq)
4772 atomic_dec(&srq->cq->ref_count);
4773 state = RES_SRQ_ALLOCATED;
4774 break;
4775
4776 default:
4777 state = 0;
4778 }
4779 }
4780 }
4781 spin_lock_irq(mlx4_tlock(dev));
4782 }
4783 spin_unlock_irq(mlx4_tlock(dev));
4784}
4785
4786static void rem_slave_cqs(struct mlx4_dev *dev, int slave)
4787{
4788 struct mlx4_priv *priv = mlx4_priv(dev);
4789 struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
4790 struct list_head *cq_list =
4791 &tracker->slave_list[slave].res_list[RES_CQ];
4792 struct res_cq *cq;
4793 struct res_cq *tmp;
4794 int state;
4795 u64 in_param;
4796 LIST_HEAD(tlist);
4797 int cqn;
4798 int err;
4799
4800 err = move_all_busy(dev, slave, RES_CQ);
4801 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004802 mlx4_warn(dev, "rem_slave_cqs: Could not move all cqs - too busy for slave %d\n",
4803 slave);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004804
4805 spin_lock_irq(mlx4_tlock(dev));
4806 list_for_each_entry_safe(cq, tmp, cq_list, com.list) {
4807 spin_unlock_irq(mlx4_tlock(dev));
4808 if (cq->com.owner == slave && !atomic_read(&cq->ref_count)) {
4809 cqn = cq->com.res_id;
4810 state = cq->com.from_state;
4811 while (state != 0) {
4812 switch (state) {
4813 case RES_CQ_ALLOCATED:
4814 __mlx4_cq_free_icm(dev, cqn);
4815 spin_lock_irq(mlx4_tlock(dev));
Hadar Hen Zion4af1c042012-07-05 04:03:41 +00004816 rb_erase(&cq->com.node,
4817 &tracker->res_tree[RES_CQ]);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004818 list_del(&cq->com.list);
4819 spin_unlock_irq(mlx4_tlock(dev));
Jack Morgenstein146f3ef2013-11-03 10:03:25 +02004820 mlx4_release_resource(dev, slave,
4821 RES_CQ, 1, 0);
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004822 kfree(cq);
4823 state = 0;
4824 break;
4825
4826 case RES_CQ_HW:
4827 in_param = slave;
4828 err = mlx4_cmd(dev, in_param, cqn, 1,
4829 MLX4_CMD_HW2SW_CQ,
4830 MLX4_CMD_TIME_CLASS_A,
4831 MLX4_CMD_NATIVE);
4832 if (err)
Joe Perches1a91de22014-05-07 12:52:57 -07004833 mlx4_dbg(dev, "rem_slave_cqs: failed to move slave %d cq %d to SW ownership\n",
Eli Cohenc82e9aa2011-12-13 04:15:24 +00004834 slave, cqn);
4835 atomic_dec(&cq->mtt->ref_count);
4836 state = RES_CQ_ALLOCATED;
4837 break;
4838
4839 default:
4840 state = 0;
4841 }
4842 }
4843 }
4844 spin_lock_irq(mlx4_tlock(dev));
4845 }
4846 spin_unlock_irq(mlx4_tlock(dev));
4847}
4848
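/*
 * Memory regions (MPTs) unwind through three states: HW (owned by
 * firmware, needs HW2SW_MPT), MAPPED (ICM mapped for the key) and
 * RESERVED (key allocated only).  The MTT reference is dropped once
 * the MPT leaves firmware ownership.
 */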
static void rem_slave_mrs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *mpt_list =
		&tracker->slave_list[slave].res_list[RES_MPT];
	struct res_mpt *mpt;
	struct res_mpt *tmp;
	int state;
	u64 in_param;
	LIST_HEAD(tlist);
	int mptn;
	int err;

	err = move_all_busy(dev, slave, RES_MPT);
	if (err)
		mlx4_warn(dev, "rem_slave_mrs: Could not move all mpts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mpt, tmp, mpt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mpt->com.owner == slave) {
			mptn = mpt->com.res_id;
			state = mpt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MPT_RESERVED:
					__mlx4_mpt_release(dev, mpt->key);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mpt->com.node,
						 &tracker->res_tree[RES_MPT]);
					list_del(&mpt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave,
							      RES_MPT, 1, 0);
					kfree(mpt);
					state = 0;
					break;

				case RES_MPT_MAPPED:
					__mlx4_mpt_free_icm(dev, mpt->key);
					state = RES_MPT_RESERVED;
					break;

				case RES_MPT_HW:
					in_param = slave;
					err = mlx4_cmd(dev, in_param, mptn, 0,
						       MLX4_CMD_HW2SW_MPT,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_mrs: failed to move slave %d mpt %d to SW ownership\n",
							 slave, mptn);
					if (mpt->mtt)
						atomic_dec(&mpt->mtt->ref_count);
					state = RES_MPT_MAPPED;
					break;
				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

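/*
 * MTT entries are tracked as power-of-two ranges, so a single tracker
 * entry releases 1 << mtt->order entries back to the slave's quota.
 */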
static void rem_slave_mtts(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *mtt_list =
		&tracker->slave_list[slave].res_list[RES_MTT];
	struct res_mtt *mtt;
	struct res_mtt *tmp;
	int state;
	LIST_HEAD(tlist);
	int base;
	int err;

	err = move_all_busy(dev, slave, RES_MTT);
	if (err)
		mlx4_warn(dev, "rem_slave_mtts: Could not move all mtts - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(mtt, tmp, mtt_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (mtt->com.owner == slave) {
			base = mtt->com.res_id;
			state = mtt->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_MTT_ALLOCATED:
					__mlx4_free_mtt_range(dev, base,
							      mtt->order);
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&mtt->com.node,
						 &tracker->res_tree[RES_MTT]);
					list_del(&mtt->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					mlx4_release_resource(dev, slave, RES_MTT,
							      1 << mtt->order, 0);
					kfree(mtt);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

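/*
 * Replay the saved attach mailbox of an existing flow-steering rule to
 * create its mirror on the bonded port, and register the new rule under
 * the same owner.  The mirror copy carries no saved mailbox of its own,
 * which is how mlx4_mirror_fs_rules() below tells originals and mirrors
 * apart.
 */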
static int mlx4_do_mirror_rule(struct mlx4_dev *dev, struct res_fs_rule *fs_rule)
{
	struct mlx4_cmd_mailbox *mailbox;
	int err;
	struct res_fs_rule *mirr_rule;
	u64 reg_id;

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		return PTR_ERR(mailbox);

	if (!fs_rule->mirr_mbox) {
		mlx4_err(dev, "rule mirroring mailbox is null\n");
		mlx4_free_cmd_mailbox(dev, mailbox);
		return -EINVAL;
	}
	memcpy(mailbox->buf, fs_rule->mirr_mbox, fs_rule->mirr_mbox_size);
	err = mlx4_cmd_imm(dev, mailbox->dma, &reg_id, fs_rule->mirr_mbox_size >> 2, 0,
			   MLX4_QP_FLOW_STEERING_ATTACH, MLX4_CMD_TIME_CLASS_A,
			   MLX4_CMD_NATIVE);
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (err)
		goto err;

	err = add_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, fs_rule->qpn);
	if (err)
		goto err_detach;

	err = get_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE, &mirr_rule);
	if (err)
		goto err_rem;

	fs_rule->mirr_rule_id = reg_id;
	mirr_rule->mirr_rule_id = 0;
	mirr_rule->mirr_mbox_size = 0;
	mirr_rule->mirr_mbox = NULL;
	put_res(dev, fs_rule->com.owner, reg_id, RES_FS_RULE);

	return 0;
err_rem:
	rem_res_range(dev, fs_rule->com.owner, reg_id, 1, RES_FS_RULE, 0);
err_detach:
	mlx4_cmd(dev, reg_id, 0, 0, MLX4_QP_FLOW_STEERING_DETACH,
		 MLX4_CMD_TIME_CLASS_A, MLX4_CMD_NATIVE);
err:
	return err;
}

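/*
 * On bond, mirror every rule that still has its saved mailbox (the
 * originals); on unbond, undo every rule without one (the mirrors).
 * Errors are accumulated rather than aborting, so every rule is
 * attempted.
 */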
static int mlx4_mirror_fs_rules(struct mlx4_dev *dev, bool bond)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct rb_root *root = &tracker->res_tree[RES_FS_RULE];
	struct rb_node *p;
	struct res_fs_rule *fs_rule;
	int err = 0;
	LIST_HEAD(mirr_list);

	for (p = rb_first(root); p; p = rb_next(p)) {
		fs_rule = rb_entry(p, struct res_fs_rule, com.node);
		if ((bond && fs_rule->mirr_mbox_size) ||
		    (!bond && !fs_rule->mirr_mbox_size))
			list_add_tail(&fs_rule->mirr_list, &mirr_list);
	}

	list_for_each_entry(fs_rule, &mirr_list, mirr_list) {
		if (bond)
			err += mlx4_do_mirror_rule(dev, fs_rule);
		else
			err += mlx4_undo_mirror_rule(dev, fs_rule);
	}
	return err;
}

int mlx4_bond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, true);
}

int mlx4_unbond_fs_rules(struct mlx4_dev *dev)
{
	return mlx4_mirror_fs_rules(dev, false);
}

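/*
 * Flow-steering rules have a single ALLOCATED state: detach the rule
 * from firmware, then drop it (and any saved mirror mailbox) from the
 * tracker.
 */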
static void rem_slave_fs_rule(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker =
		&priv->mfunc.master.res_tracker;
	struct list_head *fs_rule_list =
		&tracker->slave_list[slave].res_list[RES_FS_RULE];
	struct res_fs_rule *fs_rule;
	struct res_fs_rule *tmp;
	int state;
	u64 base;
	int err;

	err = move_all_busy(dev, slave, RES_FS_RULE);
	if (err)
		mlx4_warn(dev, "rem_slave_fs_rule: Could not move all fs rules - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(fs_rule, tmp, fs_rule_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (fs_rule->com.owner == slave) {
			base = fs_rule->com.res_id;
			state = fs_rule->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_FS_RULE_ALLOCATED:
					/* detach rule */
					err = mlx4_cmd(dev, base, 0, 0,
						       MLX4_QP_FLOW_STEERING_DETACH,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);

					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&fs_rule->com.node,
						 &tracker->res_tree[RES_FS_RULE]);
					list_del(&fs_rule->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(fs_rule->mirr_mbox);
					kfree(fs_rule);
					state = 0;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

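/*
 * The tracker appears to encode the owning slave in the upper bits of
 * the EQ id, so only the low 10 bits (eqn & 0x3ff) name the hardware
 * EQ passed to HW2SW_EQ.
 */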
static void rem_slave_eqs(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *eq_list =
		&tracker->slave_list[slave].res_list[RES_EQ];
	struct res_eq *eq;
	struct res_eq *tmp;
	int err;
	int state;
	LIST_HEAD(tlist);
	int eqn;

	err = move_all_busy(dev, slave, RES_EQ);
	if (err)
		mlx4_warn(dev, "rem_slave_eqs: Could not move all eqs - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(eq, tmp, eq_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (eq->com.owner == slave) {
			eqn = eq->com.res_id;
			state = eq->com.from_state;
			while (state != 0) {
				switch (state) {
				case RES_EQ_RESERVED:
					spin_lock_irq(mlx4_tlock(dev));
					rb_erase(&eq->com.node,
						 &tracker->res_tree[RES_EQ]);
					list_del(&eq->com.list);
					spin_unlock_irq(mlx4_tlock(dev));
					kfree(eq);
					state = 0;
					break;

				case RES_EQ_HW:
					err = mlx4_cmd(dev, slave, eqn & 0x3ff,
						       1, MLX4_CMD_HW2SW_EQ,
						       MLX4_CMD_TIME_CLASS_A,
						       MLX4_CMD_NATIVE);
					if (err)
						mlx4_dbg(dev, "rem_slave_eqs: failed to move slave %d eqs %d to SW ownership\n",
							 slave, eqn & 0x3ff);
					atomic_dec(&eq->mtt->ref_count);
					state = RES_EQ_RESERVED;
					break;

				default:
					state = 0;
				}
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

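/*
 * Counters are freed in two phases: collect the ids into counters_arr
 * under the tracker lock, then call __mlx4_counter_free() outside the
 * lock, repeating until a pass finds no more entries for the slave.
 */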
static void rem_slave_counters(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *counter_list =
		&tracker->slave_list[slave].res_list[RES_COUNTER];
	struct res_counter *counter;
	struct res_counter *tmp;
	int err;
	int *counters_arr = NULL;
	int i, j;

	err = move_all_busy(dev, slave, RES_COUNTER);
	if (err)
		mlx4_warn(dev, "rem_slave_counters: Could not move all counters - too busy for slave %d\n",
			  slave);

	counters_arr = kmalloc_array(dev->caps.max_counters,
				     sizeof(*counters_arr), GFP_KERNEL);
	if (!counters_arr)
		return;

	do {
		i = 0;
		j = 0;
		spin_lock_irq(mlx4_tlock(dev));
		list_for_each_entry_safe(counter, tmp, counter_list, com.list) {
			if (counter->com.owner == slave) {
				counters_arr[i++] = counter->com.res_id;
				rb_erase(&counter->com.node,
					 &tracker->res_tree[RES_COUNTER]);
				list_del(&counter->com.list);
				kfree(counter);
			}
		}
		spin_unlock_irq(mlx4_tlock(dev));

		while (j < i) {
			__mlx4_counter_free(dev, counters_arr[j++]);
			mlx4_release_resource(dev, slave, RES_COUNTER, 1, 0);
		}
	} while (i);

	kfree(counters_arr);
}

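/*
 * XRC domains have no firmware state to unwind; drop them from the
 * tracker and return the xrcdn to the allocator under a single lock
 * hold.
 */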
static void rem_slave_xrcdns(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);
	struct mlx4_resource_tracker *tracker = &priv->mfunc.master.res_tracker;
	struct list_head *xrcdn_list =
		&tracker->slave_list[slave].res_list[RES_XRCD];
	struct res_xrcdn *xrcd;
	struct res_xrcdn *tmp;
	int err;
	int xrcdn;

	err = move_all_busy(dev, slave, RES_XRCD);
	if (err)
		mlx4_warn(dev, "rem_slave_xrcdns: Could not move all xrcdns - too busy for slave %d\n",
			  slave);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(xrcd, tmp, xrcdn_list, com.list) {
		if (xrcd->com.owner == slave) {
			xrcdn = xrcd->com.res_id;
			rb_erase(&xrcd->com.node, &tracker->res_tree[RES_XRCD]);
			list_del(&xrcd->com.list);
			kfree(xrcd);
			__mlx4_xrcd_free(dev, xrcdn);
		}
	}
	spin_unlock_irq(mlx4_tlock(dev));
}

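/*
 * Called when a slave (VF) goes away.  The teardown order is
 * deliberate: resources that hold references (QPs, SRQs, CQs, MRs,
 * EQs) are torn down before the MTTs they pin, all under the
 * per-slave tracker mutex.
 */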
void mlx4_delete_all_resources_for_slave(struct mlx4_dev *dev, int slave)
{
	struct mlx4_priv *priv = mlx4_priv(dev);

	mlx4_reset_roce_gids(dev, slave);
	mutex_lock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
	rem_slave_vlans(dev, slave);
	rem_slave_macs(dev, slave);
	rem_slave_fs_rule(dev, slave);
	rem_slave_qps(dev, slave);
	rem_slave_srqs(dev, slave);
	rem_slave_cqs(dev, slave);
	rem_slave_mrs(dev, slave);
	rem_slave_eqs(dev, slave);
	rem_slave_mtts(dev, slave);
	rem_slave_counters(dev, slave);
	rem_slave_xrcdns(dev, slave);
	mutex_unlock(&priv->mfunc.master.res_tracker.slave_list[slave].mutex);
}

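/* Fold the per-VF QoS vport into an UPDATE_QP context via the QOS_VPP mask. */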
static void update_qos_vpp(struct mlx4_update_qp_context *ctx,
			   struct mlx4_vf_immed_vlan_work *work)
{
	ctx->qp_mask |= cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_QOS_VPP);
	ctx->qp_context.qos_vport = work->qos_vport;
}

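/*
 * Deferred work for an immediate VST VLAN change on a VF: rebuild the
 * vlan_control policy from the new vlan id, protocol and link state,
 * then issue UPDATE_QP on every eligible QP of the slave on that port
 * (skipping reserved, RSS and not-yet-INIT2RTR QPs).  Only if every
 * update succeeded is the old VLAN registration dropped.
 */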
void mlx4_vf_immed_vlan_work_handler(struct work_struct *_work)
{
	struct mlx4_vf_immed_vlan_work *work =
		container_of(_work, struct mlx4_vf_immed_vlan_work, work);
	struct mlx4_cmd_mailbox *mailbox;
	struct mlx4_update_qp_context *upd_context;
	struct mlx4_dev *dev = &work->priv->dev;
	struct mlx4_resource_tracker *tracker =
		&work->priv->mfunc.master.res_tracker;
	struct list_head *qp_list =
		&tracker->slave_list[work->slave].res_list[RES_QP];
	struct res_qp *qp;
	struct res_qp *tmp;
	u64 qp_path_mask_vlan_ctrl =
		((1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_TX_BLOCK_TAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_UNTAGGED) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_1P) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_RX_BLOCK_TAGGED));

	u64 qp_path_mask = ((1ULL << MLX4_UPD_QP_PATH_MASK_VLAN_INDEX) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_CV) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_SV) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_ETH_HIDE_CQE_VLAN) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FEUP) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_FVL_RX) |
		 (1ULL << MLX4_UPD_QP_PATH_MASK_SCHED_QUEUE));

	int err;
	int port, errors = 0;
	u8 vlan_control;

	if (mlx4_is_slave(dev)) {
		mlx4_warn(dev, "Trying to update-qp in slave %d\n",
			  work->slave);
		goto out;
	}

	mailbox = mlx4_alloc_cmd_mailbox(dev);
	if (IS_ERR(mailbox))
		goto out;
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_LINK_DISABLE) /* block all */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (!work->vlan_id)
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_TAGGED;
	else if (work->vlan_proto == htons(ETH_P_8021AD))
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;
	else /* vst 802.1Q */
		vlan_control = MLX4_VLAN_CTRL_ETH_TX_BLOCK_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_PRIO_TAGGED |
			MLX4_VLAN_CTRL_ETH_RX_BLOCK_UNTAGGED;

	upd_context = mailbox->buf;
	upd_context->qp_mask = cpu_to_be64(1ULL << MLX4_UPD_QP_MASK_VSD);

	spin_lock_irq(mlx4_tlock(dev));
	list_for_each_entry_safe(qp, tmp, qp_list, com.list) {
		spin_unlock_irq(mlx4_tlock(dev));
		if (qp->com.owner == work->slave) {
			if (qp->com.from_state != RES_QP_HW ||
			    !qp->sched_queue || /* no INIT2RTR trans yet */
			    mlx4_is_qp_reserved(dev, qp->local_qpn) ||
			    qp->qpc_flags & (1 << MLX4_RSS_QPC_FLAG_OFFSET)) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			port = (qp->sched_queue >> 6 & 1) + 1;
			if (port != work->port) {
				spin_lock_irq(mlx4_tlock(dev));
				continue;
			}
			if (MLX4_QP_ST_RC == ((qp->qpc_flags >> 16) & 0xff))
				upd_context->primary_addr_path_mask = cpu_to_be64(qp_path_mask);
			else
				upd_context->primary_addr_path_mask =
					cpu_to_be64(qp_path_mask | qp_path_mask_vlan_ctrl);
			if (work->vlan_id == MLX4_VGT) {
				upd_context->qp_context.param3 = qp->param3;
				upd_context->qp_context.pri_path.vlan_control = qp->vlan_control;
				upd_context->qp_context.pri_path.fvl_rx = qp->fvl_rx;
				upd_context->qp_context.pri_path.vlan_index = qp->vlan_index;
				upd_context->qp_context.pri_path.fl = qp->pri_path_fl;
				upd_context->qp_context.pri_path.feup = qp->feup;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue;
			} else {
				upd_context->qp_context.param3 = qp->param3 & ~cpu_to_be32(MLX4_STRIP_VLAN);
				upd_context->qp_context.pri_path.vlan_control = vlan_control;
				upd_context->qp_context.pri_path.vlan_index = work->vlan_ix;
				upd_context->qp_context.pri_path.fvl_rx =
					qp->fvl_rx | MLX4_FVL_RX_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.fl =
					qp->pri_path_fl | MLX4_FL_ETH_HIDE_CQE_VLAN;
				if (work->vlan_proto == htons(ETH_P_8021AD))
					upd_context->qp_context.pri_path.fl |= MLX4_FL_SV;
				else
					upd_context->qp_context.pri_path.fl |= MLX4_FL_CV;
				upd_context->qp_context.pri_path.feup =
					qp->feup | MLX4_FEUP_FORCE_ETH_UP | MLX4_FVL_FORCE_ETH_VLAN;
				upd_context->qp_context.pri_path.sched_queue =
					qp->sched_queue & 0xC7;
				upd_context->qp_context.pri_path.sched_queue |=
					((work->qos & 0x7) << 3);

				if (dev->caps.flags2 &
				    MLX4_DEV_CAP_FLAG2_QOS_VPP)
					update_qos_vpp(upd_context, work);
			}

			err = mlx4_cmd(dev, mailbox->dma,
				       qp->local_qpn & 0xffffff,
				       0, MLX4_CMD_UPDATE_QP,
				       MLX4_CMD_TIME_CLASS_C, MLX4_CMD_NATIVE);
			if (err) {
				mlx4_info(dev, "UPDATE_QP failed for slave %d, port %d, qpn %d (%d)\n",
					  work->slave, port, qp->local_qpn, err);
				errors++;
			}
		}
		spin_lock_irq(mlx4_tlock(dev));
	}
	spin_unlock_irq(mlx4_tlock(dev));
	mlx4_free_cmd_mailbox(dev, mailbox);

	if (errors)
		mlx4_err(dev, "%d UPDATE_QP failures for slave %d, port %d\n",
			 errors, work->slave, work->port);

	/* unregister previous vlan_id if needed and we had no errors
	 * while updating the QPs
	 */
	if (work->flags & MLX4_VF_IMMED_VLAN_FLAG_VLAN && !errors &&
	    NO_INDX != work->orig_vlan_ix)
		__mlx4_unregister_vlan(&work->priv->dev, work->port,
				       work->orig_vlan_id);
out:
	kfree(work);
	return;
}