blob: 9b17bc064cc899d57a1f38fa076e19098b0282d4 [file] [log] [blame]
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001/*
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
Amir Vadaif62b8bb82015-05-28 22:28:48 +03003 *
4 * This software is available to you under a choice of one of two
5 * licenses. You may choose to be licensed under the terms of the GNU
6 * General Public License (GPL) Version 2, available from the file
7 * COPYING in the main directory of this source tree, or the
8 * OpenIB.org BSD license below:
9 *
10 * Redistribution and use in source and binary forms, with or
11 * without modification, are permitted provided that the following
12 * conditions are met:
13 *
14 * - Redistributions of source code must retain the above
15 * copyright notice, this list of conditions and the following
16 * disclaimer.
17 *
18 * - Redistributions in binary form must reproduce the above
19 * copyright notice, this list of conditions and the following
20 * disclaimer in the documentation and/or other materials
21 * provided with the distribution.
22 *
23 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
24 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
25 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
26 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
27 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
28 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
29 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
30 * SOFTWARE.
31 */
32
Amir Vadaie8f887a2016-03-08 12:42:36 +020033#include <net/tc_act/tc_gact.h>
34#include <net/pkt_cls.h>
Maor Gottlieb86d722a2015-12-10 17:12:44 +020035#include <linux/mlx5/fs.h>
Matthew Finlayb3f63c32016-02-22 18:17:32 +020036#include <net/vxlan.h>
Amir Vadaif62b8bb82015-05-28 22:28:48 +030037#include "en.h"
Amir Vadaie8f887a2016-03-08 12:42:36 +020038#include "en_tc.h"
Saeed Mahameed66e49de2015-12-01 18:03:25 +020039#include "eswitch.h"
Matthew Finlayb3f63c32016-02-22 18:17:32 +020040#include "vxlan.h"
Amir Vadaif62b8bb82015-05-28 22:28:48 +030041
42struct mlx5e_rq_param {
43 u32 rqc[MLX5_ST_SZ_DW(rqc)];
44 struct mlx5_wq_param wq;
45};
46
47struct mlx5e_sq_param {
48 u32 sqc[MLX5_ST_SZ_DW(sqc)];
49 struct mlx5_wq_param wq;
Achiad Shochat58d52292015-07-23 23:35:58 +030050 u16 max_inline;
Tariq Toukand3c9bc22016-04-20 22:02:14 +030051 bool icosq;
Amir Vadaif62b8bb82015-05-28 22:28:48 +030052};
53
54struct mlx5e_cq_param {
55 u32 cqc[MLX5_ST_SZ_DW(cqc)];
56 struct mlx5_wq_param wq;
57 u16 eq_ix;
58};
59
60struct mlx5e_channel_param {
61 struct mlx5e_rq_param rq;
62 struct mlx5e_sq_param sq;
Tariq Toukand3c9bc22016-04-20 22:02:14 +030063 struct mlx5e_sq_param icosq;
Amir Vadaif62b8bb82015-05-28 22:28:48 +030064 struct mlx5e_cq_param rx_cq;
65 struct mlx5e_cq_param tx_cq;
Tariq Toukand3c9bc22016-04-20 22:02:14 +030066 struct mlx5e_cq_param icosq_cq;
Amir Vadaif62b8bb82015-05-28 22:28:48 +030067};
68
69static void mlx5e_update_carrier(struct mlx5e_priv *priv)
70{
71 struct mlx5_core_dev *mdev = priv->mdev;
72 u8 port_state;
73
74 port_state = mlx5_query_vport_state(mdev,
Saeed Mahameede7546512015-12-01 18:03:13 +020075 MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);
Amir Vadaif62b8bb82015-05-28 22:28:48 +030076
77 if (port_state == VPORT_STATE_UP)
78 netif_carrier_on(priv->netdev);
79 else
80 netif_carrier_off(priv->netdev);
81}
82
83static void mlx5e_update_carrier_work(struct work_struct *work)
84{
85 struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
86 update_carrier_work);
87
88 mutex_lock(&priv->state_lock);
89 if (test_bit(MLX5E_STATE_OPENED, &priv->state))
90 mlx5e_update_carrier(priv);
91 mutex_unlock(&priv->state_lock);
92}
93
Gal Pressmanefea3892015-08-04 14:05:47 +030094static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
95{
96 struct mlx5_core_dev *mdev = priv->mdev;
97 struct mlx5e_pport_stats *s = &priv->stats.pport;
98 u32 *in;
99 u32 *out;
100 int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
101
102 in = mlx5_vzalloc(sz);
103 out = mlx5_vzalloc(sz);
104 if (!in || !out)
105 goto free_out;
106
107 MLX5_SET(ppcnt_reg, in, local_port, 1);
108
109 MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
110 mlx5_core_access_reg(mdev, in, sz, out,
111 sz, MLX5_REG_PPCNT, 0, 0);
112 memcpy(s->IEEE_802_3_counters,
113 MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
114 sizeof(s->IEEE_802_3_counters));
115
116 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
117 mlx5_core_access_reg(mdev, in, sz, out,
118 sz, MLX5_REG_PPCNT, 0, 0);
119 memcpy(s->RFC_2863_counters,
120 MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
121 sizeof(s->RFC_2863_counters));
122
123 MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
124 mlx5_core_access_reg(mdev, in, sz, out,
125 sz, MLX5_REG_PPCNT, 0, 0);
126 memcpy(s->RFC_2819_counters,
127 MLX5_ADDR_OF(ppcnt_reg, out, counter_set),
128 sizeof(s->RFC_2819_counters));
129
130free_out:
131 kvfree(in);
132 kvfree(out);
133}
134
Rana Shahout593cf332016-04-20 22:02:10 +0300135static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
136{
137 struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;
138
139 if (!priv->q_counter)
140 return;
141
142 mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
143 &qcnt->rx_out_of_buffer);
144}
145
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300146void mlx5e_update_stats(struct mlx5e_priv *priv)
147{
148 struct mlx5_core_dev *mdev = priv->mdev;
149 struct mlx5e_vport_stats *s = &priv->stats.vport;
150 struct mlx5e_rq_stats *rq_stats;
151 struct mlx5e_sq_stats *sq_stats;
152 u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
153 u32 *out;
154 int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
155 u64 tx_offload_none;
156 int i, j;
157
158 out = mlx5_vzalloc(outlen);
159 if (!out)
160 return;
161
162 /* Collect firts the SW counters and then HW for consistency */
Gal Pressmanfaf44782016-02-29 21:17:15 +0200163 s->rx_packets = 0;
164 s->rx_bytes = 0;
165 s->tx_packets = 0;
166 s->tx_bytes = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300167 s->tso_packets = 0;
168 s->tso_bytes = 0;
Matthew Finlay89db09e2016-02-22 18:17:34 +0200169 s->tso_inner_packets = 0;
170 s->tso_inner_bytes = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300171 s->tx_queue_stopped = 0;
172 s->tx_queue_wake = 0;
173 s->tx_queue_dropped = 0;
Matthew Finlay89db09e2016-02-22 18:17:34 +0200174 s->tx_csum_inner = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300175 tx_offload_none = 0;
176 s->lro_packets = 0;
177 s->lro_bytes = 0;
178 s->rx_csum_none = 0;
Achiad Shochatbbceefc2015-08-16 16:04:52 +0300179 s->rx_csum_sw = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300180 s->rx_wqe_err = 0;
Tariq Toukan461017c2016-04-20 22:02:13 +0300181 s->rx_mpwqe_filler = 0;
Tariq Toukanbc77b242016-04-20 22:02:15 +0300182 s->rx_mpwqe_frag = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300183 for (i = 0; i < priv->params.num_channels; i++) {
184 rq_stats = &priv->channel[i]->rq.stats;
185
Gal Pressmanfaf44782016-02-29 21:17:15 +0200186 s->rx_packets += rq_stats->packets;
187 s->rx_bytes += rq_stats->bytes;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300188 s->lro_packets += rq_stats->lro_packets;
189 s->lro_bytes += rq_stats->lro_bytes;
190 s->rx_csum_none += rq_stats->csum_none;
Achiad Shochatbbceefc2015-08-16 16:04:52 +0300191 s->rx_csum_sw += rq_stats->csum_sw;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300192 s->rx_wqe_err += rq_stats->wqe_err;
Tariq Toukan461017c2016-04-20 22:02:13 +0300193 s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
Tariq Toukanbc77b242016-04-20 22:02:15 +0300194 s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300195
Achiad Shochata4418a62015-07-29 15:05:41 +0300196 for (j = 0; j < priv->params.num_tc; j++) {
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300197 sq_stats = &priv->channel[i]->sq[j].stats;
198
Gal Pressmanfaf44782016-02-29 21:17:15 +0200199 s->tx_packets += sq_stats->packets;
200 s->tx_bytes += sq_stats->bytes;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300201 s->tso_packets += sq_stats->tso_packets;
202 s->tso_bytes += sq_stats->tso_bytes;
Matthew Finlay89db09e2016-02-22 18:17:34 +0200203 s->tso_inner_packets += sq_stats->tso_inner_packets;
204 s->tso_inner_bytes += sq_stats->tso_inner_bytes;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300205 s->tx_queue_stopped += sq_stats->stopped;
206 s->tx_queue_wake += sq_stats->wake;
207 s->tx_queue_dropped += sq_stats->dropped;
Matthew Finlay89db09e2016-02-22 18:17:34 +0200208 s->tx_csum_inner += sq_stats->csum_offload_inner;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300209 tx_offload_none += sq_stats->csum_offload_none;
210 }
211 }
212
213 /* HW counters */
214 memset(in, 0, sizeof(in));
215
216 MLX5_SET(query_vport_counter_in, in, opcode,
217 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
218 MLX5_SET(query_vport_counter_in, in, op_mod, 0);
219 MLX5_SET(query_vport_counter_in, in, other_vport, 0);
220
221 memset(out, 0, outlen);
222
223 if (mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen))
224 goto free_out;
225
226#define MLX5_GET_CTR(p, x) \
227 MLX5_GET64(query_vport_counter_out, p, x)
228
229 s->rx_error_packets =
230 MLX5_GET_CTR(out, received_errors.packets);
231 s->rx_error_bytes =
232 MLX5_GET_CTR(out, received_errors.octets);
233 s->tx_error_packets =
234 MLX5_GET_CTR(out, transmit_errors.packets);
235 s->tx_error_bytes =
236 MLX5_GET_CTR(out, transmit_errors.octets);
237
238 s->rx_unicast_packets =
239 MLX5_GET_CTR(out, received_eth_unicast.packets);
240 s->rx_unicast_bytes =
241 MLX5_GET_CTR(out, received_eth_unicast.octets);
242 s->tx_unicast_packets =
243 MLX5_GET_CTR(out, transmitted_eth_unicast.packets);
244 s->tx_unicast_bytes =
245 MLX5_GET_CTR(out, transmitted_eth_unicast.octets);
246
247 s->rx_multicast_packets =
248 MLX5_GET_CTR(out, received_eth_multicast.packets);
249 s->rx_multicast_bytes =
250 MLX5_GET_CTR(out, received_eth_multicast.octets);
251 s->tx_multicast_packets =
252 MLX5_GET_CTR(out, transmitted_eth_multicast.packets);
253 s->tx_multicast_bytes =
254 MLX5_GET_CTR(out, transmitted_eth_multicast.octets);
255
256 s->rx_broadcast_packets =
257 MLX5_GET_CTR(out, received_eth_broadcast.packets);
258 s->rx_broadcast_bytes =
259 MLX5_GET_CTR(out, received_eth_broadcast.octets);
260 s->tx_broadcast_packets =
261 MLX5_GET_CTR(out, transmitted_eth_broadcast.packets);
262 s->tx_broadcast_bytes =
263 MLX5_GET_CTR(out, transmitted_eth_broadcast.octets);
264
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300265 /* Update calculated offload counters */
Matthew Finlay89db09e2016-02-22 18:17:34 +0200266 s->tx_csum_offload = s->tx_packets - tx_offload_none - s->tx_csum_inner;
Achiad Shochatbbceefc2015-08-16 16:04:52 +0300267 s->rx_csum_good = s->rx_packets - s->rx_csum_none -
268 s->rx_csum_sw;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300269
Gal Pressmanefea3892015-08-04 14:05:47 +0300270 mlx5e_update_pport_counters(priv);
Rana Shahout593cf332016-04-20 22:02:10 +0300271 mlx5e_update_q_counter(priv);
272
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300273free_out:
274 kvfree(out);
275}
276
277static void mlx5e_update_stats_work(struct work_struct *work)
278{
279 struct delayed_work *dwork = to_delayed_work(work);
280 struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
281 update_stats_work);
282 mutex_lock(&priv->state_lock);
283 if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
284 mlx5e_update_stats(priv);
285 schedule_delayed_work(dwork,
286 msecs_to_jiffies(
287 MLX5E_UPDATE_STATS_INTERVAL));
288 }
289 mutex_unlock(&priv->state_lock);
290}
291
Tariq Toukandaa21562016-03-02 00:13:32 +0200292static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
293 enum mlx5_dev_event event, unsigned long param)
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300294{
Tariq Toukandaa21562016-03-02 00:13:32 +0200295 struct mlx5e_priv *priv = vpriv;
296
297 if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state))
298 return;
299
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300300 switch (event) {
301 case MLX5_DEV_EVENT_PORT_UP:
302 case MLX5_DEV_EVENT_PORT_DOWN:
303 schedule_work(&priv->update_carrier_work);
304 break;
305
306 default:
307 break;
308 }
309}
310
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300311static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
312{
313 set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
314}
315
316static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
317{
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300318 clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLE, &priv->state);
Tariq Toukandaa21562016-03-02 00:13:32 +0200319 synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300320}
321
Saeed Mahameedfacc9692015-06-11 14:47:27 +0300322#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
323#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
324
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300325static int mlx5e_create_rq(struct mlx5e_channel *c,
326 struct mlx5e_rq_param *param,
327 struct mlx5e_rq *rq)
328{
329 struct mlx5e_priv *priv = c->priv;
330 struct mlx5_core_dev *mdev = priv->mdev;
331 void *rqc = param->rqc;
332 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
Tariq Toukan461017c2016-04-20 22:02:13 +0300333 u32 byte_count;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300334 int wq_sz;
335 int err;
336 int i;
337
Saeed Mahameed311c7c72015-07-23 23:35:57 +0300338 param->wq.db_numa_node = cpu_to_node(c->cpu);
339
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300340 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
341 &rq->wq_ctrl);
342 if (err)
343 return err;
344
345 rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];
346
347 wq_sz = mlx5_wq_ll_get_size(&rq->wq);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300348
Tariq Toukan461017c2016-04-20 22:02:13 +0300349 switch (priv->params.rq_wq_type) {
350 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
351 rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
352 GFP_KERNEL, cpu_to_node(c->cpu));
353 if (!rq->wqe_info) {
354 err = -ENOMEM;
355 goto err_rq_wq_destroy;
356 }
357 rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
358 rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
359
360 rq->wqe_sz = MLX5_MPWRQ_NUM_STRIDES * MLX5_MPWRQ_STRIDE_SIZE;
361 byte_count = rq->wqe_sz;
362 break;
363 default: /* MLX5_WQ_TYPE_LINKED_LIST */
364 rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
365 cpu_to_node(c->cpu));
366 if (!rq->skb) {
367 err = -ENOMEM;
368 goto err_rq_wq_destroy;
369 }
370 rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
371 rq->alloc_wqe = mlx5e_alloc_rx_wqe;
372
373 rq->wqe_sz = (priv->params.lro_en) ?
374 priv->params.lro_wqe_sz :
375 MLX5E_SW2HW_MTU(priv->netdev->mtu);
Tariq Toukanc5adb962016-04-20 22:02:16 +0300376 rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
377 byte_count = rq->wqe_sz;
Tariq Toukan461017c2016-04-20 22:02:13 +0300378 byte_count |= MLX5_HW_START_PADDING;
379 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300380
381 for (i = 0; i < wq_sz; i++) {
382 struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);
383
Tariq Toukan461017c2016-04-20 22:02:13 +0300384 wqe->data.byte_count = cpu_to_be32(byte_count);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300385 }
386
Tariq Toukan461017c2016-04-20 22:02:13 +0300387 rq->wq_type = priv->params.rq_wq_type;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300388 rq->pdev = c->pdev;
389 rq->netdev = c->netdev;
Eran Ben Elishaef9814d2015-12-29 14:58:31 +0200390 rq->tstamp = &priv->tstamp;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300391 rq->channel = c;
392 rq->ix = c->ix;
Achiad Shochat50cfa252015-08-04 14:05:41 +0300393 rq->priv = c->priv;
Tariq Toukanbc77b242016-04-20 22:02:15 +0300394 rq->mkey_be = c->mkey_be;
395 rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300396
397 return 0;
398
399err_rq_wq_destroy:
400 mlx5_wq_destroy(&rq->wq_ctrl);
401
402 return err;
403}
404
405static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
406{
Tariq Toukan461017c2016-04-20 22:02:13 +0300407 switch (rq->wq_type) {
408 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
409 kfree(rq->wqe_info);
410 break;
411 default: /* MLX5_WQ_TYPE_LINKED_LIST */
412 kfree(rq->skb);
413 }
414
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300415 mlx5_wq_destroy(&rq->wq_ctrl);
416}
417
418static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
419{
Achiad Shochat50cfa252015-08-04 14:05:41 +0300420 struct mlx5e_priv *priv = rq->priv;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300421 struct mlx5_core_dev *mdev = priv->mdev;
422
423 void *in;
424 void *rqc;
425 void *wq;
426 int inlen;
427 int err;
428
429 inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
430 sizeof(u64) * rq->wq_ctrl.buf.npages;
431 in = mlx5_vzalloc(inlen);
432 if (!in)
433 return -ENOMEM;
434
435 rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
436 wq = MLX5_ADDR_OF(rqc, rqc, wq);
437
438 memcpy(rqc, param->rqc, sizeof(param->rqc));
439
Achiad Shochat97de9f32015-07-29 15:05:43 +0300440 MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300441 MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
442 MLX5_SET(rqc, rqc, flush_in_error_en, 1);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300443 MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
Achiad Shochat68cdf5d2015-07-29 15:05:40 +0300444 MLX5_ADAPTER_PAGE_SHIFT);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300445 MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);
446
447 mlx5_fill_page_array(&rq->wq_ctrl.buf,
448 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
449
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +0300450 err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300451
452 kvfree(in);
453
454 return err;
455}
456
457static int mlx5e_modify_rq(struct mlx5e_rq *rq, int curr_state, int next_state)
458{
459 struct mlx5e_channel *c = rq->channel;
460 struct mlx5e_priv *priv = c->priv;
461 struct mlx5_core_dev *mdev = priv->mdev;
462
463 void *in;
464 void *rqc;
465 int inlen;
466 int err;
467
468 inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
469 in = mlx5_vzalloc(inlen);
470 if (!in)
471 return -ENOMEM;
472
473 rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);
474
475 MLX5_SET(modify_rq_in, in, rq_state, curr_state);
476 MLX5_SET(rqc, rqc, state, next_state);
477
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +0300478 err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300479
480 kvfree(in);
481
482 return err;
483}
484
485static void mlx5e_disable_rq(struct mlx5e_rq *rq)
486{
Achiad Shochat50cfa252015-08-04 14:05:41 +0300487 mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300488}
489
490static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
491{
Achiad Shochat01c196a2015-11-03 08:07:19 +0200492 unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300493 struct mlx5e_channel *c = rq->channel;
494 struct mlx5e_priv *priv = c->priv;
495 struct mlx5_wq_ll *wq = &rq->wq;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300496
Achiad Shochat01c196a2015-11-03 08:07:19 +0200497 while (time_before(jiffies, exp_time)) {
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300498 if (wq->cur_sz >= priv->params.min_rx_wqes)
499 return 0;
500
501 msleep(20);
502 }
503
504 return -ETIMEDOUT;
505}
506
507static int mlx5e_open_rq(struct mlx5e_channel *c,
508 struct mlx5e_rq_param *param,
509 struct mlx5e_rq *rq)
510{
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300511 struct mlx5e_sq *sq = &c->icosq;
512 u16 pi = sq->pc & sq->wq.sz_m1;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300513 int err;
514
515 err = mlx5e_create_rq(c, param, rq);
516 if (err)
517 return err;
518
519 err = mlx5e_enable_rq(rq, param);
520 if (err)
521 goto err_destroy_rq;
522
523 err = mlx5e_modify_rq(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
524 if (err)
525 goto err_disable_rq;
526
527 set_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300528
529 sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
530 sq->ico_wqe_info[pi].num_wqebbs = 1;
531 mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300532
533 return 0;
534
535err_disable_rq:
536 mlx5e_disable_rq(rq);
537err_destroy_rq:
538 mlx5e_destroy_rq(rq);
539
540 return err;
541}
542
543static void mlx5e_close_rq(struct mlx5e_rq *rq)
544{
545 clear_bit(MLX5E_RQ_STATE_POST_WQES_ENABLE, &rq->state);
546 napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
547
548 mlx5e_modify_rq(rq, MLX5_RQC_STATE_RDY, MLX5_RQC_STATE_ERR);
549 while (!mlx5_wq_ll_is_empty(&rq->wq))
550 msleep(20);
551
552 /* avoid destroying rq before mlx5e_poll_rx_cq() is done with it */
553 napi_synchronize(&rq->channel->napi);
554
555 mlx5e_disable_rq(rq);
556 mlx5e_destroy_rq(rq);
557}
558
559static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
560{
Achiad Shochat34802a42015-12-29 14:58:29 +0200561 kfree(sq->wqe_info);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300562 kfree(sq->dma_fifo);
563 kfree(sq->skb);
564}
565
566static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
567{
568 int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
569 int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;
570
571 sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
572 sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
573 numa);
Achiad Shochat34802a42015-12-29 14:58:29 +0200574 sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
575 numa);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300576
Achiad Shochat34802a42015-12-29 14:58:29 +0200577 if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300578 mlx5e_free_sq_db(sq);
579 return -ENOMEM;
580 }
581
582 sq->dma_fifo_mask = df_sz - 1;
583
584 return 0;
585}
586
587static int mlx5e_create_sq(struct mlx5e_channel *c,
588 int tc,
589 struct mlx5e_sq_param *param,
590 struct mlx5e_sq *sq)
591{
592 struct mlx5e_priv *priv = c->priv;
593 struct mlx5_core_dev *mdev = priv->mdev;
594
595 void *sqc = param->sqc;
596 void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
597 int err;
598
Moshe Lazer0ba42242016-03-02 00:13:40 +0200599 err = mlx5_alloc_map_uar(mdev, &sq->uar, true);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300600 if (err)
601 return err;
602
Saeed Mahameed311c7c72015-07-23 23:35:57 +0300603 param->wq.db_numa_node = cpu_to_node(c->cpu);
604
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300605 err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
606 &sq->wq_ctrl);
607 if (err)
608 goto err_unmap_free_uar;
609
610 sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
Moshe Lazer0ba42242016-03-02 00:13:40 +0200611 if (sq->uar.bf_map) {
612 set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
613 sq->uar_map = sq->uar.bf_map;
614 } else {
615 sq->uar_map = sq->uar.map;
616 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300617 sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
Achiad Shochat58d52292015-07-23 23:35:58 +0300618 sq->max_inline = param->max_inline;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300619
Dan Carpenter7ec0bb22015-06-11 11:50:01 +0300620 err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
621 if (err)
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300622 goto err_sq_wq_destroy;
623
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300624 if (param->icosq) {
625 u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
626
627 sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
628 wq_sz,
629 GFP_KERNEL,
630 cpu_to_node(c->cpu));
631 if (!sq->ico_wqe_info) {
632 err = -ENOMEM;
633 goto err_free_sq_db;
634 }
635 } else {
636 int txq_ix;
637
638 txq_ix = c->ix + tc * priv->params.num_channels;
639 sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
640 priv->txq_to_sq_map[txq_ix] = sq;
641 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300642
Achiad Shochat88a85f92015-07-23 23:35:59 +0300643 sq->pdev = c->pdev;
Eran Ben Elishaef9814d2015-12-29 14:58:31 +0200644 sq->tstamp = &priv->tstamp;
Achiad Shochat88a85f92015-07-23 23:35:59 +0300645 sq->mkey_be = c->mkey_be;
646 sq->channel = c;
647 sq->tc = tc;
648 sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
649 sq->bf_budget = MLX5E_SQ_BF_BUDGET;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300650
651 return 0;
652
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300653err_free_sq_db:
654 mlx5e_free_sq_db(sq);
655
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300656err_sq_wq_destroy:
657 mlx5_wq_destroy(&sq->wq_ctrl);
658
659err_unmap_free_uar:
660 mlx5_unmap_free_uar(mdev, &sq->uar);
661
662 return err;
663}
664
665static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
666{
667 struct mlx5e_channel *c = sq->channel;
668 struct mlx5e_priv *priv = c->priv;
669
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300670 kfree(sq->ico_wqe_info);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300671 mlx5e_free_sq_db(sq);
672 mlx5_wq_destroy(&sq->wq_ctrl);
673 mlx5_unmap_free_uar(priv->mdev, &sq->uar);
674}
675
676static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
677{
678 struct mlx5e_channel *c = sq->channel;
679 struct mlx5e_priv *priv = c->priv;
680 struct mlx5_core_dev *mdev = priv->mdev;
681
682 void *in;
683 void *sqc;
684 void *wq;
685 int inlen;
686 int err;
687
688 inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
689 sizeof(u64) * sq->wq_ctrl.buf.npages;
690 in = mlx5_vzalloc(inlen);
691 if (!in)
692 return -ENOMEM;
693
694 sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
695 wq = MLX5_ADDR_OF(sqc, sqc, wq);
696
697 memcpy(sqc, param->sqc, sizeof(param->sqc));
698
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300699 MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
700 MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300701 MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300702 MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300703 MLX5_SET(sqc, sqc, flush_in_error_en, 1);
704
705 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
706 MLX5_SET(wq, wq, uar_page, sq->uar.index);
707 MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
Achiad Shochat68cdf5d2015-07-29 15:05:40 +0300708 MLX5_ADAPTER_PAGE_SHIFT);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300709 MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);
710
711 mlx5_fill_page_array(&sq->wq_ctrl.buf,
712 (__be64 *)MLX5_ADDR_OF(wq, wq, pas));
713
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +0300714 err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300715
716 kvfree(in);
717
718 return err;
719}
720
721static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state, int next_state)
722{
723 struct mlx5e_channel *c = sq->channel;
724 struct mlx5e_priv *priv = c->priv;
725 struct mlx5_core_dev *mdev = priv->mdev;
726
727 void *in;
728 void *sqc;
729 int inlen;
730 int err;
731
732 inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
733 in = mlx5_vzalloc(inlen);
734 if (!in)
735 return -ENOMEM;
736
737 sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);
738
739 MLX5_SET(modify_sq_in, in, sq_state, curr_state);
740 MLX5_SET(sqc, sqc, state, next_state);
741
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +0300742 err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300743
744 kvfree(in);
745
746 return err;
747}
748
749static void mlx5e_disable_sq(struct mlx5e_sq *sq)
750{
751 struct mlx5e_channel *c = sq->channel;
752 struct mlx5e_priv *priv = c->priv;
753 struct mlx5_core_dev *mdev = priv->mdev;
754
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +0300755 mlx5_core_destroy_sq(mdev, sq->sqn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300756}
757
758static int mlx5e_open_sq(struct mlx5e_channel *c,
759 int tc,
760 struct mlx5e_sq_param *param,
761 struct mlx5e_sq *sq)
762{
763 int err;
764
765 err = mlx5e_create_sq(c, tc, param, sq);
766 if (err)
767 return err;
768
769 err = mlx5e_enable_sq(sq, param);
770 if (err)
771 goto err_destroy_sq;
772
773 err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY);
774 if (err)
775 goto err_disable_sq;
776
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300777 if (sq->txq) {
778 set_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
779 netdev_tx_reset_queue(sq->txq);
780 netif_tx_start_queue(sq->txq);
781 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300782
783 return 0;
784
785err_disable_sq:
786 mlx5e_disable_sq(sq);
787err_destroy_sq:
788 mlx5e_destroy_sq(sq);
789
790 return err;
791}
792
/* Stop a single TX queue under its xmit lock, so no concurrent
 * ndo_start_xmit can slip past the stopped state.
 */
static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
	__netif_tx_lock_bh(txq);
	netif_tx_stop_queue(txq);
	__netif_tx_unlock_bh(txq);
}
799
800static void mlx5e_close_sq(struct mlx5e_sq *sq)
801{
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300802 if (sq->txq) {
803 clear_bit(MLX5E_SQ_STATE_WAKE_TXQ_ENABLE, &sq->state);
804 /* prevent netif_tx_wake_queue */
805 napi_synchronize(&sq->channel->napi);
806 netif_tx_disable_queue(sq->txq);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300807
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300808 /* ensure hw is notified of all pending wqes */
809 if (mlx5e_sq_has_room_for(sq, 1))
810 mlx5e_send_nop(sq, true);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300811
Tariq Toukand3c9bc22016-04-20 22:02:14 +0300812 mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY, MLX5_SQC_STATE_ERR);
813 }
814
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300815 while (sq->cc != sq->pc) /* wait till sq is empty */
816 msleep(20);
817
818 /* avoid destroying sq before mlx5e_poll_tx_cq() is done with it */
819 napi_synchronize(&sq->channel->napi);
820
821 mlx5e_disable_sq(sq);
822 mlx5e_destroy_sq(sq);
823}
824
825static int mlx5e_create_cq(struct mlx5e_channel *c,
826 struct mlx5e_cq_param *param,
827 struct mlx5e_cq *cq)
828{
829 struct mlx5e_priv *priv = c->priv;
830 struct mlx5_core_dev *mdev = priv->mdev;
831 struct mlx5_core_cq *mcq = &cq->mcq;
832 int eqn_not_used;
Doron Tsur0b6e26c2016-01-17 11:25:47 +0200833 unsigned int irqn;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300834 int err;
835 u32 i;
836
Saeed Mahameed311c7c72015-07-23 23:35:57 +0300837 param->wq.buf_numa_node = cpu_to_node(c->cpu);
838 param->wq.db_numa_node = cpu_to_node(c->cpu);
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300839 param->eq_ix = c->ix;
840
841 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
842 &cq->wq_ctrl);
843 if (err)
844 return err;
845
846 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
847
848 cq->napi = &c->napi;
849
850 mcq->cqe_sz = 64;
851 mcq->set_ci_db = cq->wq_ctrl.db.db;
852 mcq->arm_db = cq->wq_ctrl.db.db + 1;
853 *mcq->set_ci_db = 0;
854 *mcq->arm_db = 0;
855 mcq->vector = param->eq_ix;
856 mcq->comp = mlx5e_completion_event;
857 mcq->event = mlx5e_cq_error_event;
858 mcq->irqn = irqn;
859 mcq->uar = &priv->cq_uar;
860
861 for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
862 struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);
863
864 cqe->op_own = 0xf1;
865 }
866
867 cq->channel = c;
Achiad Shochat50cfa252015-08-04 14:05:41 +0300868 cq->priv = priv;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300869
870 return 0;
871}
872
873static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
874{
875 mlx5_wq_destroy(&cq->wq_ctrl);
876}
877
/* Create the CQ object in firmware: build a CREATE_CQ command with the CQ
 * context prepared in @param plus the page list of the already-allocated
 * work queue, then arm the CQ so the first completion raises an event.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;

	void *in;
	void *cqc;
	int inlen;
	unsigned int irqn_not_used;
	int eqn;
	int err;

	/* Command layout: fixed header followed by one 64-bit PA per page. */
	inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
		sizeof(u64) * cq->wq_ctrl.buf.npages;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

	/* Start from the template context built in mlx5e_build_*_cq_param(). */
	memcpy(cqc, param->cqc, sizeof(param->cqc));

	mlx5_fill_page_array(&cq->wq_ctrl.buf,
			     (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

	/* Only the EQ number is needed here; the IRQ was set at create time. */
	mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

	MLX5_SET(cqc, cqc, c_eqn, eqn);
	MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
	MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
					  MLX5_ADAPTER_PAGE_SHIFT);
	MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

	err = mlx5_core_create_cq(mdev, mcq, in, inlen);

	kvfree(in);

	if (err)
		return err;

	/* Request an interrupt on the next completion. */
	mlx5e_cq_arm(cq);

	return 0;
}
923
/* Destroy the firmware CQ object. Inverse of mlx5e_enable_cq(); the host
 * work-queue memory is freed separately by mlx5e_destroy_cq().
 */
static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
	struct mlx5e_priv *priv = cq->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	mlx5_core_destroy_cq(mdev, &cq->mcq);
}
931
/* Fully bring up one completion queue for channel @c: allocate host
 * resources, create the firmware object, and (when the device supports it)
 * program interrupt moderation.
 *
 * @moderation_usecs/@moderation_frames: coalescing parameters; pass 0/0 for
 * no moderation (e.g. the ICO SQ CQ).
 *
 * Returns 0 on success; on failure everything created so far is undone.
 */
static int mlx5e_open_cq(struct mlx5e_channel *c,
			 struct mlx5e_cq_param *param,
			 struct mlx5e_cq *cq,
			 u16 moderation_usecs,
			 u16 moderation_frames)
{
	int err;
	struct mlx5e_priv *priv = c->priv;
	struct mlx5_core_dev *mdev = priv->mdev;

	err = mlx5e_create_cq(c, param, cq);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, param);
	if (err)
		goto err_destroy_cq;

	/* Moderation is an optional device capability; skip when absent. */
	if (MLX5_CAP_GEN(mdev, cq_moderation))
		mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
					       moderation_usecs,
					       moderation_frames);
	return 0;

err_destroy_cq:
	mlx5e_destroy_cq(cq);

	return err;
}
961
/* Tear down a CQ opened by mlx5e_open_cq(): destroy the firmware object
 * first, then free the host-side work queue.
 */
static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
	mlx5e_disable_cq(cq);
	mlx5e_destroy_cq(cq);
}
967
/* Return the first CPU in the affinity mask of the IRQ assigned to
 * channel index @ix; used to place the channel's memory and NAPI work.
 */
static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
	return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}
972
973static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
974 struct mlx5e_channel_param *cparam)
975{
976 struct mlx5e_priv *priv = c->priv;
977 int err;
978 int tc;
979
980 for (tc = 0; tc < c->num_tc; tc++) {
981 err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
982 priv->params.tx_cq_moderation_usec,
983 priv->params.tx_cq_moderation_pkts);
984 if (err)
985 goto err_close_tx_cqs;
Amir Vadaif62b8bb82015-05-28 22:28:48 +0300986 }
987
988 return 0;
989
990err_close_tx_cqs:
991 for (tc--; tc >= 0; tc--)
992 mlx5e_close_cq(&c->sq[tc].cq);
993
994 return err;
995}
996
997static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
998{
999 int tc;
1000
1001 for (tc = 0; tc < c->num_tc; tc++)
1002 mlx5e_close_cq(&c->sq[tc].cq);
1003}
1004
1005static int mlx5e_open_sqs(struct mlx5e_channel *c,
1006 struct mlx5e_channel_param *cparam)
1007{
1008 int err;
1009 int tc;
1010
1011 for (tc = 0; tc < c->num_tc; tc++) {
1012 err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
1013 if (err)
1014 goto err_close_sqs;
1015 }
1016
1017 return 0;
1018
1019err_close_sqs:
1020 for (tc--; tc >= 0; tc--)
1021 mlx5e_close_sq(&c->sq[tc]);
1022
1023 return err;
1024}
1025
1026static void mlx5e_close_sqs(struct mlx5e_channel *c)
1027{
1028 int tc;
1029
1030 for (tc = 0; tc < c->num_tc; tc++)
1031 mlx5e_close_sq(&c->sq[tc]);
1032}
1033
Rana Shahout5283af82015-08-23 16:12:14 +03001034static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
Saeed Mahameed03289b82015-06-23 17:14:14 +03001035{
1036 int i;
1037
1038 for (i = 0; i < MLX5E_MAX_NUM_TC; i++)
Rana Shahout5283af82015-08-23 16:12:14 +03001039 priv->channeltc_to_txq_map[ix][i] =
1040 ix + i * priv->params.num_channels;
Saeed Mahameed03289b82015-06-23 17:14:14 +03001041}
1042
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001043static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
1044 struct mlx5e_channel_param *cparam,
1045 struct mlx5e_channel **cp)
1046{
1047 struct net_device *netdev = priv->netdev;
1048 int cpu = mlx5e_get_cpu(priv, ix);
1049 struct mlx5e_channel *c;
1050 int err;
1051
1052 c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
1053 if (!c)
1054 return -ENOMEM;
1055
1056 c->priv = priv;
1057 c->ix = ix;
1058 c->cpu = cpu;
1059 c->pdev = &priv->mdev->pdev->dev;
1060 c->netdev = priv->netdev;
Matan Baraka606b0f2016-02-29 18:05:28 +02001061 c->mkey_be = cpu_to_be32(priv->mkey.key);
Achiad Shochata4418a62015-07-29 15:05:41 +03001062 c->num_tc = priv->params.num_tc;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001063
Rana Shahout5283af82015-08-23 16:12:14 +03001064 mlx5e_build_channeltc_to_txq_map(priv, ix);
Saeed Mahameed03289b82015-06-23 17:14:14 +03001065
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001066 netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);
1067
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001068 err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, 0, 0);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001069 if (err)
1070 goto err_napi_del;
1071
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001072 err = mlx5e_open_tx_cqs(c, cparam);
1073 if (err)
1074 goto err_close_icosq_cq;
1075
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001076 err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
1077 priv->params.rx_cq_moderation_usec,
1078 priv->params.rx_cq_moderation_pkts);
1079 if (err)
1080 goto err_close_tx_cqs;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001081
1082 napi_enable(&c->napi);
1083
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001084 err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001085 if (err)
1086 goto err_disable_napi;
1087
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001088 err = mlx5e_open_sqs(c, cparam);
1089 if (err)
1090 goto err_close_icosq;
1091
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001092 err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
1093 if (err)
1094 goto err_close_sqs;
1095
1096 netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
1097 *cp = c;
1098
1099 return 0;
1100
1101err_close_sqs:
1102 mlx5e_close_sqs(c);
1103
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001104err_close_icosq:
1105 mlx5e_close_sq(&c->icosq);
1106
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001107err_disable_napi:
1108 napi_disable(&c->napi);
1109 mlx5e_close_cq(&c->rq.cq);
1110
1111err_close_tx_cqs:
1112 mlx5e_close_tx_cqs(c);
1113
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001114err_close_icosq_cq:
1115 mlx5e_close_cq(&c->icosq.cq);
1116
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001117err_napi_del:
1118 netif_napi_del(&c->napi);
Eric Dumazet7ae92ae2015-11-18 06:30:55 -08001119 napi_hash_del(&c->napi);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001120 kfree(c);
1121
1122 return err;
1123}
1124
/* Tear down a channel in reverse order of mlx5e_open_channel(): data
 * queues first, then NAPI, then the CQs. synchronize_rcu() after
 * napi_hash_del() ensures no busy-poll user still holds a reference to
 * the NAPI context before the channel memory is freed.
 */
static void mlx5e_close_channel(struct mlx5e_channel *c)
{
	mlx5e_close_rq(&c->rq);
	mlx5e_close_sqs(c);
	mlx5e_close_sq(&c->icosq);
	napi_disable(&c->napi);
	mlx5e_close_cq(&c->rq.cq);
	mlx5e_close_tx_cqs(c);
	mlx5e_close_cq(&c->icosq.cq);
	netif_napi_del(&c->napi);

	napi_hash_del(&c->napi);
	synchronize_rcu();

	kfree(c);
}
1141
/* Fill the RQ context template used when creating every channel's receive
 * queue: WQ type (striding/multi-packet vs. plain linked list), stride and
 * size parameters, PD, and the queue counter for drop statistics.
 */
static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
				 struct mlx5e_rq_param *param)
{
	void *rqc = param->rqc;
	void *wq = MLX5_ADDR_OF(rqc, rqc, wq);

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		/* The -9 / -6 offsets rebase the log values to the units the
		 * hardware context expects — presumably per the PRM; confirm
		 * against the mlx5 programmer's reference.
		 */
		MLX5_SET(wq, wq, log_wqe_num_of_strides,
			 MLX5_MPWRQ_LOG_NUM_STRIDES - 9);
		MLX5_SET(wq, wq, log_wqe_stride_size,
			 MLX5_MPWRQ_LOG_STRIDE_SIZE - 6);
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
	}

	MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
	MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
	MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
	MLX5_SET(wq, wq, pd, priv->pdn);
	MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);

	param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
	param->wq.linear = 1;
}
1169
Tariq Toukan556dd1b2016-03-02 00:13:36 +02001170static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
1171{
1172 void *rqc = param->rqc;
1173 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1174
1175 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1176 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1177}
1178
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001179static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1180 struct mlx5e_sq_param *param)
1181{
1182 void *sqc = param->sqc;
1183 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1184
1185 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
1186 MLX5_SET(wq, wq, pd, priv->pdn);
1187
1188 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1189}
1190
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001191static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1192 struct mlx5e_sq_param *param)
1193{
1194 void *sqc = param->sqc;
1195 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1196
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001197 mlx5e_build_sq_param_common(priv, param);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001198 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001199
Achiad Shochat58d52292015-07-23 23:35:58 +03001200 param->max_inline = priv->params.tx_max_inline;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001201}
1202
/* CQ context field common to all CQ flavors: the UAR page used for
 * doorbells.
 */
static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
					struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, uar_page, priv->cq_uar.index);
}
1210
/* RX CQ context: size the CQ to match the RQ. A striding RQ can complete
 * up to 2^MLX5_MPWRQ_LOG_NUM_STRIDES packets per RQ WQE, so its CQ must be
 * correspondingly larger than the RQ itself.
 */
static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;
	u8 log_cq_size;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		log_cq_size = priv->params.log_rq_size +
			MLX5_MPWRQ_LOG_NUM_STRIDES;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		log_cq_size = priv->params.log_rq_size;
	}

	MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);

	mlx5e_build_common_cq_param(priv, param);
}
1230
/* TX CQ context: one CQE per SQ WQE, so the CQ is sized like the SQ. */
static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
				    struct mlx5e_cq_param *param)
{
	void *cqc = param->cqc;

	MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);

	mlx5e_build_common_cq_param(priv, param);
}
1240
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001241static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
1242 struct mlx5e_cq_param *param,
1243 u8 log_wq_size)
1244{
1245 void *cqc = param->cqc;
1246
1247 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
1248
1249 mlx5e_build_common_cq_param(priv, param);
1250}
1251
/* ICO (internal control operations) SQ context: common SQ fields, the
 * requested size, and the reg_umr capability bit so the SQ may post UMR
 * WQEs (used by the striding RQ to remap memory keys).
 */
static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
				    struct mlx5e_sq_param *param,
				    u8 log_wq_size)
{
	void *sqc = param->sqc;
	void *wq = MLX5_ADDR_OF(sqc, sqc, wq);

	mlx5e_build_sq_param_common(priv, param);

	MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
	MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));

	/* Flag for mlx5e_open_sq(): this SQ carries no netdev traffic. */
	param->icosq = true;
}
1266
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001267static void mlx5e_build_channel_param(struct mlx5e_priv *priv,
1268 struct mlx5e_channel_param *cparam)
1269{
Tariq Toukanbc77b242016-04-20 22:02:15 +03001270 u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001271
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001272 memset(cparam, 0, sizeof(*cparam));
1273
1274 mlx5e_build_rq_param(priv, &cparam->rq);
1275 mlx5e_build_sq_param(priv, &cparam->sq);
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001276 mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001277 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1278 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001279 mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001280}
1281
/* Open all configured channels in two phases: first create every channel,
 * then wait for each RQ to be populated with a minimum number of RX WQEs
 * so traffic can flow immediately. On any failure, close the channels
 * opened so far and free the lookup tables.
 */
static int mlx5e_open_channels(struct mlx5e_priv *priv)
{
	struct mlx5e_channel_param cparam;
	int nch = priv->params.num_channels;
	int err = -ENOMEM;
	int i;
	int j;

	priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
				GFP_KERNEL);

	priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
				      sizeof(struct mlx5e_sq *), GFP_KERNEL);

	/* kfree(NULL) is a no-op, so a single unwind label covers both. */
	if (!priv->channel || !priv->txq_to_sq_map)
		goto err_free_txq_to_sq_map;

	mlx5e_build_channel_param(priv, &cparam);
	for (i = 0; i < nch; i++) {
		err = mlx5e_open_channel(priv, i, &cparam, &priv->channel[i]);
		if (err)
			goto err_close_channels;
	}

	for (j = 0; j < nch; j++) {
		err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
		if (err)
			goto err_close_channels;
	}

	return 0;

err_close_channels:
	/* i is past the last successfully opened channel in both loops. */
	for (i--; i >= 0; i--)
		mlx5e_close_channel(priv->channel[i]);

err_free_txq_to_sq_map:
	kfree(priv->txq_to_sq_map);
	kfree(priv->channel);

	return err;
}
1324
1325static void mlx5e_close_channels(struct mlx5e_priv *priv)
1326{
1327 int i;
1328
1329 for (i = 0; i < priv->params.num_channels; i++)
1330 mlx5e_close_channel(priv->channel[i]);
1331
Saeed Mahameed03289b82015-06-23 17:14:14 +03001332 kfree(priv->txq_to_sq_map);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001333 kfree(priv->channel);
1334}
1335
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001336static int mlx5e_rx_hash_fn(int hfunc)
1337{
1338 return (hfunc == ETH_RSS_HASH_TOP) ?
1339 MLX5_RX_HASH_FN_TOEPLITZ :
1340 MLX5_RX_HASH_FN_INVERTED_XOR8;
1341}
1342
/* Return the low @size bits of @a in reversed bit order (bit 0 swaps with
 * bit size-1, etc.); used to spread RSS indirection entries.
 */
static int mlx5e_bits_invert(unsigned long a, int size)
{
	int out = 0;
	int bit;

	for (bit = 0; bit < size; bit++) {
		if (test_bit(size - bit - 1, &a))
			out |= 1 << bit;
	}

	return out;
}
1353
Achiad Shochat936896e2015-08-16 16:04:46 +03001354static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
1355{
1356 int i;
1357
1358 for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
1359 int ix = i;
1360
1361 if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
1362 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
1363
Achiad Shochat2d75b2b2015-08-16 16:04:47 +03001364 ix = priv->params.indirection_rqt[ix];
Achiad Shochat936896e2015-08-16 16:04:46 +03001365 MLX5_SET(rqtc, rqtc, rq_num[i],
1366 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1367 priv->channel[ix]->rq.rqn :
1368 priv->drop_rq.rqn);
1369 }
1370}
1371
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001372static void mlx5e_fill_rqt_rqns(struct mlx5e_priv *priv, void *rqtc,
1373 enum mlx5e_rqt_ix rqt_ix)
1374{
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001375
1376 switch (rqt_ix) {
1377 case MLX5E_INDIRECTION_RQT:
Achiad Shochat936896e2015-08-16 16:04:46 +03001378 mlx5e_fill_indir_rqt_rqns(priv, rqtc);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001379
1380 break;
1381
1382 default: /* MLX5E_SINGLE_RQ_RQT */
1383 MLX5_SET(rqtc, rqtc, rq_num[0],
1384 test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1385 priv->channel[0]->rq.rqn :
1386 priv->drop_rq.rqn);
1387
1388 break;
1389 }
1390}
1391
/* Create an RQ table (RQT) in firmware and store its number in
 * priv->rqtn[rqt_ix]. The command payload is the fixed header plus one
 * 32-bit RQ number per entry.
 *
 * Returns 0 on success or a negative errno.
 */
static int mlx5e_create_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	u32 *in;
	void *rqtc;
	int inlen;
	int sz;
	int err;

	sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;

	inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
	in = mlx5_vzalloc(inlen);
	if (!in)
		return -ENOMEM;

	rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);

	MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
	MLX5_SET(rqtc, rqtc, rqt_max_size, sz);

	mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);

	err = mlx5_core_create_rqt(mdev, in, inlen, &priv->rqtn[rqt_ix]);

	kvfree(in);

	return err;
}
1421
Achiad Shochat2d75b2b2015-08-16 16:04:47 +03001422int mlx5e_redirect_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001423{
1424 struct mlx5_core_dev *mdev = priv->mdev;
1425 u32 *in;
1426 void *rqtc;
1427 int inlen;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001428 int sz;
1429 int err;
1430
Achiad Shochat936896e2015-08-16 16:04:46 +03001431 sz = (rqt_ix == MLX5E_SINGLE_RQ_RQT) ? 1 : MLX5E_INDIR_RQT_SIZE;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001432
1433 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
1434 in = mlx5_vzalloc(inlen);
1435 if (!in)
1436 return -ENOMEM;
1437
1438 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
1439
1440 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1441
1442 mlx5e_fill_rqt_rqns(priv, rqtc, rqt_ix);
1443
1444 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
1445
1446 err = mlx5_core_modify_rqt(mdev, priv->rqtn[rqt_ix], in, inlen);
1447
1448 kvfree(in);
1449
1450 return err;
1451}
1452
/* Destroy the firmware RQT object created by mlx5e_create_rqt(). */
static void mlx5e_destroy_rqt(struct mlx5e_priv *priv, enum mlx5e_rqt_ix rqt_ix)
{
	mlx5_core_destroy_rqt(priv->mdev, priv->rqtn[rqt_ix]);
}
1457
/* Refresh both RQTs so they reference the currently valid RQs (channel
 * RQs when open, the drop RQ when closed).
 */
static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
{
	mlx5e_redirect_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_redirect_rqt(priv, MLX5E_SINGLE_RQ_RQT);
}
1463
/* Program the LRO fields of a TIR context when LRO is enabled: which IP
 * versions to aggregate, the max aggregated payload (in 256-byte units,
 * hence the >> 8, after reserving rough header room), and the flush
 * timeout chosen from the device's supported periods.
 */
static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
{
	if (!priv->params.lro_en)
		return;

/* Generous allowance for L2+L3 headers when sizing the LRO payload. */
#define ROUGH_MAX_L2_L3_HDR_SZ 256

	MLX5_SET(tirc, tirc, lro_enable_mask,
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
		 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
	MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
		 (priv->params.lro_wqe_sz -
		  ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
	MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
		 MLX5_CAP_ETH(priv->mdev,
			      lro_timer_supported_periods[2]));
}
1481
Tariq Toukanbdfc0282016-02-29 21:17:12 +02001482void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
1483{
1484 MLX5_SET(tirc, tirc, rx_hash_fn,
1485 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1486 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1487 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1488 rx_hash_toeplitz_key);
1489 size_t len = MLX5_FLD_SZ_BYTES(tirc,
1490 rx_hash_toeplitz_key);
1491
1492 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1493 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1494 }
1495}
1496
Tariq Toukanab0394f2016-02-29 21:17:10 +02001497static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001498{
1499 struct mlx5_core_dev *mdev = priv->mdev;
1500
1501 void *in;
1502 void *tirc;
1503 int inlen;
1504 int err;
Tariq Toukanab0394f2016-02-29 21:17:10 +02001505 int tt;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001506
1507 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1508 in = mlx5_vzalloc(inlen);
1509 if (!in)
1510 return -ENOMEM;
1511
1512 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
1513 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
1514
1515 mlx5e_build_tir_ctx_lro(tirc, priv);
1516
Tariq Toukanab0394f2016-02-29 21:17:10 +02001517 for (tt = 0; tt < MLX5E_NUM_TT; tt++) {
1518 err = mlx5_core_modify_tir(mdev, priv->tirn[tt], in, inlen);
1519 if (err)
1520 break;
1521 }
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001522
1523 kvfree(in);
1524
1525 return err;
1526}
1527
Tariq Toukan66189962015-11-12 19:35:26 +02001528static int mlx5e_refresh_tir_self_loopback_enable(struct mlx5_core_dev *mdev,
1529 u32 tirn)
1530{
1531 void *in;
1532 int inlen;
1533 int err;
1534
1535 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1536 in = mlx5_vzalloc(inlen);
1537 if (!in)
1538 return -ENOMEM;
1539
1540 MLX5_SET(modify_tir_in, in, bitmask.self_lb_en, 1);
1541
1542 err = mlx5_core_modify_tir(mdev, tirn, in, inlen);
1543
1544 kvfree(in);
1545
1546 return err;
1547}
1548
1549static int mlx5e_refresh_tirs_self_loopback_enable(struct mlx5e_priv *priv)
1550{
1551 int err;
1552 int i;
1553
1554 for (i = 0; i < MLX5E_NUM_TT; i++) {
1555 err = mlx5e_refresh_tir_self_loopback_enable(priv->mdev,
1556 priv->tirn[i]);
1557 if (err)
1558 return err;
1559 }
1560
1561 return 0;
1562}
1563
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001564static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1565{
1566 struct mlx5e_priv *priv = netdev_priv(netdev);
1567 struct mlx5_core_dev *mdev = priv->mdev;
1568 int hw_mtu;
1569 int err;
1570
1571 err = mlx5_set_port_mtu(mdev, MLX5E_SW2HW_MTU(netdev->mtu), 1);
1572 if (err)
1573 return err;
1574
1575 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1576
1577 if (MLX5E_HW2SW_MTU(hw_mtu) != netdev->mtu)
1578 netdev_warn(netdev, "%s: Port MTU %d is different than netdev mtu %d\n",
1579 __func__, MLX5E_HW2SW_MTU(hw_mtu), netdev->mtu);
1580
1581 netdev->mtu = MLX5E_HW2SW_MTU(hw_mtu);
1582 return 0;
1583}
1584
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001585static void mlx5e_netdev_set_tcs(struct net_device *netdev)
1586{
1587 struct mlx5e_priv *priv = netdev_priv(netdev);
1588 int nch = priv->params.num_channels;
1589 int ntc = priv->params.num_tc;
1590 int tc;
1591
1592 netdev_reset_tc(netdev);
1593
1594 if (ntc == 1)
1595 return;
1596
1597 netdev_set_num_tc(netdev, ntc);
1598
1599 for (tc = 0; tc < ntc; tc++)
1600 netdev_set_tc_queue(netdev, tc, nch, tc * nch);
1601}
1602
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001603int mlx5e_open_locked(struct net_device *netdev)
1604{
1605 struct mlx5e_priv *priv = netdev_priv(netdev);
1606 int num_txqs;
1607 int err;
1608
1609 set_bit(MLX5E_STATE_OPENED, &priv->state);
1610
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001611 mlx5e_netdev_set_tcs(netdev);
1612
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001613 num_txqs = priv->params.num_channels * priv->params.num_tc;
1614 netif_set_real_num_tx_queues(netdev, num_txqs);
1615 netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
1616
1617 err = mlx5e_set_dev_port_mtu(netdev);
1618 if (err)
Achiad Shochat343b29f2015-09-25 10:49:09 +03001619 goto err_clear_state_opened_flag;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001620
1621 err = mlx5e_open_channels(priv);
1622 if (err) {
1623 netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
1624 __func__, err);
Achiad Shochat343b29f2015-09-25 10:49:09 +03001625 goto err_clear_state_opened_flag;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001626 }
1627
Tariq Toukan66189962015-11-12 19:35:26 +02001628 err = mlx5e_refresh_tirs_self_loopback_enable(priv);
1629 if (err) {
1630 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
1631 __func__, err);
1632 goto err_close_channels;
1633 }
1634
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001635 mlx5e_redirect_rqts(priv);
Tariq Toukance89ef32016-03-02 00:13:33 +02001636 mlx5e_update_carrier(priv);
Eran Ben Elishaef9814d2015-12-29 14:58:31 +02001637 mlx5e_timestamp_init(priv);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001638
1639 schedule_delayed_work(&priv->update_stats_work, 0);
Achiad Shochat9b37b072015-08-04 14:05:46 +03001640
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001641 return 0;
Achiad Shochat343b29f2015-09-25 10:49:09 +03001642
Tariq Toukan66189962015-11-12 19:35:26 +02001643err_close_channels:
1644 mlx5e_close_channels(priv);
Achiad Shochat343b29f2015-09-25 10:49:09 +03001645err_clear_state_opened_flag:
1646 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1647 return err;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001648}
1649
1650static int mlx5e_open(struct net_device *netdev)
1651{
1652 struct mlx5e_priv *priv = netdev_priv(netdev);
1653 int err;
1654
1655 mutex_lock(&priv->state_lock);
1656 err = mlx5e_open_locked(netdev);
1657 mutex_unlock(&priv->state_lock);
1658
1659 return err;
1660}
1661
/* Bring the interface down; caller holds priv->state_lock.
 *
 * Clearing MLX5E_STATE_OPENED before mlx5e_redirect_rqts() makes the RQTs
 * fall back to the drop RQ, so the channel RQs can then be closed safely.
 *
 * Always returns 0.
 */
int mlx5e_close_locked(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	/* May already be CLOSED in case a previous configuration operation
	 * (e.g RX/TX queue size change) that involves close&open failed.
	 */
	if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
		return 0;

	clear_bit(MLX5E_STATE_OPENED, &priv->state);

	mlx5e_timestamp_cleanup(priv);
	netif_carrier_off(priv->netdev);
	mlx5e_redirect_rqts(priv);
	mlx5e_close_channels(priv);

	return 0;
}
1681
1682static int mlx5e_close(struct net_device *netdev)
1683{
1684 struct mlx5e_priv *priv = netdev_priv(netdev);
1685 int err;
1686
1687 mutex_lock(&priv->state_lock);
1688 err = mlx5e_close_locked(netdev);
1689 mutex_unlock(&priv->state_lock);
1690
1691 return err;
1692}
1693
Achiad Shochat50cfa252015-08-04 14:05:41 +03001694static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
1695 struct mlx5e_rq *rq,
1696 struct mlx5e_rq_param *param)
1697{
1698 struct mlx5_core_dev *mdev = priv->mdev;
1699 void *rqc = param->rqc;
1700 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
1701 int err;
1702
1703 param->wq.db_numa_node = param->wq.buf_numa_node;
1704
1705 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
1706 &rq->wq_ctrl);
1707 if (err)
1708 return err;
1709
1710 rq->priv = priv;
1711
1712 return 0;
1713}
1714
/* Allocate and initialize the host-side CQ backing the drop RQ. Mirrors
 * the channel-CQ setup (doorbells zeroed, completion/event handlers set)
 * but binds no NAPI context, since the drop RQ never receives traffic.
 *
 * Returns 0 or a negative errno.
 */
static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
				struct mlx5e_cq *cq,
				struct mlx5e_cq_param *param)
{
	struct mlx5_core_dev *mdev = priv->mdev;
	struct mlx5_core_cq *mcq = &cq->mcq;
	int eqn_not_used;
	unsigned int irqn;
	int err;

	err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
			       &cq->wq_ctrl);
	if (err)
		return err;

	mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

	mcq->cqe_sz = 64;
	mcq->set_ci_db = cq->wq_ctrl.db.db;
	mcq->arm_db = cq->wq_ctrl.db.db + 1;
	/* Both doorbell records start at zero. */
	*mcq->set_ci_db = 0;
	*mcq->arm_db = 0;
	mcq->vector = param->eq_ix;
	mcq->comp = mlx5e_completion_event;
	mcq->event = mlx5e_cq_error_event;
	mcq->irqn = irqn;
	mcq->uar = &priv->cq_uar;

	cq->priv = priv;

	return 0;
}
1747
/* Bring up the drop RQ (and its CQ): the RQ the RQTs point at whenever
 * the real channels are closed, so RX steering always has a valid target.
 * Create host resources, then the firmware objects, unwinding in reverse
 * on failure.
 *
 * Returns 0 or a negative errno.
 */
static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
{
	struct mlx5e_cq_param cq_param;
	struct mlx5e_rq_param rq_param;
	struct mlx5e_rq *rq = &priv->drop_rq;
	struct mlx5e_cq *cq = &priv->drop_rq.cq;
	int err;

	memset(&cq_param, 0, sizeof(cq_param));
	memset(&rq_param, 0, sizeof(rq_param));
	mlx5e_build_drop_rq_param(&rq_param);

	err = mlx5e_create_drop_cq(priv, cq, &cq_param);
	if (err)
		return err;

	err = mlx5e_enable_cq(cq, &cq_param);
	if (err)
		goto err_destroy_cq;

	err = mlx5e_create_drop_rq(priv, rq, &rq_param);
	if (err)
		goto err_disable_cq;

	err = mlx5e_enable_rq(rq, &rq_param);
	if (err)
		goto err_destroy_rq;

	return 0;

err_destroy_rq:
	mlx5e_destroy_rq(&priv->drop_rq);

err_disable_cq:
	mlx5e_disable_cq(&priv->drop_rq.cq);

err_destroy_cq:
	mlx5e_destroy_cq(&priv->drop_rq.cq);

	return err;
}
1789
/* Tear down the drop RQ and its CQ in reverse order of
 * mlx5e_open_drop_rq(): firmware objects first, then host memory.
 */
static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
{
	mlx5e_disable_rq(&priv->drop_rq);
	mlx5e_destroy_rq(&priv->drop_rq);
	mlx5e_disable_cq(&priv->drop_rq.cq);
	mlx5e_destroy_cq(&priv->drop_rq.cq);
}
1797
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001798static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001799{
1800 struct mlx5_core_dev *mdev = priv->mdev;
1801 u32 in[MLX5_ST_SZ_DW(create_tis_in)];
1802 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
1803
1804 memset(in, 0, sizeof(in));
1805
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001806 MLX5_SET(tisc, tisc, prio, tc << 1);
Achiad Shochat3191e05f2015-06-11 14:47:33 +03001807 MLX5_SET(tisc, tisc, transport_domain, priv->tdn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001808
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +03001809 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001810}
1811
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001812static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001813{
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +03001814 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001815}
1816
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001817static int mlx5e_create_tises(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001818{
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001819 int err;
1820 int tc;
1821
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001822 for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001823 err = mlx5e_create_tis(priv, tc);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001824 if (err)
1825 goto err_close_tises;
1826 }
1827
1828 return 0;
1829
1830err_close_tises:
1831 for (tc--; tc >= 0; tc--)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001832 mlx5e_destroy_tis(priv, tc);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001833
1834 return err;
1835}
1836
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001837static void mlx5e_destroy_tises(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001838{
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001839 int tc;
1840
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001841 for (tc = 0; tc < MLX5E_MAX_NUM_TC; tc++)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001842 mlx5e_destroy_tis(priv, tc);
Achiad Shochat5c503682015-08-04 14:05:43 +03001843}
1844
/*
 * Fill a TIR (transport interface receive) context for traffic type @tt.
 *
 * MLX5E_TT_ANY steers through the single-RQ RQT with a trivial XOR8 hash;
 * every other traffic type uses the indirection RQT plus the RSS hash
 * configuration, and then selects which packet fields feed the hash.
 */
static void mlx5e_build_tir_ctx(struct mlx5e_priv *priv, u32 *tirc, int tt)
{
	/* outer-header RX hash field selector inside the TIR context */
	void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);

	MLX5_SET(tirc, tirc, transport_domain, priv->tdn);

/* hash on src/dst IP only */
#define MLX5_HASH_IP            (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP)

/* hash on the classic 4-tuple: src/dst IP + src/dst L4 port */
#define MLX5_HASH_IP_L4PORTS    (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_L4_SPORT |\
				 MLX5_HASH_FIELD_SEL_L4_DPORT)

/* hash on src/dst IP + IPsec SPI (AH/ESP traffic) */
#define MLX5_HASH_IP_IPSEC_SPI  (MLX5_HASH_FIELD_SEL_SRC_IP   |\
				 MLX5_HASH_FIELD_SEL_DST_IP   |\
				 MLX5_HASH_FIELD_SEL_IPSEC_SPI)

	mlx5e_build_tir_ctx_lro(tirc, priv);

	MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);

	/* First pass: pick the RQT and hash function. */
	switch (tt) {
	case MLX5E_TT_ANY:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_SINGLE_RQ_RQT]);
		MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
		break;
	default:
		MLX5_SET(tirc, tirc, indirect_table,
			 priv->rqtn[MLX5E_INDIRECTION_RQT]);
		mlx5e_build_tir_ctx_hash(tirc, priv);
		break;
	}

	/* Second pass: select the header fields that feed the RX hash. */
	switch (tt) {
	case MLX5E_TT_IPV4_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_TCP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_TCP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV6_UDP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
			 MLX5_L4_PROT_TYPE_UDP);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_L4PORTS);
		break;

	case MLX5E_TT_IPV4_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_AH:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV6_IPSEC_ESP:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP_IPSEC_SPI);
		break;

	case MLX5E_TT_IPV4:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV4);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;

	case MLX5E_TT_IPV6:
		MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
			 MLX5_L3_PROT_TYPE_IPV6);
		MLX5_SET(rx_hash_field_select, hfso, selected_fields,
			 MLX5_HASH_IP);
		break;
	}
}
1960
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001961static int mlx5e_create_tir(struct mlx5e_priv *priv, int tt)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001962{
1963 struct mlx5_core_dev *mdev = priv->mdev;
1964 u32 *in;
1965 void *tirc;
1966 int inlen;
1967 int err;
1968
1969 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
1970 in = mlx5_vzalloc(inlen);
1971 if (!in)
1972 return -ENOMEM;
1973
1974 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
1975
1976 mlx5e_build_tir_ctx(priv, tirc, tt);
1977
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +03001978 err = mlx5_core_create_tir(mdev, in, inlen, &priv->tirn[tt]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001979
1980 kvfree(in);
1981
1982 return err;
1983}
1984
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001985static void mlx5e_destroy_tir(struct mlx5e_priv *priv, int tt)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001986{
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +03001987 mlx5_core_destroy_tir(priv->mdev, priv->tirn[tt]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001988}
1989
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001990static int mlx5e_create_tirs(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001991{
1992 int err;
1993 int i;
1994
1995 for (i = 0; i < MLX5E_NUM_TT; i++) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001996 err = mlx5e_create_tir(priv, i);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001997 if (err)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001998 goto err_destroy_tirs;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001999 }
2000
2001 return 0;
2002
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002003err_destroy_tirs:
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002004 for (i--; i >= 0; i--)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002005 mlx5e_destroy_tir(priv, i);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002006
2007 return err;
2008}
2009
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002010static void mlx5e_destroy_tirs(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002011{
2012 int i;
2013
2014 for (i = 0; i < MLX5E_NUM_TT; i++)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002015 mlx5e_destroy_tir(priv, i);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002016}
2017
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002018static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2019{
2020 struct mlx5e_priv *priv = netdev_priv(netdev);
2021 bool was_opened;
2022 int err = 0;
2023
2024 if (tc && tc != MLX5E_MAX_NUM_TC)
2025 return -EINVAL;
2026
2027 mutex_lock(&priv->state_lock);
2028
2029 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2030 if (was_opened)
2031 mlx5e_close_locked(priv->netdev);
2032
2033 priv->params.num_tc = tc ? tc : 1;
2034
2035 if (was_opened)
2036 err = mlx5e_open_locked(priv->netdev);
2037
2038 mutex_unlock(&priv->state_lock);
2039
2040 return err;
2041}
2042
/*
 * ndo_setup_tc entry point: dispatch flower classifier offload commands
 * on the ingress qdisc, and fall back to mqprio TC configuration for
 * everything that is not an ingress request.
 */
static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
			      __be16 proto, struct tc_to_netdev *tc)
{
	struct mlx5e_priv *priv = netdev_priv(dev);

	/* only ingress-attached classifiers are offloaded here */
	if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
		goto mqprio;

	switch (tc->type) {
	case TC_SETUP_CLSFLOWER:
		switch (tc->cls_flower->command) {
		case TC_CLSFLOWER_REPLACE:
			return mlx5e_configure_flower(priv, proto, tc->cls_flower);
		case TC_CLSFLOWER_DESTROY:
			return mlx5e_delete_flower(priv, tc->cls_flower);
		}
		/* unhandled flower commands fall through to -EOPNOTSUPP */
	default:
		return -EOPNOTSUPP;
	}

mqprio:
	if (tc->type != TC_SETUP_MQPRIO)
		return -EINVAL;

	return mlx5e_setup_tc(dev, tc->tc);
}
2069
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002070static struct rtnl_link_stats64 *
2071mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
2072{
2073 struct mlx5e_priv *priv = netdev_priv(dev);
2074 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
2075
2076 stats->rx_packets = vstats->rx_packets;
2077 stats->rx_bytes = vstats->rx_bytes;
2078 stats->tx_packets = vstats->tx_packets;
2079 stats->tx_bytes = vstats->tx_bytes;
2080 stats->multicast = vstats->rx_multicast_packets +
2081 vstats->tx_multicast_packets;
2082 stats->tx_errors = vstats->tx_error_packets;
2083 stats->rx_errors = vstats->rx_error_packets;
2084 stats->tx_dropped = vstats->tx_queue_dropped;
2085 stats->rx_crc_errors = 0;
2086 stats->rx_length_errors = 0;
2087
2088 return stats;
2089}
2090
2091static void mlx5e_set_rx_mode(struct net_device *dev)
2092{
2093 struct mlx5e_priv *priv = netdev_priv(dev);
2094
2095 schedule_work(&priv->set_rx_mode_work);
2096}
2097
2098static int mlx5e_set_mac(struct net_device *netdev, void *addr)
2099{
2100 struct mlx5e_priv *priv = netdev_priv(netdev);
2101 struct sockaddr *saddr = addr;
2102
2103 if (!is_valid_ether_addr(saddr->sa_data))
2104 return -EADDRNOTAVAIL;
2105
2106 netif_addr_lock_bh(netdev);
2107 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
2108 netif_addr_unlock_bh(netdev);
2109
2110 schedule_work(&priv->set_rx_mode_work);
2111
2112 return 0;
2113}
2114
2115static int mlx5e_set_features(struct net_device *netdev,
2116 netdev_features_t features)
2117{
2118 struct mlx5e_priv *priv = netdev_priv(netdev);
Achiad Shochat98e81b02015-07-29 15:05:46 +03002119 int err = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002120 netdev_features_t changes = features ^ netdev->features;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002121
2122 mutex_lock(&priv->state_lock);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002123
2124 if (changes & NETIF_F_LRO) {
Achiad Shochat98e81b02015-07-29 15:05:46 +03002125 bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002126
Tariq Toukan461017c2016-04-20 22:02:13 +03002127 if (was_opened && (priv->params.rq_wq_type ==
2128 MLX5_WQ_TYPE_LINKED_LIST))
Achiad Shochat98e81b02015-07-29 15:05:46 +03002129 mlx5e_close_locked(priv->netdev);
2130
2131 priv->params.lro_en = !!(features & NETIF_F_LRO);
Tariq Toukanab0394f2016-02-29 21:17:10 +02002132 err = mlx5e_modify_tirs_lro(priv);
2133 if (err)
2134 mlx5_core_warn(priv->mdev, "lro modify failed, %d\n",
2135 err);
Achiad Shochat98e81b02015-07-29 15:05:46 +03002136
Tariq Toukan461017c2016-04-20 22:02:13 +03002137 if (was_opened && (priv->params.rq_wq_type ==
2138 MLX5_WQ_TYPE_LINKED_LIST))
Achiad Shochat98e81b02015-07-29 15:05:46 +03002139 err = mlx5e_open_locked(priv->netdev);
2140 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002141
Achiad Shochat9b37b072015-08-04 14:05:46 +03002142 mutex_unlock(&priv->state_lock);
2143
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002144 if (changes & NETIF_F_HW_VLAN_CTAG_FILTER) {
2145 if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
2146 mlx5e_enable_vlan_filter(priv);
2147 else
2148 mlx5e_disable_vlan_filter(priv);
2149 }
2150
Amir Vadaie8f887a2016-03-08 12:42:36 +02002151 if ((changes & NETIF_F_HW_TC) && !(features & NETIF_F_HW_TC) &&
2152 mlx5e_tc_num_filters(priv)) {
2153 netdev_err(netdev,
2154 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
2155 return -EINVAL;
2156 }
2157
Achiad Shochatfe9f4fe2015-11-03 08:07:22 +02002158 return err;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002159}
2160
2161static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2162{
2163 struct mlx5e_priv *priv = netdev_priv(netdev);
2164 struct mlx5_core_dev *mdev = priv->mdev;
Achiad Shochat98e81b02015-07-29 15:05:46 +03002165 bool was_opened;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002166 int max_mtu;
Achiad Shochat98e81b02015-07-29 15:05:46 +03002167 int err = 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002168
Saeed Mahameedfacc9692015-06-11 14:47:27 +03002169 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002170
Doron Tsur50a9eea2015-11-12 19:35:27 +02002171 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
2172
Saeed Mahameedfacc9692015-06-11 14:47:27 +03002173 if (new_mtu > max_mtu) {
2174 netdev_err(netdev,
2175 "%s: Bad MTU (%d) > (%d) Max\n",
2176 __func__, new_mtu, max_mtu);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002177 return -EINVAL;
2178 }
2179
2180 mutex_lock(&priv->state_lock);
Achiad Shochat98e81b02015-07-29 15:05:46 +03002181
2182 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2183 if (was_opened)
2184 mlx5e_close_locked(netdev);
2185
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002186 netdev->mtu = new_mtu;
Achiad Shochat98e81b02015-07-29 15:05:46 +03002187
2188 if (was_opened)
2189 err = mlx5e_open_locked(netdev);
2190
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002191 mutex_unlock(&priv->state_lock);
2192
2193 return err;
2194}
2195
Eran Ben Elishaef9814d2015-12-29 14:58:31 +02002196static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2197{
2198 switch (cmd) {
2199 case SIOCSHWTSTAMP:
2200 return mlx5e_hwstamp_set(dev, ifr);
2201 case SIOCGHWTSTAMP:
2202 return mlx5e_hwstamp_get(dev, ifr);
2203 default:
2204 return -EOPNOTSUPP;
2205 }
2206}
2207
Saeed Mahameed66e49de2015-12-01 18:03:25 +02002208static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2209{
2210 struct mlx5e_priv *priv = netdev_priv(dev);
2211 struct mlx5_core_dev *mdev = priv->mdev;
2212
2213 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
2214}
2215
2216static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2217{
2218 struct mlx5e_priv *priv = netdev_priv(dev);
2219 struct mlx5_core_dev *mdev = priv->mdev;
2220
2221 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
2222 vlan, qos);
2223}
2224
2225static int mlx5_vport_link2ifla(u8 esw_link)
2226{
2227 switch (esw_link) {
2228 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
2229 return IFLA_VF_LINK_STATE_DISABLE;
2230 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
2231 return IFLA_VF_LINK_STATE_ENABLE;
2232 }
2233 return IFLA_VF_LINK_STATE_AUTO;
2234}
2235
2236static int mlx5_ifla_link2vport(u8 ifla_link)
2237{
2238 switch (ifla_link) {
2239 case IFLA_VF_LINK_STATE_DISABLE:
2240 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
2241 case IFLA_VF_LINK_STATE_ENABLE:
2242 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
2243 }
2244 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
2245}
2246
2247static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
2248 int link_state)
2249{
2250 struct mlx5e_priv *priv = netdev_priv(dev);
2251 struct mlx5_core_dev *mdev = priv->mdev;
2252
2253 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
2254 mlx5_ifla_link2vport(link_state));
2255}
2256
2257static int mlx5e_get_vf_config(struct net_device *dev,
2258 int vf, struct ifla_vf_info *ivi)
2259{
2260 struct mlx5e_priv *priv = netdev_priv(dev);
2261 struct mlx5_core_dev *mdev = priv->mdev;
2262 int err;
2263
2264 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
2265 if (err)
2266 return err;
2267 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
2268 return 0;
2269}
2270
2271static int mlx5e_get_vf_stats(struct net_device *dev,
2272 int vf, struct ifla_vf_stats *vf_stats)
2273{
2274 struct mlx5e_priv *priv = netdev_priv(dev);
2275 struct mlx5_core_dev *mdev = priv->mdev;
2276
2277 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
2278 vf_stats);
2279}
2280
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002281static void mlx5e_add_vxlan_port(struct net_device *netdev,
2282 sa_family_t sa_family, __be16 port)
2283{
2284 struct mlx5e_priv *priv = netdev_priv(netdev);
2285
2286 if (!mlx5e_vxlan_allowed(priv->mdev))
2287 return;
2288
2289 mlx5e_vxlan_add_port(priv, be16_to_cpu(port));
2290}
2291
2292static void mlx5e_del_vxlan_port(struct net_device *netdev,
2293 sa_family_t sa_family, __be16 port)
2294{
2295 struct mlx5e_priv *priv = netdev_priv(netdev);
2296
2297 if (!mlx5e_vxlan_allowed(priv->mdev))
2298 return;
2299
2300 mlx5e_vxlan_del_port(priv, be16_to_cpu(port));
2301}
2302
2303static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
2304 struct sk_buff *skb,
2305 netdev_features_t features)
2306{
2307 struct udphdr *udph;
2308 u16 proto;
2309 u16 port = 0;
2310
2311 switch (vlan_get_protocol(skb)) {
2312 case htons(ETH_P_IP):
2313 proto = ip_hdr(skb)->protocol;
2314 break;
2315 case htons(ETH_P_IPV6):
2316 proto = ipv6_hdr(skb)->nexthdr;
2317 break;
2318 default:
2319 goto out;
2320 }
2321
2322 if (proto == IPPROTO_UDP) {
2323 udph = udp_hdr(skb);
2324 port = be16_to_cpu(udph->dest);
2325 }
2326
2327 /* Verify if UDP port is being offloaded by HW */
2328 if (port && mlx5e_vxlan_lookup_port(priv, port))
2329 return features;
2330
2331out:
2332 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
2333 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2334}
2335
2336static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
2337 struct net_device *netdev,
2338 netdev_features_t features)
2339{
2340 struct mlx5e_priv *priv = netdev_priv(netdev);
2341
2342 features = vlan_features_check(skb, features);
2343 features = vxlan_features_check(skb, features);
2344
2345 /* Validate if the tunneled packet is being offloaded by HW */
2346 if (skb->encapsulation &&
2347 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
2348 return mlx5e_vxlan_features_check(priv, skb, features);
2349
2350 return features;
2351}
2352
/* netdev ops for devices without eswitch/SR-IOV management (no VF ops,
 * no VXLAN port notifications).
 */
static const struct net_device_ops mlx5e_netdev_ops_basic = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
};
2368
/* netdev ops for vport-group-manager devices: the basic set plus VXLAN
 * port notifications, tunnel feature validation and the VF management ops.
 */
static const struct net_device_ops mlx5e_netdev_ops_sriov = {
	.ndo_open                = mlx5e_open,
	.ndo_stop                = mlx5e_close,
	.ndo_start_xmit          = mlx5e_xmit,
	.ndo_setup_tc            = mlx5e_ndo_setup_tc,
	.ndo_select_queue        = mlx5e_select_queue,
	.ndo_get_stats64         = mlx5e_get_stats,
	.ndo_set_rx_mode         = mlx5e_set_rx_mode,
	.ndo_set_mac_address     = mlx5e_set_mac,
	.ndo_vlan_rx_add_vid     = mlx5e_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid    = mlx5e_vlan_rx_kill_vid,
	.ndo_set_features        = mlx5e_set_features,
	.ndo_change_mtu          = mlx5e_change_mtu,
	.ndo_do_ioctl            = mlx5e_ioctl,
	.ndo_add_vxlan_port      = mlx5e_add_vxlan_port,
	.ndo_del_vxlan_port      = mlx5e_del_vxlan_port,
	.ndo_features_check      = mlx5e_features_check,
	.ndo_set_vf_mac          = mlx5e_set_vf_mac,
	.ndo_set_vf_vlan         = mlx5e_set_vf_vlan,
	.ndo_get_vf_config       = mlx5e_get_vf_config,
	.ndo_set_vf_link_state   = mlx5e_set_vf_link_state,
	.ndo_get_vf_stats        = mlx5e_get_vf_stats,
};
2392
2393static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2394{
2395 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2396 return -ENOTSUPP;
2397 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
2398 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
2399 !MLX5_CAP_ETH(mdev, csum_cap) ||
2400 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
2401 !MLX5_CAP_ETH(mdev, vlan_cap) ||
Gal Pressman796a27e2015-06-11 14:47:30 +03002402 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
2403 MLX5_CAP_FLOWTABLE(mdev,
2404 flow_table_properties_nic_receive.max_ft_level)
2405 < 3) {
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002406 mlx5_core_warn(mdev,
2407 "Not creating net device, some required device capabilities are missing\n");
2408 return -ENOTSUPP;
2409 }
Tariq Toukan66189962015-11-12 19:35:26 +02002410 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
2411 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
Gal Pressman7524a5d2016-03-02 00:13:37 +02002412 if (!MLX5_CAP_GEN(mdev, cq_moderation))
2413 mlx5_core_warn(mdev, "CQ modiration is not supported\n");
Tariq Toukan66189962015-11-12 19:35:26 +02002414
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002415 return 0;
2416}
2417
Achiad Shochat58d52292015-07-23 23:35:58 +03002418u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2419{
2420 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
2421
2422 return bf_buf_size -
2423 sizeof(struct mlx5e_tx_wqe) +
2424 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
2425}
2426
#ifdef CONFIG_MLX5_CORE_EN_DCB
/*
 * Seed the default ETS configuration: one TC per supported HW TC, all
 * vendor TSA with max bandwidth allocation, priorities mapped 1:1 —
 * except priorities 0 and 1, which are swapped (see comment below).
 */
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
	int i;

	priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
	for (i = 0; i < priv->params.ets.ets_cap; i++) {
		priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
		priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
		priv->params.ets.prio_tc[i] = i;
	}

	/* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
	priv->params.ets.prio_tc[0] = 1;
	priv->params.ets.prio_tc[1] = 0;
}
#endif
2444
Tariq Toukand8c96602016-04-20 22:02:11 +03002445void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
2446 u32 *indirection_rqt, int len,
Tariq Toukan85082db2016-02-29 21:17:13 +02002447 int num_channels)
2448{
Tariq Toukand8c96602016-04-20 22:02:11 +03002449 int node = mdev->priv.numa_node;
2450 int node_num_of_cores;
Tariq Toukan85082db2016-02-29 21:17:13 +02002451 int i;
2452
Tariq Toukand8c96602016-04-20 22:02:11 +03002453 if (node == -1)
2454 node = first_online_node;
2455
2456 node_num_of_cores = cpumask_weight(cpumask_of_node(node));
2457
2458 if (node_num_of_cores)
2459 num_channels = min_t(int, num_channels, node_num_of_cores);
2460
Tariq Toukan85082db2016-02-29 21:17:13 +02002461 for (i = 0; i < len; i++)
2462 indirection_rqt[i] = i % num_channels;
2463}
2464
Tariq Toukanbc77b242016-04-20 22:02:15 +03002465static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
2466{
2467 return MLX5_CAP_GEN(mdev, striding_rq) &&
2468 MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
2469 MLX5_CAP_ETH(mdev, reg_umr_sq);
2470}
2471
/*
 * Initialize the driver-private parameters and work items before the
 * netdev is registered: RQ/SQ sizes, CQ moderation defaults, RSS
 * configuration and the update/rx-mode/stats work handlers.
 */
static void mlx5e_build_netdev_priv(struct mlx5_core_dev *mdev,
				    struct net_device *netdev,
				    int num_channels)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);

	priv->params.log_sq_size =
		MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
	/* prefer striding RQs when the HW supports them */
	priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
		MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
		MLX5_WQ_TYPE_LINKED_LIST;

	switch (priv->params.rq_wq_type) {
	case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
		/* LRO is enabled by default for striding RQs */
		priv->params.lro_en = true;
		break;
	default: /* MLX5_WQ_TYPE_LINKED_LIST */
		priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
	}

	priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
					    BIT(priv->params.log_rq_size));
	priv->params.rx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;
	priv->params.rx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
	priv->params.tx_cq_moderation_usec =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
	priv->params.tx_cq_moderation_pkts =
		MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
	priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
	priv->params.num_tc = 1;
	priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

	netdev_rss_key_fill(priv->params.toeplitz_hash_key,
			    sizeof(priv->params.toeplitz_hash_key));

	mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
				      MLX5E_INDIR_RQT_SIZE, num_channels);

	priv->params.lro_wqe_sz =
		MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

	priv->mdev = mdev;
	priv->netdev = netdev;
	priv->params.num_channels = num_channels;

#ifdef CONFIG_MLX5_CORE_EN_DCB
	mlx5e_ets_init(priv);
#endif

	mutex_init(&priv->state_lock);

	INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
	INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
	INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}
2530
2531static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
2532{
2533 struct mlx5e_priv *priv = netdev_priv(netdev);
2534
Saeed Mahameede1d7d342015-12-01 18:03:11 +02002535 mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
Saeed Mahameed108805f2015-12-10 17:12:38 +02002536 if (is_zero_ether_addr(netdev->dev_addr) &&
2537 !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
2538 eth_hw_addr_random(netdev);
2539 mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
2540 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002541}
2542
/*
 * Configure the struct net_device before registration: pick the ops set
 * (SR-IOV capable vs. basic), advertise offload features, and set the
 * initial MAC address.
 */
static void mlx5e_build_netdev(struct net_device *netdev)
{
	struct mlx5e_priv *priv = netdev_priv(netdev);
	struct mlx5_core_dev *mdev = priv->mdev;

	SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

	if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
		netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
		netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
	} else {
		netdev->netdev_ops = &mlx5e_netdev_ops_basic;
	}

	netdev->watchdog_timeo    = 15 * HZ;

	netdev->ethtool_ops	  = &mlx5e_ethtool_ops;

	netdev->vlan_features    |= NETIF_F_SG;
	netdev->vlan_features    |= NETIF_F_IP_CSUM;
	netdev->vlan_features    |= NETIF_F_IPV6_CSUM;
	netdev->vlan_features    |= NETIF_F_GRO;
	netdev->vlan_features    |= NETIF_F_TSO;
	netdev->vlan_features    |= NETIF_F_TSO6;
	netdev->vlan_features    |= NETIF_F_RXCSUM;
	netdev->vlan_features    |= NETIF_F_RXHASH;

	if (!!MLX5_CAP_ETH(mdev, lro_cap))
		netdev->vlan_features    |= NETIF_F_LRO;

	/* hw_features start as vlan_features plus the VLAN offloads */
	netdev->hw_features       = netdev->vlan_features;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_TX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_RX;
	netdev->hw_features      |= NETIF_F_HW_VLAN_CTAG_FILTER;

	if (mlx5e_vxlan_allowed(mdev)) {
		netdev->hw_features     |= NETIF_F_GSO_UDP_TUNNEL;
		netdev->hw_enc_features |= NETIF_F_IP_CSUM;
		netdev->hw_enc_features |= NETIF_F_RXCSUM;
		netdev->hw_enc_features |= NETIF_F_TSO;
		netdev->hw_enc_features |= NETIF_F_TSO6;
		netdev->hw_enc_features |= NETIF_F_RXHASH;
		netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
	}

	netdev->features          = netdev->hw_features;
	if (!priv->params.lro_en)
		netdev->features  &= ~NETIF_F_LRO;

/* flow-table capability shorthand for the HW TC offload check below */
#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
	if (FT_CAP(flow_modify_en) &&
	    FT_CAP(modify_root) &&
	    FT_CAP(identified_miss_table_mode) &&
	    FT_CAP(flow_table_modify))
		priv->netdev->hw_features      |= NETIF_F_HW_TC;

	netdev->features         |= NETIF_F_HIGHDMA;

	netdev->priv_flags       |= IFF_UNICAST_FLT;

	mlx5e_set_netdev_dev_addr(netdev);
}
2607
2608static int mlx5e_create_mkey(struct mlx5e_priv *priv, u32 pdn,
Matan Baraka606b0f2016-02-29 18:05:28 +02002609 struct mlx5_core_mkey *mkey)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002610{
2611 struct mlx5_core_dev *mdev = priv->mdev;
2612 struct mlx5_create_mkey_mbox_in *in;
2613 int err;
2614
2615 in = mlx5_vzalloc(sizeof(*in));
2616 if (!in)
2617 return -ENOMEM;
2618
2619 in->seg.flags = MLX5_PERM_LOCAL_WRITE |
2620 MLX5_PERM_LOCAL_READ |
2621 MLX5_ACCESS_MODE_PA;
2622 in->seg.flags_pd = cpu_to_be32(pdn | MLX5_MKEY_LEN64);
2623 in->seg.qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
2624
Matan Baraka606b0f2016-02-29 18:05:28 +02002625 err = mlx5_core_create_mkey(mdev, mkey, in, sizeof(*in), NULL, NULL,
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002626 NULL);
2627
2628 kvfree(in);
2629
2630 return err;
2631}
2632
Rana Shahout593cf332016-04-20 22:02:10 +03002633static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
2634{
2635 struct mlx5_core_dev *mdev = priv->mdev;
2636 int err;
2637
2638 err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
2639 if (err) {
2640 mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
2641 priv->q_counter = 0;
2642 }
2643}
2644
2645static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
2646{
2647 if (!priv->q_counter)
2648 return;
2649
2650 mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
2651}
2652
Tariq Toukanbc77b242016-04-20 22:02:15 +03002653static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
2654{
2655 struct mlx5_core_dev *mdev = priv->mdev;
2656 struct mlx5_create_mkey_mbox_in *in;
2657 struct mlx5_mkey_seg *mkc;
2658 int inlen = sizeof(*in);
2659 u64 npages =
2660 mlx5e_get_max_num_channels(mdev) * MLX5_CHANNEL_MAX_NUM_MTTS;
2661 int err;
2662
2663 in = mlx5_vzalloc(inlen);
2664 if (!in)
2665 return -ENOMEM;
2666
2667 mkc = &in->seg;
2668 mkc->status = MLX5_MKEY_STATUS_FREE;
2669 mkc->flags = MLX5_PERM_UMR_EN |
2670 MLX5_PERM_LOCAL_READ |
2671 MLX5_PERM_LOCAL_WRITE |
2672 MLX5_ACCESS_MODE_MTT;
2673
2674 mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
2675 mkc->flags_pd = cpu_to_be32(priv->pdn);
2676 mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
2677 mkc->xlt_oct_size = cpu_to_be32(mlx5e_get_mtt_octw(npages));
2678 mkc->log2_page_size = PAGE_SHIFT;
2679
2680 err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
2681 NULL, NULL);
2682
2683 kvfree(in);
2684
2685 return err;
2686}
2687
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002688static void *mlx5e_create_netdev(struct mlx5_core_dev *mdev)
2689{
2690 struct net_device *netdev;
2691 struct mlx5e_priv *priv;
Achiad Shochat3435ab52015-11-03 08:07:21 +02002692 int nch = mlx5e_get_max_num_channels(mdev);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002693 int err;
2694
2695 if (mlx5e_check_required_hca_cap(mdev))
2696 return NULL;
2697
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002698 netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
2699 nch * MLX5E_MAX_NUM_TC,
2700 nch);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002701 if (!netdev) {
2702 mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
2703 return NULL;
2704 }
2705
Achiad Shochat936896e2015-08-16 16:04:46 +03002706 mlx5e_build_netdev_priv(mdev, netdev, nch);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002707 mlx5e_build_netdev(netdev);
2708
2709 netif_carrier_off(netdev);
2710
2711 priv = netdev_priv(netdev);
2712
Moshe Lazer0ba42242016-03-02 00:13:40 +02002713 err = mlx5_alloc_map_uar(mdev, &priv->cq_uar, false);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002714 if (err) {
Achiad Shochat1f2a3002015-07-29 15:05:44 +03002715 mlx5_core_err(mdev, "alloc_map uar failed, %d\n", err);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002716 goto err_free_netdev;
2717 }
2718
2719 err = mlx5_core_alloc_pd(mdev, &priv->pdn);
2720 if (err) {
Achiad Shochat1f2a3002015-07-29 15:05:44 +03002721 mlx5_core_err(mdev, "alloc pd failed, %d\n", err);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002722 goto err_unmap_free_uar;
2723 }
2724
majd@mellanox.com8d7f9ec2016-01-14 19:12:59 +02002725 err = mlx5_core_alloc_transport_domain(mdev, &priv->tdn);
Achiad Shochat3191e05f2015-06-11 14:47:33 +03002726 if (err) {
Achiad Shochat1f2a3002015-07-29 15:05:44 +03002727 mlx5_core_err(mdev, "alloc td failed, %d\n", err);
Achiad Shochat3191e05f2015-06-11 14:47:33 +03002728 goto err_dealloc_pd;
2729 }
2730
Matan Baraka606b0f2016-02-29 18:05:28 +02002731 err = mlx5e_create_mkey(priv, priv->pdn, &priv->mkey);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002732 if (err) {
Achiad Shochat1f2a3002015-07-29 15:05:44 +03002733 mlx5_core_err(mdev, "create mkey failed, %d\n", err);
Achiad Shochat3191e05f2015-06-11 14:47:33 +03002734 goto err_dealloc_transport_domain;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002735 }
2736
Tariq Toukanbc77b242016-04-20 22:02:15 +03002737 err = mlx5e_create_umr_mkey(priv);
2738 if (err) {
2739 mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
2740 goto err_destroy_mkey;
2741 }
2742
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002743 err = mlx5e_create_tises(priv);
Achiad Shochat5c503682015-08-04 14:05:43 +03002744 if (err) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002745 mlx5_core_warn(mdev, "create tises failed, %d\n", err);
Tariq Toukanbc77b242016-04-20 22:02:15 +03002746 goto err_destroy_umr_mkey;
Achiad Shochat5c503682015-08-04 14:05:43 +03002747 }
2748
2749 err = mlx5e_open_drop_rq(priv);
2750 if (err) {
2751 mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002752 goto err_destroy_tises;
Achiad Shochat5c503682015-08-04 14:05:43 +03002753 }
2754
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002755 err = mlx5e_create_rqt(priv, MLX5E_INDIRECTION_RQT);
Achiad Shochat5c503682015-08-04 14:05:43 +03002756 if (err) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002757 mlx5_core_warn(mdev, "create rqt(INDIR) failed, %d\n", err);
Achiad Shochat5c503682015-08-04 14:05:43 +03002758 goto err_close_drop_rq;
2759 }
2760
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002761 err = mlx5e_create_rqt(priv, MLX5E_SINGLE_RQ_RQT);
Achiad Shochat5c503682015-08-04 14:05:43 +03002762 if (err) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002763 mlx5_core_warn(mdev, "create rqt(SINGLE) failed, %d\n", err);
2764 goto err_destroy_rqt_indir;
Achiad Shochat5c503682015-08-04 14:05:43 +03002765 }
2766
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002767 err = mlx5e_create_tirs(priv);
Achiad Shochat5c503682015-08-04 14:05:43 +03002768 if (err) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002769 mlx5_core_warn(mdev, "create tirs failed, %d\n", err);
2770 goto err_destroy_rqt_single;
Achiad Shochat5c503682015-08-04 14:05:43 +03002771 }
2772
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002773 err = mlx5e_create_flow_tables(priv);
Achiad Shochat5c503682015-08-04 14:05:43 +03002774 if (err) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002775 mlx5_core_warn(mdev, "create flow tables failed, %d\n", err);
2776 goto err_destroy_tirs;
Achiad Shochat5c503682015-08-04 14:05:43 +03002777 }
2778
Rana Shahout593cf332016-04-20 22:02:10 +03002779 mlx5e_create_q_counter(priv);
2780
Achiad Shochat5c503682015-08-04 14:05:43 +03002781 mlx5e_init_eth_addr(priv);
2782
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002783 mlx5e_vxlan_init(priv);
2784
Amir Vadaie8f887a2016-03-08 12:42:36 +02002785 err = mlx5e_tc_init(priv);
2786 if (err)
Rana Shahout593cf332016-04-20 22:02:10 +03002787 goto err_dealloc_q_counters;
Amir Vadaie8f887a2016-03-08 12:42:36 +02002788
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002789#ifdef CONFIG_MLX5_CORE_EN_DCB
2790 mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
2791#endif
2792
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002793 err = register_netdev(netdev);
2794 if (err) {
Achiad Shochat1f2a3002015-07-29 15:05:44 +03002795 mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
Amir Vadaie8f887a2016-03-08 12:42:36 +02002796 goto err_tc_cleanup;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002797 }
2798
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002799 if (mlx5e_vxlan_allowed(mdev))
2800 vxlan_get_rx_port(netdev);
2801
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002802 mlx5e_enable_async_events(priv);
Achiad Shochat9b37b072015-08-04 14:05:46 +03002803 schedule_work(&priv->set_rx_mode_work);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002804
2805 return priv;
2806
Amir Vadaie8f887a2016-03-08 12:42:36 +02002807err_tc_cleanup:
2808 mlx5e_tc_cleanup(priv);
2809
Rana Shahout593cf332016-04-20 22:02:10 +03002810err_dealloc_q_counters:
2811 mlx5e_destroy_q_counter(priv);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002812 mlx5e_destroy_flow_tables(priv);
Achiad Shochat5c503682015-08-04 14:05:43 +03002813
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002814err_destroy_tirs:
2815 mlx5e_destroy_tirs(priv);
Achiad Shochat5c503682015-08-04 14:05:43 +03002816
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002817err_destroy_rqt_single:
2818 mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
Achiad Shochat5c503682015-08-04 14:05:43 +03002819
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002820err_destroy_rqt_indir:
2821 mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
Achiad Shochat5c503682015-08-04 14:05:43 +03002822
2823err_close_drop_rq:
2824 mlx5e_close_drop_rq(priv);
2825
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002826err_destroy_tises:
2827 mlx5e_destroy_tises(priv);
Achiad Shochat5c503682015-08-04 14:05:43 +03002828
Tariq Toukanbc77b242016-04-20 22:02:15 +03002829err_destroy_umr_mkey:
2830 mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);
2831
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002832err_destroy_mkey:
Matan Baraka606b0f2016-02-29 18:05:28 +02002833 mlx5_core_destroy_mkey(mdev, &priv->mkey);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002834
Achiad Shochat3191e05f2015-06-11 14:47:33 +03002835err_dealloc_transport_domain:
majd@mellanox.com8d7f9ec2016-01-14 19:12:59 +02002836 mlx5_core_dealloc_transport_domain(mdev, priv->tdn);
Achiad Shochat3191e05f2015-06-11 14:47:33 +03002837
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002838err_dealloc_pd:
2839 mlx5_core_dealloc_pd(mdev, priv->pdn);
2840
2841err_unmap_free_uar:
2842 mlx5_unmap_free_uar(mdev, &priv->cq_uar);
2843
2844err_free_netdev:
2845 free_netdev(netdev);
2846
2847 return NULL;
2848}
2849
/* mlx5 interface .remove callback: tear down the netdev created by
 * mlx5e_create_netdev(). The sequence is the strict reverse of the
 * creation order; do not reorder these calls.
 */
static void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, void *vpriv)
{
	struct mlx5e_priv *priv = vpriv;
	struct net_device *netdev = priv->netdev;

	/* Stop new deferred work from being scheduled before flushing. */
	set_bit(MLX5E_STATE_DESTROYING, &priv->state);

	/* Run the rx-mode work once more so it observes DESTROYING, then
	 * silence async events and drain any remaining queued work.
	 */
	schedule_work(&priv->set_rx_mode_work);
	mlx5e_disable_async_events(priv);
	flush_scheduled_work();
	unregister_netdev(netdev);
	mlx5e_tc_cleanup(priv);
	mlx5e_vxlan_cleanup(priv);
	mlx5e_destroy_q_counter(priv);
	mlx5e_destroy_flow_tables(priv);
	mlx5e_destroy_tirs(priv);
	mlx5e_destroy_rqt(priv, MLX5E_SINGLE_RQ_RQT);
	mlx5e_destroy_rqt(priv, MLX5E_INDIRECTION_RQT);
	mlx5e_close_drop_rq(priv);
	mlx5e_destroy_tises(priv);
	/* Release HW objects, then the netdev memory itself. */
	mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
	mlx5_core_destroy_mkey(priv->mdev, &priv->mkey);
	mlx5_core_dealloc_transport_domain(priv->mdev, priv->tdn);
	mlx5_core_dealloc_pd(priv->mdev, priv->pdn);
	mlx5_unmap_free_uar(priv->mdev, &priv->cq_uar);
	free_netdev(netdev);
}
2877
2878static void *mlx5e_get_netdev(void *vpriv)
2879{
2880 struct mlx5e_priv *priv = vpriv;
2881
2882 return priv->netdev;
2883}
2884
/* Registration record tying the ethernet protocol driver into the
 * mlx5_core device framework; callbacks are invoked per core device.
 */
static struct mlx5_interface mlx5e_interface = {
	.add	  = mlx5e_create_netdev,
	.remove	  = mlx5e_destroy_netdev,
	.event	  = mlx5e_async_event,
	.protocol = MLX5_INTERFACE_PROTOCOL_ETH,
	.get_dev  = mlx5e_get_netdev,
};
2892
/* Module init hook: register the ethernet interface with mlx5_core. */
void mlx5e_init(void)
{
	mlx5_register_interface(&mlx5e_interface);
}
2897
/* Module exit hook: unregister from mlx5_core, tearing down all netdevs. */
void mlx5e_cleanup(void)
{
	mlx5_unregister_interface(&mlx5e_interface);
}