/*
 * Copyright (c) 2015-2016, Mellanox Technologies. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses. You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <net/tc_act/tc_gact.h>
#include <net/pkt_cls.h>
#include <linux/mlx5/fs.h>
#include <net/vxlan.h>
#include "en.h"
#include "en_tc.h"
#include "eswitch.h"
#include "vxlan.h"

struct mlx5e_rq_param {
        u32                     rqc[MLX5_ST_SZ_DW(rqc)];
        struct mlx5_wq_param    wq;
        bool                    am_enabled;
};

struct mlx5e_sq_param {
        u32                     sqc[MLX5_ST_SZ_DW(sqc)];
        struct mlx5_wq_param    wq;
        u16                     max_inline;
        u8                      min_inline_mode;
        bool                    icosq;
};

struct mlx5e_cq_param {
        u32                     cqc[MLX5_ST_SZ_DW(cqc)];
        struct mlx5_wq_param    wq;
        u16                     eq_ix;
        u8                      cq_period_mode;
};

struct mlx5e_channel_param {
        struct mlx5e_rq_param   rq;
        struct mlx5e_sq_param   sq;
        struct mlx5e_sq_param   icosq;
        struct mlx5e_cq_param   rx_cq;
        struct mlx5e_cq_param   tx_cq;
        struct mlx5e_cq_param   icosq_cq;
};

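/* Query the vNIC vport state and reflect it in the netdev carrier state. */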
static void mlx5e_update_carrier(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        u8 port_state;

        port_state = mlx5_query_vport_state(mdev,
                MLX5_QUERY_VPORT_STATE_IN_OP_MOD_VNIC_VPORT, 0);

        if (port_state == VPORT_STATE_UP) {
                netdev_info(priv->netdev, "Link up\n");
                netif_carrier_on(priv->netdev);
        } else {
                netdev_info(priv->netdev, "Link down\n");
                netif_carrier_off(priv->netdev);
        }
}

static void mlx5e_update_carrier_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               update_carrier_work);

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                mlx5e_update_carrier(priv);
        mutex_unlock(&priv->state_lock);
}

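/* Recover from a TX timeout by closing and reopening the channels; takes
 * the RTNL lock since this runs outside the normal ndo call path.
 */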
static void mlx5e_tx_timeout_work(struct work_struct *work)
{
        struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv,
                                               tx_timeout_work);
        int err;

        rtnl_lock();
        mutex_lock(&priv->state_lock);
        if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
                goto unlock;
        mlx5e_close_locked(priv->netdev);
        err = mlx5e_open_locked(priv->netdev);
        if (err)
                netdev_err(priv->netdev, "mlx5e_open_locked failed recovering from a tx_timeout, err(%d).\n",
                           err);
unlock:
        mutex_unlock(&priv->state_lock);
        rtnl_unlock();
}

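/* Fold the per-RQ and per-SQ ring counters into the device-wide software
 * stats, and derive the checksum offload counters that have no direct
 * per-ring source.
 */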
static void mlx5e_update_sw_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_sw_stats *s = &priv->stats.sw;
        struct mlx5e_rq_stats *rq_stats;
        struct mlx5e_sq_stats *sq_stats;
        u64 tx_offload_none = 0;
        int i, j;

        memset(s, 0, sizeof(*s));
        for (i = 0; i < priv->params.num_channels; i++) {
                rq_stats = &priv->channel[i]->rq.stats;

                s->rx_packets += rq_stats->packets;
                s->rx_bytes += rq_stats->bytes;
                s->rx_lro_packets += rq_stats->lro_packets;
                s->rx_lro_bytes += rq_stats->lro_bytes;
                s->rx_csum_none += rq_stats->csum_none;
                s->rx_csum_complete += rq_stats->csum_complete;
                s->rx_csum_unnecessary_inner += rq_stats->csum_unnecessary_inner;
                s->rx_wqe_err += rq_stats->wqe_err;
                s->rx_mpwqe_filler += rq_stats->mpwqe_filler;
                s->rx_mpwqe_frag += rq_stats->mpwqe_frag;
                s->rx_buff_alloc_err += rq_stats->buff_alloc_err;
                s->rx_cqe_compress_blks += rq_stats->cqe_compress_blks;
                s->rx_cqe_compress_pkts += rq_stats->cqe_compress_pkts;

                for (j = 0; j < priv->params.num_tc; j++) {
                        sq_stats = &priv->channel[i]->sq[j].stats;

                        s->tx_packets += sq_stats->packets;
                        s->tx_bytes += sq_stats->bytes;
                        s->tx_tso_packets += sq_stats->tso_packets;
                        s->tx_tso_bytes += sq_stats->tso_bytes;
                        s->tx_tso_inner_packets += sq_stats->tso_inner_packets;
                        s->tx_tso_inner_bytes += sq_stats->tso_inner_bytes;
                        s->tx_queue_stopped += sq_stats->stopped;
                        s->tx_queue_wake += sq_stats->wake;
                        s->tx_queue_dropped += sq_stats->dropped;
                        s->tx_xmit_more += sq_stats->xmit_more;
                        s->tx_csum_partial_inner += sq_stats->csum_partial_inner;
                        tx_offload_none += sq_stats->csum_none;
                }
        }

        /* Update calculated offload counters */
        s->tx_csum_partial = s->tx_packets - tx_offload_none - s->tx_csum_partial_inner;
        s->rx_csum_unnecessary = s->rx_packets - s->rx_csum_none - s->rx_csum_complete;

        s->link_down_events_phy = MLX5_GET(ppcnt_reg,
                                           priv->stats.pport.phy_counters,
                                           counter_set.phys_layer_cntrs.link_down_events);
}

static void mlx5e_update_vport_counters(struct mlx5e_priv *priv)
{
        int outlen = MLX5_ST_SZ_BYTES(query_vport_counter_out);
        u32 *out = (u32 *)priv->stats.vport.query_vport_out;
        u32 in[MLX5_ST_SZ_DW(query_vport_counter_in)];
        struct mlx5_core_dev *mdev = priv->mdev;

        memset(in, 0, sizeof(in));

        MLX5_SET(query_vport_counter_in, in, opcode,
                 MLX5_CMD_OP_QUERY_VPORT_COUNTER);
        MLX5_SET(query_vport_counter_in, in, op_mod, 0);
        MLX5_SET(query_vport_counter_in, in, other_vport, 0);

        memset(out, 0, outlen);

        mlx5_cmd_exec(mdev, in, sizeof(in), out, outlen);
}

static void mlx5e_update_pport_counters(struct mlx5e_priv *priv)
{
        struct mlx5e_pport_stats *pstats = &priv->stats.pport;
        struct mlx5_core_dev *mdev = priv->mdev;
        int sz = MLX5_ST_SZ_BYTES(ppcnt_reg);
        int prio;
        void *out;
        u32 *in;

        in = mlx5_vzalloc(sz);
        if (!in)
                goto free_out;

        MLX5_SET(ppcnt_reg, in, local_port, 1);

        out = pstats->IEEE_802_3_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_IEEE_802_3_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2863_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2863_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->RFC_2819_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_RFC_2819_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        out = pstats->phy_counters;
        MLX5_SET(ppcnt_reg, in, grp, MLX5_PHYSICAL_LAYER_COUNTERS_GROUP);
        mlx5_core_access_reg(mdev, in, sz, out, sz, MLX5_REG_PPCNT, 0, 0);

        MLX5_SET(ppcnt_reg, in, grp, MLX5_PER_PRIORITY_COUNTERS_GROUP);
        for (prio = 0; prio < NUM_PPORT_PRIO; prio++) {
                out = pstats->per_prio_counters[prio];
                MLX5_SET(ppcnt_reg, in, prio_tc, prio);
                mlx5_core_access_reg(mdev, in, sz, out, sz,
                                     MLX5_REG_PPCNT, 0, 0);
        }

free_out:
        kvfree(in);
}

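/* Query the device queue counter for RX out-of-buffer drops, if one was
 * allocated for this interface.
 */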
static void mlx5e_update_q_counter(struct mlx5e_priv *priv)
{
        struct mlx5e_qcounter_stats *qcnt = &priv->stats.qcnt;

        if (!priv->q_counter)
                return;

        mlx5_core_query_out_of_buffer(priv->mdev, priv->q_counter,
                                      &qcnt->rx_out_of_buffer);
}

void mlx5e_update_stats(struct mlx5e_priv *priv)
{
        mlx5e_update_q_counter(priv);
        mlx5e_update_vport_counters(priv);
        mlx5e_update_pport_counters(priv);
        mlx5e_update_sw_counters(priv);
}

void mlx5e_update_stats_work(struct work_struct *work)
{
        struct delayed_work *dwork = to_delayed_work(work);
        struct mlx5e_priv *priv = container_of(dwork, struct mlx5e_priv,
                                               update_stats_work);
        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state)) {
                priv->profile->update_stats(priv);
                queue_delayed_work(priv->wq, dwork,
                                   msecs_to_jiffies(MLX5E_UPDATE_STATS_INTERVAL));
        }
        mutex_unlock(&priv->state_lock);
}

static void mlx5e_async_event(struct mlx5_core_dev *mdev, void *vpriv,
                              enum mlx5_dev_event event, unsigned long param)
{
        struct mlx5e_priv *priv = vpriv;

        if (!test_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state))
                return;

        switch (event) {
        case MLX5_DEV_EVENT_PORT_UP:
        case MLX5_DEV_EVENT_PORT_DOWN:
                queue_work(priv->wq, &priv->update_carrier_work);
                break;

        default:
                break;
        }
}

static void mlx5e_enable_async_events(struct mlx5e_priv *priv)
{
        set_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
}

static void mlx5e_disable_async_events(struct mlx5e_priv *priv)
{
        clear_bit(MLX5E_STATE_ASYNC_EVENTS_ENABLED, &priv->state);
        synchronize_irq(mlx5_get_msix_vec(priv->mdev, MLX5_EQ_VEC_ASYNC));
}

#define MLX5E_HW2SW_MTU(hwmtu) (hwmtu - (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))
#define MLX5E_SW2HW_MTU(swmtu) (swmtu + (ETH_HLEN + VLAN_HLEN + ETH_FCS_LEN))

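/* Allocate the RQ work queue and set up the RX handlers and WQE sizes
 * according to the configured WQ type (striding MPWQE vs. linked list).
 */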
static int mlx5e_create_rq(struct mlx5e_channel *c,
                           struct mlx5e_rq_param *param,
                           struct mlx5e_rq *rq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        void *rqc = param->rqc;
        void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
        u32 byte_count;
        int wq_sz;
        int err;
        int i;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
                                &rq->wq_ctrl);
        if (err)
                return err;

        rq->wq.db = &rq->wq.db[MLX5_RCV_DBR];

        wq_sz = mlx5_wq_ll_get_size(&rq->wq);

        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                rq->wqe_info = kzalloc_node(wq_sz * sizeof(*rq->wqe_info),
                                            GFP_KERNEL, cpu_to_node(c->cpu));
                if (!rq->wqe_info) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe_mpwrq;
                rq->alloc_wqe = mlx5e_alloc_rx_mpwqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_mpwqe;

                rq->mpwqe_mtt_offset = c->ix *
                        MLX5E_REQUIRED_MTTS(1, BIT(priv->params.log_rq_size));

                rq->mpwqe_stride_sz = BIT(priv->params.mpwqe_log_stride_sz);
                rq->mpwqe_num_strides = BIT(priv->params.mpwqe_log_num_strides);
                rq->wqe_sz = rq->mpwqe_stride_sz * rq->mpwqe_num_strides;
                byte_count = rq->wqe_sz;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                rq->skb = kzalloc_node(wq_sz * sizeof(*rq->skb), GFP_KERNEL,
                                       cpu_to_node(c->cpu));
                if (!rq->skb) {
                        err = -ENOMEM;
                        goto err_rq_wq_destroy;
                }
                rq->handle_rx_cqe = mlx5e_handle_rx_cqe;
                rq->alloc_wqe = mlx5e_alloc_rx_wqe;
                rq->dealloc_wqe = mlx5e_dealloc_rx_wqe;

                rq->wqe_sz = (priv->params.lro_en) ?
                                priv->params.lro_wqe_sz :
                                MLX5E_SW2HW_MTU(priv->netdev->mtu);
                rq->wqe_sz = SKB_DATA_ALIGN(rq->wqe_sz);
                byte_count = rq->wqe_sz;
                byte_count |= MLX5_HW_START_PADDING;
        }

        for (i = 0; i < wq_sz; i++) {
                struct mlx5e_rx_wqe *wqe = mlx5_wq_ll_get_wqe(&rq->wq, i);

                wqe->data.byte_count = cpu_to_be32(byte_count);
        }

        INIT_WORK(&rq->am.work, mlx5e_rx_am_work);
        rq->am.mode = priv->params.rx_cq_period_mode;

        rq->wq_type = priv->params.rq_wq_type;
        rq->pdev = c->pdev;
        rq->netdev = c->netdev;
        rq->tstamp = &priv->tstamp;
        rq->channel = c;
        rq->ix = c->ix;
        rq->priv = c->priv;
        rq->mkey_be = c->mkey_be;
        rq->umr_mkey_be = cpu_to_be32(c->priv->umr_mkey.key);

        return 0;

err_rq_wq_destroy:
        mlx5_wq_destroy(&rq->wq_ctrl);

        return err;
}

static void mlx5e_destroy_rq(struct mlx5e_rq *rq)
{
        switch (rq->wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                kfree(rq->wqe_info);
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                kfree(rq->skb);
        }

        mlx5_wq_destroy(&rq->wq_ctrl);
}

static int mlx5e_enable_rq(struct mlx5e_rq *rq, struct mlx5e_rq_param *param)
{
        struct mlx5e_priv *priv = rq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_rq_in) +
                sizeof(u64) * rq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(create_rq_in, in, ctx);
        wq = MLX5_ADDR_OF(rqc, rqc, wq);

        memcpy(rqc, param->rqc, sizeof(param->rqc));

        MLX5_SET(rqc, rqc, cqn, rq->cq.mcq.cqn);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RST);
        MLX5_SET(rqc, rqc, vsd, priv->params.vlan_strip_disable);
        MLX5_SET(wq, wq, log_wq_pg_sz, rq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, rq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&rq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_rq(mdev, in, inlen, &rq->rqn);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_state(struct mlx5e_rq *rq, int curr_state,
                                 int next_state)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, curr_state);
        MLX5_SET(rqc, rqc, state, next_state);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static int mlx5e_modify_rq_vsd(struct mlx5e_rq *rq, bool vsd)
{
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *rqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_rq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        rqc = MLX5_ADDR_OF(modify_rq_in, in, ctx);

        MLX5_SET(modify_rq_in, in, rq_state, MLX5_RQC_STATE_RDY);
        MLX5_SET64(modify_rq_in, in, modify_bitmask, MLX5_RQ_BITMASK_VSD);
        MLX5_SET(rqc, rqc, vsd, vsd);
        MLX5_SET(rqc, rqc, state, MLX5_RQC_STATE_RDY);

        err = mlx5_core_modify_rq(mdev, rq->rqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_rq(struct mlx5e_rq *rq)
{
        mlx5_core_destroy_rq(rq->priv->mdev, rq->rqn);
}

static int mlx5e_wait_for_min_rx_wqes(struct mlx5e_rq *rq)
{
        unsigned long exp_time = jiffies + msecs_to_jiffies(20000);
        struct mlx5e_channel *c = rq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_wq_ll *wq = &rq->wq;

        while (time_before(jiffies, exp_time)) {
                if (wq->cur_sz >= priv->params.min_rx_wqes)
                        return 0;

                msleep(20);
        }

        return -ETIMEDOUT;
}

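/* Release every RX descriptor still posted on the RQ, including a UMR WQE
 * that may still be in progress at wq->head.
 */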
static void mlx5e_free_rx_descs(struct mlx5e_rq *rq)
{
        struct mlx5_wq_ll *wq = &rq->wq;
        struct mlx5e_rx_wqe *wqe;
        __be16 wqe_ix_be;
        u16 wqe_ix;

        /* UMR WQE (if in progress) is always at wq->head */
        if (test_bit(MLX5E_RQ_STATE_UMR_WQE_IN_PROGRESS, &rq->state))
                mlx5e_free_rx_fragmented_mpwqe(rq, &rq->wqe_info[wq->head]);

        while (!mlx5_wq_ll_is_empty(wq)) {
                wqe_ix_be = *wq->tail_next;
                wqe_ix = be16_to_cpu(wqe_ix_be);
                wqe = mlx5_wq_ll_get_wqe(&rq->wq, wqe_ix);
                rq->dealloc_wqe(rq, wqe_ix);
                mlx5_wq_ll_pop(&rq->wq, wqe_ix_be,
                               &wqe->next.next_wqe_index);
        }
}

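/* Create the RQ, move it RST -> RDY, and kick the ICO SQ with a NOP so
 * mlx5e_post_rx_wqes() starts filling the ring.
 */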
static int mlx5e_open_rq(struct mlx5e_channel *c,
                         struct mlx5e_rq_param *param,
                         struct mlx5e_rq *rq)
{
        struct mlx5e_sq *sq = &c->icosq;
        u16 pi = sq->pc & sq->wq.sz_m1;
        int err;

        err = mlx5e_create_rq(c, param, rq);
        if (err)
                return err;

        err = mlx5e_enable_rq(rq, param);
        if (err)
                goto err_destroy_rq;

        err = mlx5e_modify_rq_state(rq, MLX5_RQC_STATE_RST, MLX5_RQC_STATE_RDY);
        if (err)
                goto err_disable_rq;

        if (param->am_enabled)
                set_bit(MLX5E_RQ_STATE_AM, &c->rq.state);

        sq->ico_wqe_info[pi].opcode = MLX5_OPCODE_NOP;
        sq->ico_wqe_info[pi].num_wqebbs = 1;
        mlx5e_send_nop(sq, true); /* trigger mlx5e_post_rx_wqes() */

        return 0;

err_disable_rq:
        mlx5e_disable_rq(rq);
err_destroy_rq:
        mlx5e_destroy_rq(rq);

        return err;
}

static void mlx5e_close_rq(struct mlx5e_rq *rq)
{
        set_bit(MLX5E_RQ_STATE_FLUSH, &rq->state);
        napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */
        cancel_work_sync(&rq->am.work);

        mlx5e_disable_rq(rq);
        mlx5e_free_rx_descs(rq);
        mlx5e_destroy_rq(rq);
}

static void mlx5e_free_sq_db(struct mlx5e_sq *sq)
{
        kfree(sq->wqe_info);
        kfree(sq->dma_fifo);
        kfree(sq->skb);
}

static int mlx5e_alloc_sq_db(struct mlx5e_sq *sq, int numa)
{
        int wq_sz = mlx5_wq_cyc_get_size(&sq->wq);
        int df_sz = wq_sz * MLX5_SEND_WQEBB_NUM_DS;

        sq->skb = kzalloc_node(wq_sz * sizeof(*sq->skb), GFP_KERNEL, numa);
        sq->dma_fifo = kzalloc_node(df_sz * sizeof(*sq->dma_fifo), GFP_KERNEL,
                                    numa);
        sq->wqe_info = kzalloc_node(wq_sz * sizeof(*sq->wqe_info), GFP_KERNEL,
                                    numa);

        if (!sq->skb || !sq->dma_fifo || !sq->wqe_info) {
                mlx5e_free_sq_db(sq);
                return -ENOMEM;
        }

        sq->dma_fifo_mask = df_sz - 1;

        return 0;
}

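/* Allocate the SQ resources: a UAR (with blueflame when available), the
 * cyclic work queue, and either an ICO WQE info array (for the internal
 * control SQ) or a netdev TX queue mapping (for data SQs).
 */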
static int mlx5e_create_sq(struct mlx5e_channel *c,
                           int tc,
                           struct mlx5e_sq_param *param,
                           struct mlx5e_sq *sq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *sqc = param->sqc;
        void *sqc_wq = MLX5_ADDR_OF(sqc, sqc, wq);
        int err;

        err = mlx5_alloc_map_uar(mdev, &sq->uar, !!MLX5_CAP_GEN(mdev, bf));
        if (err)
                return err;

        param->wq.db_numa_node = cpu_to_node(c->cpu);

        err = mlx5_wq_cyc_create(mdev, &param->wq, sqc_wq, &sq->wq,
                                 &sq->wq_ctrl);
        if (err)
                goto err_unmap_free_uar;

        sq->wq.db = &sq->wq.db[MLX5_SND_DBR];
        if (sq->uar.bf_map) {
                set_bit(MLX5E_SQ_STATE_BF_ENABLE, &sq->state);
                sq->uar_map = sq->uar.bf_map;
        } else {
                sq->uar_map = sq->uar.map;
        }
        sq->bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
        sq->max_inline = param->max_inline;
        sq->min_inline_mode =
                MLX5_CAP_ETH(mdev, wqe_inline_mode) == MLX5E_INLINE_MODE_VPORT_CONTEXT ?
                param->min_inline_mode : 0;

        err = mlx5e_alloc_sq_db(sq, cpu_to_node(c->cpu));
        if (err)
                goto err_sq_wq_destroy;

        if (param->icosq) {
                u8 wq_sz = mlx5_wq_cyc_get_size(&sq->wq);

                sq->ico_wqe_info = kzalloc_node(sizeof(*sq->ico_wqe_info) *
                                                wq_sz,
                                                GFP_KERNEL,
                                                cpu_to_node(c->cpu));
                if (!sq->ico_wqe_info) {
                        err = -ENOMEM;
                        goto err_free_sq_db;
                }
        } else {
                int txq_ix;

                txq_ix = c->ix + tc * priv->params.num_channels;
                sq->txq = netdev_get_tx_queue(priv->netdev, txq_ix);
                priv->txq_to_sq_map[txq_ix] = sq;
        }

        sq->pdev = c->pdev;
        sq->tstamp = &priv->tstamp;
        sq->mkey_be = c->mkey_be;
        sq->channel = c;
        sq->tc = tc;
        sq->edge = (sq->wq.sz_m1 + 1) - MLX5_SEND_WQE_MAX_WQEBBS;
        sq->bf_budget = MLX5E_SQ_BF_BUDGET;

        return 0;

err_free_sq_db:
        mlx5e_free_sq_db(sq);

err_sq_wq_destroy:
        mlx5_wq_destroy(&sq->wq_ctrl);

err_unmap_free_uar:
        mlx5_unmap_free_uar(mdev, &sq->uar);

        return err;
}

static void mlx5e_destroy_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;

        kfree(sq->ico_wqe_info);
        mlx5e_free_sq_db(sq);
        mlx5_wq_destroy(&sq->wq_ctrl);
        mlx5_unmap_free_uar(priv->mdev, &sq->uar);
}

static int mlx5e_enable_sq(struct mlx5e_sq *sq, struct mlx5e_sq_param *param)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        void *wq;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_sq_in) +
                sizeof(u64) * sq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(create_sq_in, in, ctx);
        wq = MLX5_ADDR_OF(sqc, sqc, wq);

        memcpy(sqc, param->sqc, sizeof(param->sqc));

        MLX5_SET(sqc, sqc, tis_num_0, param->icosq ? 0 : priv->tisn[sq->tc]);
        MLX5_SET(sqc, sqc, cqn, sq->cq.mcq.cqn);
        MLX5_SET(sqc, sqc, min_wqe_inline_mode, sq->min_inline_mode);
        MLX5_SET(sqc, sqc, state, MLX5_SQC_STATE_RST);
        MLX5_SET(sqc, sqc, tis_lst_sz, param->icosq ? 0 : 1);
        MLX5_SET(sqc, sqc, flush_in_error_en, 1);

        MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_CYCLIC);
        MLX5_SET(wq, wq, uar_page, sq->uar.index);
        MLX5_SET(wq, wq, log_wq_pg_sz, sq->wq_ctrl.buf.page_shift -
                                       MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(wq, wq, dbr_addr, sq->wq_ctrl.db.dma);

        mlx5_fill_page_array(&sq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(wq, wq, pas));

        err = mlx5_core_create_sq(mdev, in, inlen, &sq->sqn);

        kvfree(in);

        return err;
}

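/* Transition the SQ between states; when update_rl is set, also attach the
 * given packet-pacing rate-limit index as part of the RDY modify.
 */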
static int mlx5e_modify_sq(struct mlx5e_sq *sq, int curr_state,
                           int next_state, bool update_rl, int rl_index)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        void *in;
        void *sqc;
        int inlen;
        int err;

        inlen = MLX5_ST_SZ_BYTES(modify_sq_in);
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        sqc = MLX5_ADDR_OF(modify_sq_in, in, ctx);

        MLX5_SET(modify_sq_in, in, sq_state, curr_state);
        MLX5_SET(sqc, sqc, state, next_state);
        if (update_rl && next_state == MLX5_SQC_STATE_RDY) {
                MLX5_SET64(modify_sq_in, in, modify_bitmask, 1);
                MLX5_SET(sqc, sqc, packet_pacing_rate_limit_index, rl_index);
        }

        err = mlx5_core_modify_sq(mdev, sq->sqn, in, inlen);

        kvfree(in);

        return err;
}

static void mlx5e_disable_sq(struct mlx5e_sq *sq)
{
        struct mlx5e_channel *c = sq->channel;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_sq(mdev, sq->sqn);
        if (sq->rate_limit)
                mlx5_rl_remove_rate(mdev, sq->rate_limit);
}

static int mlx5e_open_sq(struct mlx5e_channel *c,
                         int tc,
                         struct mlx5e_sq_param *param,
                         struct mlx5e_sq *sq)
{
        int err;

        err = mlx5e_create_sq(c, tc, param, sq);
        if (err)
                return err;

        err = mlx5e_enable_sq(sq, param);
        if (err)
                goto err_destroy_sq;

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RST, MLX5_SQC_STATE_RDY,
                              false, 0);
        if (err)
                goto err_disable_sq;

        if (sq->txq) {
                netdev_tx_reset_queue(sq->txq);
                netif_tx_start_queue(sq->txq);
        }

        return 0;

err_disable_sq:
        mlx5e_disable_sq(sq);
err_destroy_sq:
        mlx5e_destroy_sq(sq);

        return err;
}

static inline void netif_tx_disable_queue(struct netdev_queue *txq)
{
        __netif_tx_lock_bh(txq);
        netif_tx_stop_queue(txq);
        __netif_tx_unlock_bh(txq);
}

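/* Stop the SQ: mark it for flush, quiesce NAPI, post a final NOP doorbell
 * if there is room, then destroy the HW object and free any pending TX
 * descriptors.
 */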
static void mlx5e_close_sq(struct mlx5e_sq *sq)
{
        set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
        /* prevent netif_tx_wake_queue */
        napi_synchronize(&sq->channel->napi);

        if (sq->txq) {
                netif_tx_disable_queue(sq->txq);

                /* last doorbell out, godspeed .. */
                if (mlx5e_sq_has_room_for(sq, 1))
                        mlx5e_send_nop(sq, true);
        }

        mlx5e_disable_sq(sq);
        mlx5e_free_tx_descs(sq);
        mlx5e_destroy_sq(sq);
}

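/* Allocate the CQ work queue and initialize the core CQ fields; CQEs start
 * out with op_own set to 0xf1 so they are owned by HW.
 */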
static int mlx5e_create_cq(struct mlx5e_channel *c,
                           struct mlx5e_cq_param *param,
                           struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;
        int eqn_not_used;
        unsigned int irqn;
        int err;
        u32 i;

        param->wq.buf_numa_node = cpu_to_node(c->cpu);
        param->wq.db_numa_node = cpu_to_node(c->cpu);
        param->eq_ix = c->ix;

        err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
                               &cq->wq_ctrl);
        if (err)
                return err;

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);

        cq->napi = &c->napi;

        mcq->cqe_sz = 64;
        mcq->set_ci_db = cq->wq_ctrl.db.db;
        mcq->arm_db = cq->wq_ctrl.db.db + 1;
        *mcq->set_ci_db = 0;
        *mcq->arm_db = 0;
        mcq->vector = param->eq_ix;
        mcq->comp = mlx5e_completion_event;
        mcq->event = mlx5e_cq_error_event;
        mcq->irqn = irqn;
        mcq->uar = &mdev->mlx5e_res.cq_uar;

        for (i = 0; i < mlx5_cqwq_get_size(&cq->wq); i++) {
                struct mlx5_cqe64 *cqe = mlx5_cqwq_get_wqe(&cq->wq, i);

                cqe->op_own = 0xf1;
        }

        cq->channel = c;
        cq->priv = priv;

        return 0;
}

static void mlx5e_destroy_cq(struct mlx5e_cq *cq)
{
        mlx5_wq_destroy(&cq->wq_ctrl);
}

static int mlx5e_enable_cq(struct mlx5e_cq *cq, struct mlx5e_cq_param *param)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_core_cq *mcq = &cq->mcq;

        void *in;
        void *cqc;
        int inlen;
        unsigned int irqn_not_used;
        int eqn;
        int err;

        inlen = MLX5_ST_SZ_BYTES(create_cq_in) +
                sizeof(u64) * cq->wq_ctrl.buf.npages;
        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        cqc = MLX5_ADDR_OF(create_cq_in, in, cq_context);

        memcpy(cqc, param->cqc, sizeof(param->cqc));

        mlx5_fill_page_array(&cq->wq_ctrl.buf,
                             (__be64 *)MLX5_ADDR_OF(create_cq_in, in, pas));

        mlx5_vector2eqn(mdev, param->eq_ix, &eqn, &irqn_not_used);

        MLX5_SET(cqc, cqc, cq_period_mode, param->cq_period_mode);
        MLX5_SET(cqc, cqc, c_eqn, eqn);
        MLX5_SET(cqc, cqc, uar_page, mcq->uar->index);
        MLX5_SET(cqc, cqc, log_page_size, cq->wq_ctrl.buf.page_shift -
                                          MLX5_ADAPTER_PAGE_SHIFT);
        MLX5_SET64(cqc, cqc, dbr_addr, cq->wq_ctrl.db.dma);

        err = mlx5_core_create_cq(mdev, mcq, in, inlen);

        kvfree(in);

        if (err)
                return err;

        mlx5e_cq_arm(cq);

        return 0;
}

static void mlx5e_disable_cq(struct mlx5e_cq *cq)
{
        struct mlx5e_priv *priv = cq->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        mlx5_core_destroy_cq(mdev, &cq->mcq);
}

static int mlx5e_open_cq(struct mlx5e_channel *c,
                         struct mlx5e_cq_param *param,
                         struct mlx5e_cq *cq,
                         struct mlx5e_cq_moder moderation)
{
        int err;
        struct mlx5e_priv *priv = c->priv;
        struct mlx5_core_dev *mdev = priv->mdev;

        err = mlx5e_create_cq(c, param, cq);
        if (err)
                return err;

        err = mlx5e_enable_cq(cq, param);
        if (err)
                goto err_destroy_cq;

        if (MLX5_CAP_GEN(mdev, cq_moderation))
                mlx5_core_modify_cq_moderation(mdev, &cq->mcq,
                                               moderation.usec,
                                               moderation.pkts);
        return 0;

err_destroy_cq:
        mlx5e_destroy_cq(cq);

        return err;
}

static void mlx5e_close_cq(struct mlx5e_cq *cq)
{
        mlx5e_disable_cq(cq);
        mlx5e_destroy_cq(cq);
}

static int mlx5e_get_cpu(struct mlx5e_priv *priv, int ix)
{
        return cpumask_first(priv->mdev->priv.irq_info[ix].mask);
}

static int mlx5e_open_tx_cqs(struct mlx5e_channel *c,
                             struct mlx5e_channel_param *cparam)
{
        struct mlx5e_priv *priv = c->priv;
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_cq(c, &cparam->tx_cq, &c->sq[tc].cq,
                                    priv->params.tx_cq_moderation);
                if (err)
                        goto err_close_tx_cqs;
        }

        return 0;

err_close_tx_cqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_cq(&c->sq[tc].cq);

        return err;
}

static void mlx5e_close_tx_cqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_cq(&c->sq[tc].cq);
}

static int mlx5e_open_sqs(struct mlx5e_channel *c,
                          struct mlx5e_channel_param *cparam)
{
        int err;
        int tc;

        for (tc = 0; tc < c->num_tc; tc++) {
                err = mlx5e_open_sq(c, tc, &cparam->sq, &c->sq[tc]);
                if (err)
                        goto err_close_sqs;
        }

        return 0;

err_close_sqs:
        for (tc--; tc >= 0; tc--)
                mlx5e_close_sq(&c->sq[tc]);

        return err;
}

static void mlx5e_close_sqs(struct mlx5e_channel *c)
{
        int tc;

        for (tc = 0; tc < c->num_tc; tc++)
                mlx5e_close_sq(&c->sq[tc]);
}

static void mlx5e_build_channeltc_to_txq_map(struct mlx5e_priv *priv, int ix)
{
        int i;

        for (i = 0; i < priv->profile->max_tc; i++)
                priv->channeltc_to_txq_map[ix][i] =
                        ix + i * priv->params.num_channels;
}

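/* Apply a HW rate limit to an SQ: swap the old rate-limit table entry for
 * the new one and update the SQ context via an RDY -> RDY modify.
 */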
static int mlx5e_set_sq_maxrate(struct net_device *dev,
                                struct mlx5e_sq *sq, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        u16 rl_index = 0;
        int err;

        if (rate == sq->rate_limit)
                /* nothing to do */
                return 0;

        if (sq->rate_limit)
                /* remove the current rate-limit entry to make room for the
                 * next ones
                 */
                mlx5_rl_remove_rate(mdev, sq->rate_limit);

        sq->rate_limit = 0;

        if (rate) {
                err = mlx5_rl_add_rate(mdev, rate, &rl_index);
                if (err) {
                        netdev_err(dev, "Failed configuring rate %u: %d\n",
                                   rate, err);
                        return err;
                }
        }

        err = mlx5e_modify_sq(sq, MLX5_SQC_STATE_RDY,
                              MLX5_SQC_STATE_RDY, true, rl_index);
        if (err) {
                netdev_err(dev, "Failed configuring rate %u: %d\n",
                           rate, err);
                /* remove the rate from the table */
                if (rate)
                        mlx5_rl_remove_rate(mdev, rate);
                return err;
        }

        sq->rate_limit = rate;
        return 0;
}

static int mlx5e_set_tx_maxrate(struct net_device *dev, int index, u32 rate)
{
        struct mlx5e_priv *priv = netdev_priv(dev);
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5e_sq *sq = priv->txq_to_sq_map[index];
        int err = 0;

        if (!mlx5_rl_is_supported(mdev)) {
                netdev_err(dev, "Rate limiting is not supported on this device\n");
                return -EINVAL;
        }

        /* rate is given in Mb/sec, HW config is in Kb/sec */
        rate = rate << 10;

        /* Check whether the rate is in the valid range; 0 is always valid */
        if (rate && !mlx5_rl_is_in_range(mdev, rate)) {
                netdev_err(dev, "TX rate %u is not in range\n", rate);
                return -ERANGE;
        }

        mutex_lock(&priv->state_lock);
        if (test_bit(MLX5E_STATE_OPENED, &priv->state))
                err = mlx5e_set_sq_maxrate(dev, sq, rate);
        if (!err)
                priv->tx_rates[index] = rate;
        mutex_unlock(&priv->state_lock);

        return err;
}

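/* Bring up one channel: CQs first, then NAPI, the ICO SQ, the per-TC SQs
 * (restoring any saved TX rate limits), and finally the RQ.
 */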
static int mlx5e_open_channel(struct mlx5e_priv *priv, int ix,
                              struct mlx5e_channel_param *cparam,
                              struct mlx5e_channel **cp)
{
        struct mlx5e_cq_moder icosq_cq_moder = {0, 0};
        struct net_device *netdev = priv->netdev;
        struct mlx5e_cq_moder rx_cq_profile;
        int cpu = mlx5e_get_cpu(priv, ix);
        struct mlx5e_channel *c;
        struct mlx5e_sq *sq;
        int err;
        int i;

        c = kzalloc_node(sizeof(*c), GFP_KERNEL, cpu_to_node(cpu));
        if (!c)
                return -ENOMEM;

        c->priv = priv;
        c->ix = ix;
        c->cpu = cpu;
        c->pdev = &priv->mdev->pdev->dev;
        c->netdev = priv->netdev;
        c->mkey_be = cpu_to_be32(priv->mdev->mlx5e_res.mkey.key);
        c->num_tc = priv->params.num_tc;

        if (priv->params.rx_am_enabled)
                rx_cq_profile = mlx5e_am_get_def_profile(priv->params.rx_cq_period_mode);
        else
                rx_cq_profile = priv->params.rx_cq_moderation;

        mlx5e_build_channeltc_to_txq_map(priv, ix);

        netif_napi_add(netdev, &c->napi, mlx5e_napi_poll, 64);

        err = mlx5e_open_cq(c, &cparam->icosq_cq, &c->icosq.cq, icosq_cq_moder);
        if (err)
                goto err_napi_del;

        err = mlx5e_open_tx_cqs(c, cparam);
        if (err)
                goto err_close_icosq_cq;

        err = mlx5e_open_cq(c, &cparam->rx_cq, &c->rq.cq,
                            rx_cq_profile);
        if (err)
                goto err_close_tx_cqs;

        napi_enable(&c->napi);

        err = mlx5e_open_sq(c, 0, &cparam->icosq, &c->icosq);
        if (err)
                goto err_disable_napi;

        err = mlx5e_open_sqs(c, cparam);
        if (err)
                goto err_close_icosq;

        for (i = 0; i < priv->params.num_tc; i++) {
                u32 txq_ix = priv->channeltc_to_txq_map[ix][i];

                if (priv->tx_rates[txq_ix]) {
                        sq = priv->txq_to_sq_map[txq_ix];
                        mlx5e_set_sq_maxrate(priv->netdev, sq,
                                             priv->tx_rates[txq_ix]);
                }
        }

        err = mlx5e_open_rq(c, &cparam->rq, &c->rq);
        if (err)
                goto err_close_sqs;

        netif_set_xps_queue(netdev, get_cpu_mask(c->cpu), ix);
        *cp = c;

        return 0;

err_close_sqs:
        mlx5e_close_sqs(c);

err_close_icosq:
        mlx5e_close_sq(&c->icosq);

err_disable_napi:
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);

err_close_tx_cqs:
        mlx5e_close_tx_cqs(c);

err_close_icosq_cq:
        mlx5e_close_cq(&c->icosq.cq);

err_napi_del:
        netif_napi_del(&c->napi);
        napi_hash_del(&c->napi);
        kfree(c);

        return err;
}

static void mlx5e_close_channel(struct mlx5e_channel *c)
{
        mlx5e_close_rq(&c->rq);
        mlx5e_close_sqs(c);
        mlx5e_close_sq(&c->icosq);
        napi_disable(&c->napi);
        mlx5e_close_cq(&c->rq.cq);
        mlx5e_close_tx_cqs(c);
        mlx5e_close_cq(&c->icosq.cq);
        netif_napi_del(&c->napi);

        napi_hash_del(&c->napi);
        synchronize_rcu();

        kfree(c);
}

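/* Build the RQ context from priv->params; the MPWQE stride fields are
 * programmed as offsets from the HW base-2 minimums (hence the -9 / -6).
 */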
1271static void mlx5e_build_rq_param(struct mlx5e_priv *priv,
1272 struct mlx5e_rq_param *param)
1273{
1274 void *rqc = param->rqc;
1275 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1276
Tariq Toukan461017c2016-04-20 22:02:13 +03001277 switch (priv->params.rq_wq_type) {
1278 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1279 MLX5_SET(wq, wq, log_wqe_num_of_strides,
Tariq Toukand9d9f152016-05-11 00:29:15 +03001280 priv->params.mpwqe_log_num_strides - 9);
Tariq Toukan461017c2016-04-20 22:02:13 +03001281 MLX5_SET(wq, wq, log_wqe_stride_size,
Tariq Toukand9d9f152016-05-11 00:29:15 +03001282 priv->params.mpwqe_log_stride_sz - 6);
Tariq Toukan461017c2016-04-20 22:02:13 +03001283 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
1284 break;
1285 default: /* MLX5_WQ_TYPE_LINKED_LIST */
1286 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1287 }
1288
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001289 MLX5_SET(wq, wq, end_padding_mode, MLX5_WQ_END_PAD_MODE_ALIGN);
1290 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1291 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_rq_size);
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03001292 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
Rana Shahout593cf332016-04-20 22:02:10 +03001293 MLX5_SET(rqc, rqc, counter_set_id, priv->q_counter);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001294
Saeed Mahameed311c7c72015-07-23 23:35:57 +03001295 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001296 param->wq.linear = 1;
Gil Rockahcb3c7fd2016-06-23 17:02:41 +03001297
1298 param->am_enabled = priv->params.rx_am_enabled;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001299}
1300
Tariq Toukan556dd1b2016-03-02 00:13:36 +02001301static void mlx5e_build_drop_rq_param(struct mlx5e_rq_param *param)
1302{
1303 void *rqc = param->rqc;
1304 void *wq = MLX5_ADDR_OF(rqc, rqc, wq);
1305
1306 MLX5_SET(wq, wq, wq_type, MLX5_WQ_TYPE_LINKED_LIST);
1307 MLX5_SET(wq, wq, log_wq_stride, ilog2(sizeof(struct mlx5e_rx_wqe)));
1308}
1309
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001310static void mlx5e_build_sq_param_common(struct mlx5e_priv *priv,
1311 struct mlx5e_sq_param *param)
1312{
1313 void *sqc = param->sqc;
1314 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1315
1316 MLX5_SET(wq, wq, log_wq_stride, ilog2(MLX5_SEND_WQE_BB));
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03001317 MLX5_SET(wq, wq, pd, priv->mdev->mlx5e_res.pdn);
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001318
1319 param->wq.buf_numa_node = dev_to_node(&priv->mdev->pdev->dev);
1320}
1321
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001322static void mlx5e_build_sq_param(struct mlx5e_priv *priv,
1323 struct mlx5e_sq_param *param)
1324{
1325 void *sqc = param->sqc;
1326 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1327
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001328 mlx5e_build_sq_param_common(priv, param);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001329 MLX5_SET(wq, wq, log_wq_sz, priv->params.log_sq_size);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001330
Achiad Shochat58d52292015-07-23 23:35:58 +03001331 param->max_inline = priv->params.tx_max_inline;
Hadar Hen Zioncff92d72016-07-24 16:12:40 +03001332 param->min_inline_mode = priv->params.tx_min_inline_mode;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001333}
1334
1335static void mlx5e_build_common_cq_param(struct mlx5e_priv *priv,
1336 struct mlx5e_cq_param *param)
1337{
1338 void *cqc = param->cqc;
1339
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03001340 MLX5_SET(cqc, cqc, uar_page, priv->mdev->mlx5e_res.cq_uar.index);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001341}
1342
1343static void mlx5e_build_rx_cq_param(struct mlx5e_priv *priv,
1344 struct mlx5e_cq_param *param)
1345{
1346 void *cqc = param->cqc;
Tariq Toukan461017c2016-04-20 22:02:13 +03001347 u8 log_cq_size;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001348
Tariq Toukan461017c2016-04-20 22:02:13 +03001349 switch (priv->params.rq_wq_type) {
1350 case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
1351 log_cq_size = priv->params.log_rq_size +
Tariq Toukand9d9f152016-05-11 00:29:15 +03001352 priv->params.mpwqe_log_num_strides;
Tariq Toukan461017c2016-04-20 22:02:13 +03001353 break;
1354 default: /* MLX5_WQ_TYPE_LINKED_LIST */
1355 log_cq_size = priv->params.log_rq_size;
1356 }
1357
1358 MLX5_SET(cqc, cqc, log_cq_size, log_cq_size);
Tariq Toukan7219ab32016-05-11 00:29:14 +03001359 if (priv->params.rx_cqe_compress) {
1360 MLX5_SET(cqc, cqc, mini_cqe_res_format, MLX5_CQE_FORMAT_CSUM);
1361 MLX5_SET(cqc, cqc, cqe_comp_en, 1);
1362 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001363
1364 mlx5e_build_common_cq_param(priv, param);
Tariq Toukan9908aa22016-06-23 17:02:40 +03001365
1366 param->cq_period_mode = priv->params.rx_cq_period_mode;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001367}
1368
1369static void mlx5e_build_tx_cq_param(struct mlx5e_priv *priv,
1370 struct mlx5e_cq_param *param)
1371{
1372 void *cqc = param->cqc;
1373
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001374 MLX5_SET(cqc, cqc, log_cq_size, priv->params.log_sq_size);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001375
1376 mlx5e_build_common_cq_param(priv, param);
Tariq Toukan9908aa22016-06-23 17:02:40 +03001377
1378 param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001379}
1380
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001381static void mlx5e_build_ico_cq_param(struct mlx5e_priv *priv,
1382 struct mlx5e_cq_param *param,
1383 u8 log_wq_size)
1384{
1385 void *cqc = param->cqc;
1386
1387 MLX5_SET(cqc, cqc, log_cq_size, log_wq_size);
1388
1389 mlx5e_build_common_cq_param(priv, param);
Tariq Toukan9908aa22016-06-23 17:02:40 +03001390
1391 param->cq_period_mode = MLX5_CQ_PERIOD_MODE_START_FROM_EQE;
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001392}
1393
1394static void mlx5e_build_icosq_param(struct mlx5e_priv *priv,
1395 struct mlx5e_sq_param *param,
1396 u8 log_wq_size)
1397{
1398 void *sqc = param->sqc;
1399 void *wq = MLX5_ADDR_OF(sqc, sqc, wq);
1400
1401 mlx5e_build_sq_param_common(priv, param);
1402
1403 MLX5_SET(wq, wq, log_wq_sz, log_wq_size);
Tariq Toukanbc77b242016-04-20 22:02:15 +03001404 MLX5_SET(sqc, sqc, reg_umr, MLX5_CAP_ETH(priv->mdev, reg_umr_sq));
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001405
1406 param->icosq = true;
1407}
1408
Arnd Bergmann6b876632016-04-26 17:52:33 +02001409static void mlx5e_build_channel_param(struct mlx5e_priv *priv, struct mlx5e_channel_param *cparam)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001410{
Tariq Toukanbc77b242016-04-20 22:02:15 +03001411 u8 icosq_log_wq_sz = MLX5E_PARAMS_MINIMUM_LOG_SQ_SIZE;
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001412
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001413 mlx5e_build_rq_param(priv, &cparam->rq);
1414 mlx5e_build_sq_param(priv, &cparam->sq);
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001415 mlx5e_build_icosq_param(priv, &cparam->icosq, icosq_log_wq_sz);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001416 mlx5e_build_rx_cq_param(priv, &cparam->rx_cq);
1417 mlx5e_build_tx_cq_param(priv, &cparam->tx_cq);
Tariq Toukand3c9bc22016-04-20 22:02:14 +03001418 mlx5e_build_ico_cq_param(priv, &cparam->icosq_cq, icosq_log_wq_sz);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001419}
1420
1421static int mlx5e_open_channels(struct mlx5e_priv *priv)
1422{
Arnd Bergmann6b876632016-04-26 17:52:33 +02001423 struct mlx5e_channel_param *cparam;
Achiad Shochata4418a62015-07-29 15:05:41 +03001424 int nch = priv->params.num_channels;
Saeed Mahameed03289b82015-06-23 17:14:14 +03001425 int err = -ENOMEM;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001426 int i;
1427 int j;
1428
Achiad Shochata4418a62015-07-29 15:05:41 +03001429 priv->channel = kcalloc(nch, sizeof(struct mlx5e_channel *),
1430 GFP_KERNEL);
Saeed Mahameed03289b82015-06-23 17:14:14 +03001431
Achiad Shochata4418a62015-07-29 15:05:41 +03001432 priv->txq_to_sq_map = kcalloc(nch * priv->params.num_tc,
Saeed Mahameed03289b82015-06-23 17:14:14 +03001433 sizeof(struct mlx5e_sq *), GFP_KERNEL);
1434
Arnd Bergmann6b876632016-04-26 17:52:33 +02001435 cparam = kzalloc(sizeof(struct mlx5e_channel_param), GFP_KERNEL);
1436
1437 if (!priv->channel || !priv->txq_to_sq_map || !cparam)
Saeed Mahameed03289b82015-06-23 17:14:14 +03001438 goto err_free_txq_to_sq_map;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001439
Arnd Bergmann6b876632016-04-26 17:52:33 +02001440 mlx5e_build_channel_param(priv, cparam);
1441
Achiad Shochata4418a62015-07-29 15:05:41 +03001442 for (i = 0; i < nch; i++) {
Arnd Bergmann6b876632016-04-26 17:52:33 +02001443 err = mlx5e_open_channel(priv, i, cparam, &priv->channel[i]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001444 if (err)
1445 goto err_close_channels;
1446 }
1447
Achiad Shochata4418a62015-07-29 15:05:41 +03001448 for (j = 0; j < nch; j++) {
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001449 err = mlx5e_wait_for_min_rx_wqes(&priv->channel[j]->rq);
1450 if (err)
1451 goto err_close_channels;
1452 }
1453
Mohamad Haj Yahiac3b7c5c2016-07-13 00:07:00 +03001454 /* FIXME: This is a W/A for tx timeout watch dog false alarm when
1455 * polling for inactive tx queues.
1456 */
1457 netif_tx_start_all_queues(priv->netdev);
1458
Arnd Bergmann6b876632016-04-26 17:52:33 +02001459 kfree(cparam);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001460 return 0;
1461
1462err_close_channels:
1463 for (i--; i >= 0; i--)
1464 mlx5e_close_channel(priv->channel[i]);
1465
Saeed Mahameed03289b82015-06-23 17:14:14 +03001466err_free_txq_to_sq_map:
1467 kfree(priv->txq_to_sq_map);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001468 kfree(priv->channel);
Arnd Bergmann6b876632016-04-26 17:52:33 +02001469 kfree(cparam);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001470
1471 return err;
1472}
1473
1474static void mlx5e_close_channels(struct mlx5e_priv *priv)
1475{
1476 int i;
1477
Mohamad Haj Yahiac3b7c5c2016-07-13 00:07:00 +03001478	/* FIXME: This is a workaround only for the TX timeout watchdog false
1479	 * alarm that triggers when polling inactive TX queues.
1480	 */
1481 netif_tx_stop_all_queues(priv->netdev);
1482 netif_tx_disable(priv->netdev);
1483
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001484 for (i = 0; i < priv->params.num_channels; i++)
1485 mlx5e_close_channel(priv->channel[i]);
1486
Saeed Mahameed03289b82015-06-23 17:14:14 +03001487 kfree(priv->txq_to_sq_map);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03001488 kfree(priv->channel);
1489}
1490
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001491static int mlx5e_rx_hash_fn(int hfunc)
1492{
1493 return (hfunc == ETH_RSS_HASH_TOP) ?
1494 MLX5_RX_HASH_FN_TOEPLITZ :
1495 MLX5_RX_HASH_FN_INVERTED_XOR8;
1496}
1497
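/* Reverse the low 'size' bits of 'a'; used to spread the RSS indirection
 * table when the XOR8 hash function is selected. For example, with
 * size = 3: mlx5e_bits_invert(0b110, 3) == 0b011.
 */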
1498static int mlx5e_bits_invert(unsigned long a, int size)
1499{
1500 int inv = 0;
1501 int i;
1502
1503 for (i = 0; i < size; i++)
1504 inv |= (test_bit(size - i - 1, &a) ? 1 : 0) << i;
1505
1506 return inv;
1507}
1508
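/* Fill the indirection RQT entries. While the interface is down every
 * entry points at the drop RQ, so the table always has a valid
 * destination; mlx5e_redirect_rqts() repoints it on open/close.
 */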
Achiad Shochat936896e2015-08-16 16:04:46 +03001509static void mlx5e_fill_indir_rqt_rqns(struct mlx5e_priv *priv, void *rqtc)
1510{
1511 int i;
1512
1513 for (i = 0; i < MLX5E_INDIR_RQT_SIZE; i++) {
1514 int ix = i;
Tariq Toukan1da36692016-04-29 01:36:32 +03001515 u32 rqn;
Achiad Shochat936896e2015-08-16 16:04:46 +03001516
1517 if (priv->params.rss_hfunc == ETH_RSS_HASH_XOR)
1518 ix = mlx5e_bits_invert(i, MLX5E_LOG_INDIR_RQT_SIZE);
1519
Achiad Shochat2d75b2b2015-08-16 16:04:47 +03001520 ix = priv->params.indirection_rqt[ix];
Tariq Toukan1da36692016-04-29 01:36:32 +03001521 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1522 priv->channel[ix]->rq.rqn :
1523 priv->drop_rq.rqn;
1524 MLX5_SET(rqtc, rqtc, rq_num[i], rqn);
Achiad Shochat936896e2015-08-16 16:04:46 +03001525 }
1526}
1527
Tariq Toukan1da36692016-04-29 01:36:32 +03001528static void mlx5e_fill_direct_rqt_rqn(struct mlx5e_priv *priv, void *rqtc,
1529 int ix)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001530{
Tariq Toukan1da36692016-04-29 01:36:32 +03001531 u32 rqn = test_bit(MLX5E_STATE_OPENED, &priv->state) ?
1532 priv->channel[ix]->rq.rqn :
1533 priv->drop_rq.rqn;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001534
Tariq Toukan1da36692016-04-29 01:36:32 +03001535 MLX5_SET(rqtc, rqtc, rq_num[0], rqn);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001536}
1537
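/* Create an RQ table of 'sz' entries: sz == MLX5E_INDIR_RQT_SIZE for the
 * RSS indirection table, or sz == 1 for a per-channel direct RQT, in
 * which case 'ix' selects the channel.
 */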
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001538static int mlx5e_create_rqt(struct mlx5e_priv *priv, int sz,
1539 int ix, struct mlx5e_rqt *rqt)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001540{
1541 struct mlx5_core_dev *mdev = priv->mdev;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001542 void *rqtc;
1543 int inlen;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001544 int err;
Tariq Toukan1da36692016-04-29 01:36:32 +03001545 u32 *in;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001546
1547 inlen = MLX5_ST_SZ_BYTES(create_rqt_in) + sizeof(u32) * sz;
1548 in = mlx5_vzalloc(inlen);
1549 if (!in)
1550 return -ENOMEM;
1551
1552 rqtc = MLX5_ADDR_OF(create_rqt_in, in, rqt_context);
1553
1554 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
1555 MLX5_SET(rqtc, rqtc, rqt_max_size, sz);
1556
Tariq Toukan1da36692016-04-29 01:36:32 +03001557 if (sz > 1) /* RSS */
1558 mlx5e_fill_indir_rqt_rqns(priv, rqtc);
1559 else
1560 mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001561
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001562 err = mlx5_core_create_rqt(mdev, in, inlen, &rqt->rqtn);
1563 if (!err)
1564 rqt->enabled = true;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001565
1566 kvfree(in);
Tariq Toukan1da36692016-04-29 01:36:32 +03001567 return err;
1568}
1569
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001570void mlx5e_destroy_rqt(struct mlx5e_priv *priv, struct mlx5e_rqt *rqt)
Tariq Toukan1da36692016-04-29 01:36:32 +03001571{
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001572 rqt->enabled = false;
1573 mlx5_core_destroy_rqt(priv->mdev, rqt->rqtn);
Tariq Toukan1da36692016-04-29 01:36:32 +03001574}
1575
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03001576static int mlx5e_create_indirect_rqts(struct mlx5e_priv *priv)
Tariq Toukan1da36692016-04-29 01:36:32 +03001577{
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03001578 struct mlx5e_rqt *rqt = &priv->indir_rqt;
1579
1580 return mlx5e_create_rqt(priv, MLX5E_INDIR_RQT_SIZE, 0, rqt);
1581}
1582
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001583int mlx5e_create_direct_rqts(struct mlx5e_priv *priv)
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03001584{
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001585 struct mlx5e_rqt *rqt;
Tariq Toukan1da36692016-04-29 01:36:32 +03001586 int err;
1587 int ix;
1588
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03001589 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001590 rqt = &priv->direct_tir[ix].rqt;
1592		err = mlx5e_create_rqt(priv, 1 /* size */, ix, rqt);
Tariq Toukan1da36692016-04-29 01:36:32 +03001592 if (err)
1593 goto err_destroy_rqts;
1594 }
1595
1596 return 0;
1597
1598err_destroy_rqts:
1599 for (ix--; ix >= 0; ix--)
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001600 mlx5e_destroy_rqt(priv, &priv->direct_tir[ix].rqt);
Tariq Toukan1da36692016-04-29 01:36:32 +03001601
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001602 return err;
1603}
1604
Tariq Toukan1da36692016-04-29 01:36:32 +03001605int mlx5e_redirect_rqt(struct mlx5e_priv *priv, u32 rqtn, int sz, int ix)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001606{
1607 struct mlx5_core_dev *mdev = priv->mdev;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001608 void *rqtc;
1609 int inlen;
Tariq Toukan1da36692016-04-29 01:36:32 +03001610 u32 *in;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001611 int err;
1612
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001613 inlen = MLX5_ST_SZ_BYTES(modify_rqt_in) + sizeof(u32) * sz;
1614 in = mlx5_vzalloc(inlen);
1615 if (!in)
1616 return -ENOMEM;
1617
1618 rqtc = MLX5_ADDR_OF(modify_rqt_in, in, ctx);
1619
1620 MLX5_SET(rqtc, rqtc, rqt_actual_size, sz);
Tariq Toukan1da36692016-04-29 01:36:32 +03001621 if (sz > 1) /* RSS */
1622 mlx5e_fill_indir_rqt_rqns(priv, rqtc);
1623 else
1624 mlx5e_fill_direct_rqt_rqn(priv, rqtc, ix);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001625
1626 MLX5_SET(modify_rqt_in, in, bitmask.rqn_list, 1);
1627
Tariq Toukan1da36692016-04-29 01:36:32 +03001628 err = mlx5_core_modify_rqt(mdev, rqtn, in, inlen);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001629
1630 kvfree(in);
1631
1632 return err;
1633}
1634
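/* Repoint every enabled RQT: the indirect RQT used for RSS plus one
 * single-entry RQT per channel. Called on open (towards the channel RQs)
 * and on close (back towards the drop RQ).
 */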
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001635static void mlx5e_redirect_rqts(struct mlx5e_priv *priv)
1636{
Tariq Toukan1da36692016-04-29 01:36:32 +03001637 u32 rqtn;
1638 int ix;
1639
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001640 if (priv->indir_rqt.enabled) {
1641 rqtn = priv->indir_rqt.rqtn;
1642 mlx5e_redirect_rqt(priv, rqtn, MLX5E_INDIR_RQT_SIZE, 0);
1643 }
1644
Tariq Toukan1da36692016-04-29 01:36:32 +03001645 for (ix = 0; ix < priv->params.num_channels; ix++) {
Hadar Hen Zion398f3352016-07-01 14:51:06 +03001646 if (!priv->direct_tir[ix].rqt.enabled)
1647 continue;
1648 rqtn = priv->direct_tir[ix].rqt.rqtn;
Tariq Toukan1da36692016-04-29 01:36:32 +03001649 mlx5e_redirect_rqt(priv, rqtn, 1, ix);
1650 }
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001651}
1652
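/* The lro_max_ip_payload_size field is given in 256-byte units, hence the
 * >> 8 below; ROUGH_MAX_L2_L3_HDR_SZ reserves headroom for the L2/L3
 * headers so the aggregated payload still fits in lro_wqe_sz.
 */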
1653static void mlx5e_build_tir_ctx_lro(void *tirc, struct mlx5e_priv *priv)
1654{
1655 if (!priv->params.lro_en)
1656 return;
1657
1658#define ROUGH_MAX_L2_L3_HDR_SZ 256
1659
1660 MLX5_SET(tirc, tirc, lro_enable_mask,
1661 MLX5_TIRC_LRO_ENABLE_MASK_IPV4_LRO |
1662 MLX5_TIRC_LRO_ENABLE_MASK_IPV6_LRO);
1663 MLX5_SET(tirc, tirc, lro_max_ip_payload_size,
1664 (priv->params.lro_wqe_sz -
1665 ROUGH_MAX_L2_L3_HDR_SZ) >> 8);
1666 MLX5_SET(tirc, tirc, lro_timeout_period_usecs,
1667 MLX5_CAP_ETH(priv->mdev,
Achiad Shochatd9a40272015-08-16 16:04:49 +03001668 lro_timer_supported_periods[2]));
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001669}
1670
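/* Program the TIR RX hash: symmetric Toeplitz with the user-supplied key
 * when ETH_RSS_HASH_TOP is selected, otherwise the keyless inverted-XOR8
 * hash.
 */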
Tariq Toukanbdfc0282016-02-29 21:17:12 +02001671void mlx5e_build_tir_ctx_hash(void *tirc, struct mlx5e_priv *priv)
1672{
1673 MLX5_SET(tirc, tirc, rx_hash_fn,
1674 mlx5e_rx_hash_fn(priv->params.rss_hfunc));
1675 if (priv->params.rss_hfunc == ETH_RSS_HASH_TOP) {
1676 void *rss_key = MLX5_ADDR_OF(tirc, tirc,
1677 rx_hash_toeplitz_key);
1678 size_t len = MLX5_FLD_SZ_BYTES(tirc,
1679 rx_hash_toeplitz_key);
1680
1681 MLX5_SET(tirc, tirc, rx_hash_symmetric, 1);
1682 memcpy(rss_key, priv->params.toeplitz_hash_key, len);
1683 }
1684}
1685
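/* Propagate an LRO configuration change to every TIR, indirect and direct
 * alike, reusing a single modify_tir command buffer for all of them.
 */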
Tariq Toukanab0394f2016-02-29 21:17:10 +02001686static int mlx5e_modify_tirs_lro(struct mlx5e_priv *priv)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001687{
1688 struct mlx5_core_dev *mdev = priv->mdev;
1689
1690 void *in;
1691 void *tirc;
1692 int inlen;
1693 int err;
Tariq Toukanab0394f2016-02-29 21:17:10 +02001694 int tt;
Tariq Toukan1da36692016-04-29 01:36:32 +03001695 int ix;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001696
1697 inlen = MLX5_ST_SZ_BYTES(modify_tir_in);
1698 in = mlx5_vzalloc(inlen);
1699 if (!in)
1700 return -ENOMEM;
1701
1702 MLX5_SET(modify_tir_in, in, bitmask.lro, 1);
1703 tirc = MLX5_ADDR_OF(modify_tir_in, in, ctx);
1704
1705 mlx5e_build_tir_ctx_lro(tirc, priv);
1706
Tariq Toukan1da36692016-04-29 01:36:32 +03001707 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03001708 err = mlx5_core_modify_tir(mdev, priv->indir_tir[tt].tirn, in,
Tariq Toukan1da36692016-04-29 01:36:32 +03001709 inlen);
Tariq Toukanab0394f2016-02-29 21:17:10 +02001710 if (err)
Tariq Toukan1da36692016-04-29 01:36:32 +03001711 goto free_in;
Tariq Toukanab0394f2016-02-29 21:17:10 +02001712 }
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001713
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03001714 for (ix = 0; ix < priv->profile->max_nch(priv->mdev); ix++) {
Tariq Toukan1da36692016-04-29 01:36:32 +03001715 err = mlx5_core_modify_tir(mdev, priv->direct_tir[ix].tirn,
1716 in, inlen);
1717 if (err)
1718 goto free_in;
1719 }
1720
1721free_in:
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001722 kvfree(in);
1723
1724 return err;
1725}
1726
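/* MLX5E_SW2HW_MTU/MLX5E_HW2SW_MTU convert between the netdev MTU and the
 * device MTU, which also counts L2 overhead (Ethernet header and FCS).
 */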
Saeed Mahameedcd255ef2016-04-22 00:33:05 +03001727static int mlx5e_set_mtu(struct mlx5e_priv *priv, u16 mtu)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001728{
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001729 struct mlx5_core_dev *mdev = priv->mdev;
Saeed Mahameedcd255ef2016-04-22 00:33:05 +03001730 u16 hw_mtu = MLX5E_SW2HW_MTU(mtu);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001731 int err;
1732
Saeed Mahameedcd255ef2016-04-22 00:33:05 +03001733 err = mlx5_set_port_mtu(mdev, hw_mtu, 1);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001734 if (err)
1735 return err;
1736
Saeed Mahameedcd255ef2016-04-22 00:33:05 +03001737 /* Update vport context MTU */
1738 mlx5_modify_nic_vport_mtu(mdev, hw_mtu);
1739 return 0;
1740}
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001741
Saeed Mahameedcd255ef2016-04-22 00:33:05 +03001742static void mlx5e_query_mtu(struct mlx5e_priv *priv, u16 *mtu)
1743{
1744 struct mlx5_core_dev *mdev = priv->mdev;
1745 u16 hw_mtu = 0;
1746 int err;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001747
Saeed Mahameedcd255ef2016-04-22 00:33:05 +03001748 err = mlx5_query_nic_vport_mtu(mdev, &hw_mtu);
1749 if (err || !hw_mtu) /* fallback to port oper mtu */
1750 mlx5_query_port_oper_mtu(mdev, &hw_mtu, 1);
1751
1752 *mtu = MLX5E_HW2SW_MTU(hw_mtu);
1753}
1754
1755static int mlx5e_set_dev_port_mtu(struct net_device *netdev)
1756{
1757 struct mlx5e_priv *priv = netdev_priv(netdev);
1758 u16 mtu;
1759 int err;
1760
1761 err = mlx5e_set_mtu(priv, netdev->mtu);
1762 if (err)
1763 return err;
1764
1765 mlx5e_query_mtu(priv, &mtu);
1766 if (mtu != netdev->mtu)
1768			netdev_warn(netdev, "%s: VPort MTU %d is different from netdev mtu %d\n",
1768 __func__, mtu, netdev->mtu);
1769
1770 netdev->mtu = mtu;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001771 return 0;
1772}
1773
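/* Expose 'ntc' traffic classes, each backed by the same 'nch' queues at
 * offset 0. For example, with nch = 4 and ntc = 2 the stack sees both TCs
 * on queues 0..3, while the driver effectively transmits on
 * txq = channel_ix + tc * nch (see mlx5e_select_queue()).
 */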
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001774static void mlx5e_netdev_set_tcs(struct net_device *netdev)
1775{
1776 struct mlx5e_priv *priv = netdev_priv(netdev);
1777 int nch = priv->params.num_channels;
1778 int ntc = priv->params.num_tc;
1779 int tc;
1780
1781 netdev_reset_tc(netdev);
1782
1783 if (ntc == 1)
1784 return;
1785
1786 netdev_set_num_tc(netdev, ntc);
1787
Rana Shahout7ccdd082016-06-30 17:34:48 +03001788	/* Map all netdev TCs to TXQ offset 0;
1789	 * we keep our own UP to TXQ mapping for QoS.
1790	 */
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001791 for (tc = 0; tc < ntc; tc++)
Rana Shahout7ccdd082016-06-30 17:34:48 +03001792 netdev_set_tc_queue(netdev, tc, nch, 0);
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001793}
1794
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001795int mlx5e_open_locked(struct net_device *netdev)
1796{
1797 struct mlx5e_priv *priv = netdev_priv(netdev);
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001798 struct mlx5_core_dev *mdev = priv->mdev;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001799 int num_txqs;
1800 int err;
1801
1802 set_bit(MLX5E_STATE_OPENED, &priv->state);
1803
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02001804 mlx5e_netdev_set_tcs(netdev);
1805
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001806 num_txqs = priv->params.num_channels * priv->params.num_tc;
1807 netif_set_real_num_tx_queues(netdev, num_txqs);
1808 netif_set_real_num_rx_queues(netdev, priv->params.num_channels);
1809
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001810 err = mlx5e_open_channels(priv);
1811 if (err) {
1812 netdev_err(netdev, "%s: mlx5e_open_channels failed, %d\n",
1813 __func__, err);
Achiad Shochat343b29f2015-09-25 10:49:09 +03001814 goto err_clear_state_opened_flag;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001815 }
1816
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03001817 err = mlx5e_refresh_tirs_self_loopback_enable(priv->mdev);
Tariq Toukan66189962015-11-12 19:35:26 +02001818 if (err) {
1819 netdev_err(netdev, "%s: mlx5e_refresh_tirs_self_loopback_enable failed, %d\n",
1820 __func__, err);
1821 goto err_close_channels;
1822 }
1823
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001824 mlx5e_redirect_rqts(priv);
Tariq Toukance89ef32016-03-02 00:13:33 +02001825 mlx5e_update_carrier(priv);
Eran Ben Elishaef9814d2015-12-29 14:58:31 +02001826 mlx5e_timestamp_init(priv);
Maor Gottlieb5a7b27e2016-04-29 01:36:39 +03001827#ifdef CONFIG_RFS_ACCEL
1828 priv->netdev->rx_cpu_rmap = priv->mdev->rmap;
1829#endif
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001830 if (priv->profile->update_stats)
1831 queue_delayed_work(priv->wq, &priv->update_stats_work, 0);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001832
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001833 if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
1834 err = mlx5e_add_sqs_fwd_rules(priv);
1835 if (err)
1836 goto err_close_channels;
1837 }
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001838 return 0;
Achiad Shochat343b29f2015-09-25 10:49:09 +03001839
Tariq Toukan66189962015-11-12 19:35:26 +02001840err_close_channels:
1841 mlx5e_close_channels(priv);
Achiad Shochat343b29f2015-09-25 10:49:09 +03001842err_clear_state_opened_flag:
1843 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1844 return err;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001845}
1846
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001847int mlx5e_open(struct net_device *netdev)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001848{
1849 struct mlx5e_priv *priv = netdev_priv(netdev);
1850 int err;
1851
1852 mutex_lock(&priv->state_lock);
1853 err = mlx5e_open_locked(netdev);
1854 mutex_unlock(&priv->state_lock);
1855
1856 return err;
1857}
1858
1859int mlx5e_close_locked(struct net_device *netdev)
1860{
1861 struct mlx5e_priv *priv = netdev_priv(netdev);
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001862 struct mlx5_core_dev *mdev = priv->mdev;
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001863
Achiad Shochata1985742015-11-03 08:07:18 +02001864 /* May already be CLOSED in case a previous configuration operation
1865 * (e.g RX/TX queue size change) that involves close&open failed.
1866 */
1867 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
1868 return 0;
1869
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001870 clear_bit(MLX5E_STATE_OPENED, &priv->state);
1871
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001872 if (MLX5_CAP_GEN(mdev, vport_group_manager))
1873 mlx5e_remove_sqs_fwd_rules(priv);
1874
Eran Ben Elishaef9814d2015-12-29 14:58:31 +02001875 mlx5e_timestamp_cleanup(priv);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001876 netif_carrier_off(priv->netdev);
Tariq Toukance89ef32016-03-02 00:13:33 +02001877 mlx5e_redirect_rqts(priv);
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001878 mlx5e_close_channels(priv);
1879
1880 return 0;
1881}
1882
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03001883int mlx5e_close(struct net_device *netdev)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001884{
1885 struct mlx5e_priv *priv = netdev_priv(netdev);
1886 int err;
1887
1888 mutex_lock(&priv->state_lock);
1889 err = mlx5e_close_locked(netdev);
1890 mutex_unlock(&priv->state_lock);
1891
1892 return err;
1893}
1894
Achiad Shochat50cfa252015-08-04 14:05:41 +03001895static int mlx5e_create_drop_rq(struct mlx5e_priv *priv,
1896 struct mlx5e_rq *rq,
1897 struct mlx5e_rq_param *param)
1898{
1899 struct mlx5_core_dev *mdev = priv->mdev;
1900 void *rqc = param->rqc;
1901 void *rqc_wq = MLX5_ADDR_OF(rqc, rqc, wq);
1902 int err;
1903
1904 param->wq.db_numa_node = param->wq.buf_numa_node;
1905
1906 err = mlx5_wq_ll_create(mdev, &param->wq, rqc_wq, &rq->wq,
1907 &rq->wq_ctrl);
1908 if (err)
1909 return err;
1910
1911 rq->priv = priv;
1912
1913 return 0;
1914}
1915
1916static int mlx5e_create_drop_cq(struct mlx5e_priv *priv,
1917 struct mlx5e_cq *cq,
1918 struct mlx5e_cq_param *param)
1919{
1920 struct mlx5_core_dev *mdev = priv->mdev;
1921 struct mlx5_core_cq *mcq = &cq->mcq;
1922 int eqn_not_used;
Doron Tsur0b6e26c2016-01-17 11:25:47 +02001923 unsigned int irqn;
Achiad Shochat50cfa252015-08-04 14:05:41 +03001924 int err;
1925
1926 err = mlx5_cqwq_create(mdev, &param->wq, param->cqc, &cq->wq,
1927 &cq->wq_ctrl);
1928 if (err)
1929 return err;
1930
1931 mlx5_vector2eqn(mdev, param->eq_ix, &eqn_not_used, &irqn);
1932
1933 mcq->cqe_sz = 64;
1934 mcq->set_ci_db = cq->wq_ctrl.db.db;
1935 mcq->arm_db = cq->wq_ctrl.db.db + 1;
1936 *mcq->set_ci_db = 0;
1937 *mcq->arm_db = 0;
1938 mcq->vector = param->eq_ix;
1939 mcq->comp = mlx5e_completion_event;
1940 mcq->event = mlx5e_cq_error_event;
1941 mcq->irqn = irqn;
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03001942 mcq->uar = &mdev->mlx5e_res.cq_uar;
Achiad Shochat50cfa252015-08-04 14:05:41 +03001943
1944 cq->priv = priv;
1945
1946 return 0;
1947}
1948
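/* The drop RQ is a minimal RQ that no buffers are ever posted to; TIRs
 * fall back to it whenever the channel RQs are unavailable (e.g. while
 * the interface is down), so flow steering always has a valid target.
 */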
1949static int mlx5e_open_drop_rq(struct mlx5e_priv *priv)
1950{
1951 struct mlx5e_cq_param cq_param;
1952 struct mlx5e_rq_param rq_param;
1953 struct mlx5e_rq *rq = &priv->drop_rq;
1954 struct mlx5e_cq *cq = &priv->drop_rq.cq;
1955 int err;
1956
1957 memset(&cq_param, 0, sizeof(cq_param));
1958 memset(&rq_param, 0, sizeof(rq_param));
Tariq Toukan556dd1b2016-03-02 00:13:36 +02001959 mlx5e_build_drop_rq_param(&rq_param);
Achiad Shochat50cfa252015-08-04 14:05:41 +03001960
1961 err = mlx5e_create_drop_cq(priv, cq, &cq_param);
1962 if (err)
1963 return err;
1964
1965 err = mlx5e_enable_cq(cq, &cq_param);
1966 if (err)
1967 goto err_destroy_cq;
1968
1969 err = mlx5e_create_drop_rq(priv, rq, &rq_param);
1970 if (err)
1971 goto err_disable_cq;
1972
1973 err = mlx5e_enable_rq(rq, &rq_param);
1974 if (err)
1975 goto err_destroy_rq;
1976
1977 return 0;
1978
1979err_destroy_rq:
1980 mlx5e_destroy_rq(&priv->drop_rq);
1981
1982err_disable_cq:
1983 mlx5e_disable_cq(&priv->drop_rq.cq);
1984
1985err_destroy_cq:
1986 mlx5e_destroy_cq(&priv->drop_rq.cq);
1987
1988 return err;
1989}
1990
1991static void mlx5e_close_drop_rq(struct mlx5e_priv *priv)
1992{
1993 mlx5e_disable_rq(&priv->drop_rq);
1994 mlx5e_destroy_rq(&priv->drop_rq);
1995 mlx5e_disable_cq(&priv->drop_rq.cq);
1996 mlx5e_destroy_cq(&priv->drop_rq.cq);
1997}
1998
Achiad Shochat40ab6a62015-08-04 14:05:44 +03001999static int mlx5e_create_tis(struct mlx5e_priv *priv, int tc)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002000{
2001 struct mlx5_core_dev *mdev = priv->mdev;
2002 u32 in[MLX5_ST_SZ_DW(create_tis_in)];
2003 void *tisc = MLX5_ADDR_OF(create_tis_in, in, ctx);
2004
2005 memset(in, 0, sizeof(in));
2006
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002007 MLX5_SET(tisc, tisc, prio, tc << 1);
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03002008 MLX5_SET(tisc, tisc, transport_domain, mdev->mlx5e_res.td.tdn);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002009
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +03002010 return mlx5_core_create_tis(mdev, in, sizeof(in), &priv->tisn[tc]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002011}
2012
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002013static void mlx5e_destroy_tis(struct mlx5e_priv *priv, int tc)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002014{
Haggai Abramonvsky7db22ff2015-06-04 19:30:37 +03002015 mlx5_core_destroy_tis(priv->mdev, priv->tisn[tc]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002016}
2017
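/* One TIS (transport interface send) per traffic class; every SQ of a
 * given TC is attached to the matching TIS.
 */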
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03002018int mlx5e_create_tises(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002019{
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002020 int err;
2021 int tc;
2022
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002023 for (tc = 0; tc < priv->profile->max_tc; tc++) {
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002024 err = mlx5e_create_tis(priv, tc);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002025 if (err)
2026 goto err_close_tises;
2027 }
2028
2029 return 0;
2030
2031err_close_tises:
2032 for (tc--; tc >= 0; tc--)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002033 mlx5e_destroy_tis(priv, tc);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002034
2035 return err;
2036}
2037
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03002038void mlx5e_cleanup_nic_tx(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002039{
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002040 int tc;
2041
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002042 for (tc = 0; tc < priv->profile->max_tc; tc++)
Achiad Shochat40ab6a62015-08-04 14:05:44 +03002043 mlx5e_destroy_tis(priv, tc);
Achiad Shochat5c503682015-08-04 14:05:43 +03002044}
2045
Tariq Toukan1da36692016-04-29 01:36:32 +03002046static void mlx5e_build_indir_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2047 enum mlx5e_traffic_types tt)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002048{
2049 void *hfso = MLX5_ADDR_OF(tirc, tirc, rx_hash_field_selector_outer);
2050
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03002051 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
Achiad Shochat3191e05f2015-06-11 14:47:33 +03002052
Achiad Shochat5a6f8ae2015-07-23 23:36:00 +03002053#define MLX5_HASH_IP (MLX5_HASH_FIELD_SEL_SRC_IP |\
2054 MLX5_HASH_FIELD_SEL_DST_IP)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002055
Achiad Shochat5a6f8ae2015-07-23 23:36:00 +03002056#define MLX5_HASH_IP_L4PORTS (MLX5_HASH_FIELD_SEL_SRC_IP |\
2057 MLX5_HASH_FIELD_SEL_DST_IP |\
2058 MLX5_HASH_FIELD_SEL_L4_SPORT |\
2059 MLX5_HASH_FIELD_SEL_L4_DPORT)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002060
Achiad Shochata7417492015-07-23 23:36:01 +03002061#define MLX5_HASH_IP_IPSEC_SPI (MLX5_HASH_FIELD_SEL_SRC_IP |\
2062 MLX5_HASH_FIELD_SEL_DST_IP |\
2063 MLX5_HASH_FIELD_SEL_IPSEC_SPI)
2064
Achiad Shochat5c503682015-08-04 14:05:43 +03002065 mlx5e_build_tir_ctx_lro(tirc, priv);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002066
Achiad Shochat4cbeaff2015-08-04 14:05:40 +03002067 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
Hadar Hen Zion398f3352016-07-01 14:51:06 +03002068 MLX5_SET(tirc, tirc, indirect_table, priv->indir_rqt.rqtn);
Tariq Toukan1da36692016-04-29 01:36:32 +03002069 mlx5e_build_tir_ctx_hash(tirc, priv);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002070
2071 switch (tt) {
2072 case MLX5E_TT_IPV4_TCP:
2073 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2074 MLX5_L3_PROT_TYPE_IPV4);
2075 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2076 MLX5_L4_PROT_TYPE_TCP);
2077 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
Achiad Shochat5a6f8ae2015-07-23 23:36:00 +03002078 MLX5_HASH_IP_L4PORTS);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002079 break;
2080
2081 case MLX5E_TT_IPV6_TCP:
2082 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2083 MLX5_L3_PROT_TYPE_IPV6);
2084 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2085 MLX5_L4_PROT_TYPE_TCP);
2086 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
Achiad Shochat5a6f8ae2015-07-23 23:36:00 +03002087 MLX5_HASH_IP_L4PORTS);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002088 break;
2089
2090 case MLX5E_TT_IPV4_UDP:
2091 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2092 MLX5_L3_PROT_TYPE_IPV4);
2093 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2094 MLX5_L4_PROT_TYPE_UDP);
2095 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
Achiad Shochat5a6f8ae2015-07-23 23:36:00 +03002096 MLX5_HASH_IP_L4PORTS);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002097 break;
2098
2099 case MLX5E_TT_IPV6_UDP:
2100 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2101 MLX5_L3_PROT_TYPE_IPV6);
2102 MLX5_SET(rx_hash_field_select, hfso, l4_prot_type,
2103 MLX5_L4_PROT_TYPE_UDP);
2104 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
Achiad Shochat5a6f8ae2015-07-23 23:36:00 +03002105 MLX5_HASH_IP_L4PORTS);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002106 break;
2107
Achiad Shochata7417492015-07-23 23:36:01 +03002108 case MLX5E_TT_IPV4_IPSEC_AH:
2109 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2110 MLX5_L3_PROT_TYPE_IPV4);
2111 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2112 MLX5_HASH_IP_IPSEC_SPI);
2113 break;
2114
2115 case MLX5E_TT_IPV6_IPSEC_AH:
2116 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2117 MLX5_L3_PROT_TYPE_IPV6);
2118 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2119 MLX5_HASH_IP_IPSEC_SPI);
2120 break;
2121
2122 case MLX5E_TT_IPV4_IPSEC_ESP:
2123 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2124 MLX5_L3_PROT_TYPE_IPV4);
2125 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2126 MLX5_HASH_IP_IPSEC_SPI);
2127 break;
2128
2129 case MLX5E_TT_IPV6_IPSEC_ESP:
2130 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2131 MLX5_L3_PROT_TYPE_IPV6);
2132 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2133 MLX5_HASH_IP_IPSEC_SPI);
2134 break;
2135
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002136 case MLX5E_TT_IPV4:
2137 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2138 MLX5_L3_PROT_TYPE_IPV4);
2139 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2140 MLX5_HASH_IP);
2141 break;
2142
2143 case MLX5E_TT_IPV6:
2144 MLX5_SET(rx_hash_field_select, hfso, l3_prot_type,
2145 MLX5_L3_PROT_TYPE_IPV6);
2146 MLX5_SET(rx_hash_field_select, hfso, selected_fields,
2147 MLX5_HASH_IP);
2148 break;
Tariq Toukan1da36692016-04-29 01:36:32 +03002149 default:
2150 WARN_ONCE(true,
2151 "mlx5e_build_indir_tir_ctx: bad traffic type!\n");
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002152 }
2153}
2154
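/* Direct TIRs point at a single-entry RQT, so the inverted-XOR8 setting
 * below never actually spreads traffic; it simply avoids the need for a
 * hash key.
 */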
Tariq Toukan1da36692016-04-29 01:36:32 +03002155static void mlx5e_build_direct_tir_ctx(struct mlx5e_priv *priv, u32 *tirc,
2156 u32 rqtn)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002157{
Hadar Hen Zionb50d2922016-07-01 14:51:04 +03002158 MLX5_SET(tirc, tirc, transport_domain, priv->mdev->mlx5e_res.td.tdn);
Tariq Toukan1da36692016-04-29 01:36:32 +03002159
2160 mlx5e_build_tir_ctx_lro(tirc, priv);
2161
2162 MLX5_SET(tirc, tirc, disp_type, MLX5_TIRC_DISP_TYPE_INDIRECT);
2163 MLX5_SET(tirc, tirc, indirect_table, rqtn);
2164 MLX5_SET(tirc, tirc, rx_hash_fn, MLX5_RX_HASH_FN_INVERTED_XOR8);
2165}
2166
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002167static int mlx5e_create_indirect_tirs(struct mlx5e_priv *priv)
Tariq Toukan1da36692016-04-29 01:36:32 +03002168{
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002169 struct mlx5e_tir *tir;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002170 void *tirc;
2171 int inlen;
2172 int err;
Tariq Toukan1da36692016-04-29 01:36:32 +03002173 u32 *in;
Tariq Toukan1da36692016-04-29 01:36:32 +03002174 int tt;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002175
2176 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2177 in = mlx5_vzalloc(inlen);
2178 if (!in)
2179 return -ENOMEM;
2180
Tariq Toukan1da36692016-04-29 01:36:32 +03002181 for (tt = 0; tt < MLX5E_NUM_INDIR_TIRS; tt++) {
2182 memset(in, 0, inlen);
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002183 tir = &priv->indir_tir[tt];
Tariq Toukan1da36692016-04-29 01:36:32 +03002184 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2185 mlx5e_build_indir_tir_ctx(priv, tirc, tt);
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002186 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
Tariq Toukan1da36692016-04-29 01:36:32 +03002187 if (err)
2188 goto err_destroy_tirs;
2189 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002190
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002191 kvfree(in);
2192
2193 return 0;
2194
2195err_destroy_tirs:
2196 for (tt--; tt >= 0; tt--)
2197 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[tt]);
2198
2199 kvfree(in);
2200
2201 return err;
2202}
2203
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03002204int mlx5e_create_direct_tirs(struct mlx5e_priv *priv)
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002205{
2206 int nch = priv->profile->max_nch(priv->mdev);
2207 struct mlx5e_tir *tir;
2208 void *tirc;
2209 int inlen;
2210 int err;
2211 u32 *in;
2212 int ix;
2213
2214 inlen = MLX5_ST_SZ_BYTES(create_tir_in);
2215 in = mlx5_vzalloc(inlen);
2216 if (!in)
2217 return -ENOMEM;
2218
Tariq Toukan1da36692016-04-29 01:36:32 +03002219 for (ix = 0; ix < nch; ix++) {
2220 memset(in, 0, inlen);
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002221 tir = &priv->direct_tir[ix];
Tariq Toukan1da36692016-04-29 01:36:32 +03002222 tirc = MLX5_ADDR_OF(create_tir_in, in, ctx);
2223 mlx5e_build_direct_tir_ctx(priv, tirc,
Hadar Hen Zion398f3352016-07-01 14:51:06 +03002224 priv->direct_tir[ix].rqt.rqtn);
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002225 err = mlx5e_create_tir(priv->mdev, tir, in, inlen);
Tariq Toukan1da36692016-04-29 01:36:32 +03002226 if (err)
2227 goto err_destroy_ch_tirs;
2228 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002229
Tariq Toukan1da36692016-04-29 01:36:32 +03002230 kvfree(in);
2231
2232 return 0;
2233
2234err_destroy_ch_tirs:
2235 for (ix--; ix >= 0; ix--)
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002236 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[ix]);
Tariq Toukan1da36692016-04-29 01:36:32 +03002237
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002238 kvfree(in);
2239
2240 return err;
2241}
2242
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002243static void mlx5e_destroy_indirect_tirs(struct mlx5e_priv *priv)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002244{
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002245 int i;
2246
2247 for (i = 0; i < MLX5E_NUM_INDIR_TIRS; i++)
2248 mlx5e_destroy_tir(priv->mdev, &priv->indir_tir[i]);
2249}
2250
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03002251void mlx5e_destroy_direct_tirs(struct mlx5e_priv *priv)
Hadar Hen Zion6bfd3902016-07-01 14:51:07 +03002252{
2253 int nch = priv->profile->max_nch(priv->mdev);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002254 int i;
2255
Tariq Toukan1da36692016-04-29 01:36:32 +03002256 for (i = 0; i < nch; i++)
Hadar Hen Zion724b2aa2016-07-01 14:51:05 +03002257 mlx5e_destroy_tir(priv->mdev, &priv->direct_tir[i]);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002258}
2259
Gal Pressman36350112016-04-24 22:51:55 +03002260int mlx5e_modify_rqs_vsd(struct mlx5e_priv *priv, bool vsd)
2261{
2262 int err = 0;
2263 int i;
2264
2265 if (!test_bit(MLX5E_STATE_OPENED, &priv->state))
2266 return 0;
2267
2268 for (i = 0; i < priv->params.num_channels; i++) {
2269 err = mlx5e_modify_rq_vsd(&priv->channel[i]->rq, vsd);
2270 if (err)
2271 return err;
2272 }
2273
2274 return 0;
2275}
2276
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002277static int mlx5e_setup_tc(struct net_device *netdev, u8 tc)
2278{
2279 struct mlx5e_priv *priv = netdev_priv(netdev);
2280 bool was_opened;
2281 int err = 0;
2282
2283 if (tc && tc != MLX5E_MAX_NUM_TC)
2284 return -EINVAL;
2285
2286 mutex_lock(&priv->state_lock);
2287
2288 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2289 if (was_opened)
2290 mlx5e_close_locked(priv->netdev);
2291
2292 priv->params.num_tc = tc ? tc : 1;
2293
2294 if (was_opened)
2295 err = mlx5e_open_locked(priv->netdev);
2296
2297 mutex_unlock(&priv->state_lock);
2298
2299 return err;
2300}
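/* ndo_setup_tc entry point: cls_flower commands on the ingress qdisc are
 * routed to the flower offload handlers; everything else is treated as an
 * MQPRIO request that changes the number of traffic classes.
 */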
2301
2302static int mlx5e_ndo_setup_tc(struct net_device *dev, u32 handle,
2303 __be16 proto, struct tc_to_netdev *tc)
2304{
Amir Vadaie8f887a2016-03-08 12:42:36 +02002305 struct mlx5e_priv *priv = netdev_priv(dev);
2306
2307 if (TC_H_MAJ(handle) != TC_H_MAJ(TC_H_INGRESS))
2308 goto mqprio;
2309
2310 switch (tc->type) {
Amir Vadaie3a2b7e2016-03-08 12:42:37 +02002311 case TC_SETUP_CLSFLOWER:
2312 switch (tc->cls_flower->command) {
2313 case TC_CLSFLOWER_REPLACE:
2314 return mlx5e_configure_flower(priv, proto, tc->cls_flower);
2315 case TC_CLSFLOWER_DESTROY:
2316 return mlx5e_delete_flower(priv, tc->cls_flower);
Amir Vadaiaad7e082016-05-13 12:55:42 +00002317 case TC_CLSFLOWER_STATS:
2318 return mlx5e_stats_flower(priv, tc->cls_flower);
Amir Vadaie3a2b7e2016-03-08 12:42:37 +02002319 }
Amir Vadaie8f887a2016-03-08 12:42:36 +02002320 default:
2321 return -EOPNOTSUPP;
2322 }
2323
2324mqprio:
Amir Vadai67ba4222016-03-08 12:42:34 +02002325 if (tc->type != TC_SETUP_MQPRIO)
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002326 return -EINVAL;
2327
2328 return mlx5e_setup_tc(dev, tc->tc);
2329}
2330
Hadar Hen Zioncb67b832016-07-01 14:51:09 +03002331struct rtnl_link_stats64 *
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002332mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats)
2333{
2334 struct mlx5e_priv *priv = netdev_priv(dev);
Gal Pressman9218b442016-04-24 22:51:47 +03002335 struct mlx5e_sw_stats *sstats = &priv->stats.sw;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002336 struct mlx5e_vport_stats *vstats = &priv->stats.vport;
Gal Pressman269e6b32016-04-24 22:51:46 +03002337 struct mlx5e_pport_stats *pstats = &priv->stats.pport;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002338
Gal Pressman9218b442016-04-24 22:51:47 +03002339 stats->rx_packets = sstats->rx_packets;
2340 stats->rx_bytes = sstats->rx_bytes;
2341 stats->tx_packets = sstats->tx_packets;
2342 stats->tx_bytes = sstats->tx_bytes;
Gal Pressman269e6b32016-04-24 22:51:46 +03002343
2344 stats->rx_dropped = priv->stats.qcnt.rx_out_of_buffer;
Gal Pressman9218b442016-04-24 22:51:47 +03002345 stats->tx_dropped = sstats->tx_queue_dropped;
Gal Pressman269e6b32016-04-24 22:51:46 +03002346
2347 stats->rx_length_errors =
Gal Pressman9218b442016-04-24 22:51:47 +03002348 PPORT_802_3_GET(pstats, a_in_range_length_errors) +
2349 PPORT_802_3_GET(pstats, a_out_of_range_length_field) +
2350 PPORT_802_3_GET(pstats, a_frame_too_long_errors);
Gal Pressman269e6b32016-04-24 22:51:46 +03002351 stats->rx_crc_errors =
Gal Pressman9218b442016-04-24 22:51:47 +03002352 PPORT_802_3_GET(pstats, a_frame_check_sequence_errors);
2353 stats->rx_frame_errors = PPORT_802_3_GET(pstats, a_alignment_errors);
2354 stats->tx_aborted_errors = PPORT_2863_GET(pstats, if_out_discards);
Gal Pressman269e6b32016-04-24 22:51:46 +03002355 stats->tx_carrier_errors =
Gal Pressman9218b442016-04-24 22:51:47 +03002356 PPORT_802_3_GET(pstats, a_symbol_error_during_carrier);
Gal Pressman269e6b32016-04-24 22:51:46 +03002357 stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors +
2358 stats->rx_frame_errors;
2359 stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors;
2360
2361 /* vport multicast also counts packets that are dropped due to steering
2362 * or rx out of buffer
2363 */
Gal Pressman9218b442016-04-24 22:51:47 +03002364 stats->multicast =
2365 VPORT_COUNTER_GET(vstats, received_eth_multicast.packets);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002366
2367 return stats;
2368}
2369
2370static void mlx5e_set_rx_mode(struct net_device *dev)
2371{
2372 struct mlx5e_priv *priv = netdev_priv(dev);
2373
Matthew Finlay7bb29752016-05-01 22:59:56 +03002374 queue_work(priv->wq, &priv->set_rx_mode_work);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002375}
2376
2377static int mlx5e_set_mac(struct net_device *netdev, void *addr)
2378{
2379 struct mlx5e_priv *priv = netdev_priv(netdev);
2380 struct sockaddr *saddr = addr;
2381
2382 if (!is_valid_ether_addr(saddr->sa_data))
2383 return -EADDRNOTAVAIL;
2384
2385 netif_addr_lock_bh(netdev);
2386 ether_addr_copy(netdev->dev_addr, saddr->sa_data);
2387 netif_addr_unlock_bh(netdev);
2388
Matthew Finlay7bb29752016-05-01 22:59:56 +03002389 queue_work(priv->wq, &priv->set_rx_mode_work);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002390
2391 return 0;
2392}
2393
Gal Pressman0e405442016-04-24 22:51:51 +03002394#define MLX5E_SET_FEATURE(netdev, feature, enable) \
2395 do { \
2396 if (enable) \
2397 netdev->features |= feature; \
2398 else \
2399 netdev->features &= ~feature; \
2400 } while (0)
2401
2402typedef int (*mlx5e_feature_handler)(struct net_device *netdev, bool enable);
2403
2404static int set_feature_lro(struct net_device *netdev, bool enable)
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002405{
2406 struct mlx5e_priv *priv = netdev_priv(netdev);
Gal Pressman0e405442016-04-24 22:51:51 +03002407 bool was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
2408 int err;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002409
2410 mutex_lock(&priv->state_lock);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002411
Gal Pressman0e405442016-04-24 22:51:51 +03002412 if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
2413 mlx5e_close_locked(priv->netdev);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002414
Gal Pressman0e405442016-04-24 22:51:51 +03002415 priv->params.lro_en = enable;
2416 err = mlx5e_modify_tirs_lro(priv);
2417 if (err) {
2418 netdev_err(netdev, "lro modify failed, %d\n", err);
2419 priv->params.lro_en = !enable;
Achiad Shochat98e81b02015-07-29 15:05:46 +03002420 }
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002421
Gal Pressman0e405442016-04-24 22:51:51 +03002422 if (was_opened && (priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST))
2423 mlx5e_open_locked(priv->netdev);
2424
Achiad Shochat9b37b072015-08-04 14:05:46 +03002425 mutex_unlock(&priv->state_lock);
2426
Gal Pressman0e405442016-04-24 22:51:51 +03002427 return err;
2428}
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002429
Gal Pressman0e405442016-04-24 22:51:51 +03002430static int set_feature_vlan_filter(struct net_device *netdev, bool enable)
2431{
2432 struct mlx5e_priv *priv = netdev_priv(netdev);
2433
2434 if (enable)
2435 mlx5e_enable_vlan_filter(priv);
2436 else
2437 mlx5e_disable_vlan_filter(priv);
2438
2439 return 0;
2440}
2441
2442static int set_feature_tc_num_filters(struct net_device *netdev, bool enable)
2443{
2444 struct mlx5e_priv *priv = netdev_priv(netdev);
2445
2446 if (!enable && mlx5e_tc_num_filters(priv)) {
Amir Vadaie8f887a2016-03-08 12:42:36 +02002447 netdev_err(netdev,
2448 "Active offloaded tc filters, can't turn hw_tc_offload off\n");
2449 return -EINVAL;
2450 }
2451
Gal Pressman0e405442016-04-24 22:51:51 +03002452 return 0;
2453}
2454
Eran Ben Elisha94cb1eb2016-04-24 22:51:52 +03002455static int set_feature_rx_all(struct net_device *netdev, bool enable)
2456{
2457 struct mlx5e_priv *priv = netdev_priv(netdev);
2458 struct mlx5_core_dev *mdev = priv->mdev;
2459
2460 return mlx5_set_port_fcs(mdev, !enable);
2461}
2462
Gal Pressman36350112016-04-24 22:51:55 +03002463static int set_feature_rx_vlan(struct net_device *netdev, bool enable)
2464{
2465 struct mlx5e_priv *priv = netdev_priv(netdev);
2466 int err;
2467
2468 mutex_lock(&priv->state_lock);
2469
2470 priv->params.vlan_strip_disable = !enable;
2471 err = mlx5e_modify_rqs_vsd(priv, !enable);
2472 if (err)
2473 priv->params.vlan_strip_disable = enable;
2474
2475 mutex_unlock(&priv->state_lock);
2476
2477 return err;
2478}
2479
Maor Gottlieb45bf454a2016-04-29 01:36:42 +03002480#ifdef CONFIG_RFS_ACCEL
2481static int set_feature_arfs(struct net_device *netdev, bool enable)
2482{
2483 struct mlx5e_priv *priv = netdev_priv(netdev);
2484 int err;
2485
2486 if (enable)
2487 err = mlx5e_arfs_enable(priv);
2488 else
2489 err = mlx5e_arfs_disable(priv);
2490
2491 return err;
2492}
2493#endif
2494
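/* Apply a single feature bit: call the handler only when the bit actually
 * changes, and mirror the result into netdev->features on success.
 */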
Gal Pressman0e405442016-04-24 22:51:51 +03002495static int mlx5e_handle_feature(struct net_device *netdev,
2496 netdev_features_t wanted_features,
2497 netdev_features_t feature,
2498 mlx5e_feature_handler feature_handler)
2499{
2500 netdev_features_t changes = wanted_features ^ netdev->features;
2501 bool enable = !!(wanted_features & feature);
2502 int err;
2503
2504 if (!(changes & feature))
2505 return 0;
2506
2507 err = feature_handler(netdev, enable);
2508 if (err) {
2509 netdev_err(netdev, "%s feature 0x%llx failed err %d\n",
2510 enable ? "Enable" : "Disable", feature, err);
2511 return err;
2512 }
2513
2514 MLX5E_SET_FEATURE(netdev, feature, enable);
2515 return 0;
2516}
2517
2518static int mlx5e_set_features(struct net_device *netdev,
2519 netdev_features_t features)
2520{
2521 int err;
2522
2523 err = mlx5e_handle_feature(netdev, features, NETIF_F_LRO,
2524 set_feature_lro);
2525 err |= mlx5e_handle_feature(netdev, features,
2526 NETIF_F_HW_VLAN_CTAG_FILTER,
2527 set_feature_vlan_filter);
2528 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_TC,
2529 set_feature_tc_num_filters);
Eran Ben Elisha94cb1eb2016-04-24 22:51:52 +03002530 err |= mlx5e_handle_feature(netdev, features, NETIF_F_RXALL,
2531 set_feature_rx_all);
Gal Pressman36350112016-04-24 22:51:55 +03002532 err |= mlx5e_handle_feature(netdev, features, NETIF_F_HW_VLAN_CTAG_RX,
2533 set_feature_rx_vlan);
Maor Gottlieb45bf454a2016-04-29 01:36:42 +03002534#ifdef CONFIG_RFS_ACCEL
2535 err |= mlx5e_handle_feature(netdev, features, NETIF_F_NTUPLE,
2536 set_feature_arfs);
2537#endif
Gal Pressman0e405442016-04-24 22:51:51 +03002538
2539 return err ? -EINVAL : 0;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002540}
2541
Saeed Mahameedd8edd242016-04-22 00:33:04 +03002542#define MLX5_HW_MIN_MTU 64
2543#define MLX5E_MIN_MTU (MLX5_HW_MIN_MTU + ETH_FCS_LEN)
2544
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002545static int mlx5e_change_mtu(struct net_device *netdev, int new_mtu)
2546{
2547 struct mlx5e_priv *priv = netdev_priv(netdev);
2548 struct mlx5_core_dev *mdev = priv->mdev;
Achiad Shochat98e81b02015-07-29 15:05:46 +03002549 bool was_opened;
Saeed Mahameed046339e2016-04-22 00:33:03 +03002550 u16 max_mtu;
Saeed Mahameedd8edd242016-04-22 00:33:04 +03002551 u16 min_mtu;
Achiad Shochat98e81b02015-07-29 15:05:46 +03002552 int err = 0;
Tariq Toukan506753b2016-08-18 21:09:03 +03002553 bool reset;
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002554
Saeed Mahameedfacc9692015-06-11 14:47:27 +03002555 mlx5_query_port_max_mtu(mdev, &max_mtu, 1);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002556
Doron Tsur50a9eea2015-11-12 19:35:27 +02002557 max_mtu = MLX5E_HW2SW_MTU(max_mtu);
Saeed Mahameedd8edd242016-04-22 00:33:04 +03002558	min_mtu = MLX5E_HW2SW_MTU(MLX5E_MIN_MTU);
Doron Tsur50a9eea2015-11-12 19:35:27 +02002559
Saeed Mahameedd8edd242016-04-22 00:33:04 +03002560 if (new_mtu > max_mtu || new_mtu < min_mtu) {
Saeed Mahameedfacc9692015-06-11 14:47:27 +03002561 netdev_err(netdev,
Saeed Mahameedd8edd242016-04-22 00:33:04 +03002562 "%s: Bad MTU (%d), valid range is: [%d..%d]\n",
2563 __func__, new_mtu, min_mtu, max_mtu);
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002564 return -EINVAL;
2565 }
2566
2567 mutex_lock(&priv->state_lock);
Achiad Shochat98e81b02015-07-29 15:05:46 +03002568
Tariq Toukan506753b2016-08-18 21:09:03 +03002569 reset = !priv->params.lro_en &&
2570 (priv->params.rq_wq_type !=
2571 MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ);
2572
Achiad Shochat98e81b02015-07-29 15:05:46 +03002573 was_opened = test_bit(MLX5E_STATE_OPENED, &priv->state);
Tariq Toukan506753b2016-08-18 21:09:03 +03002574 if (was_opened && reset)
Achiad Shochat98e81b02015-07-29 15:05:46 +03002575 mlx5e_close_locked(netdev);
2576
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002577 netdev->mtu = new_mtu;
Saeed Mahameed13f9bba2016-08-18 21:09:02 +03002578 mlx5e_set_dev_port_mtu(netdev);
Achiad Shochat98e81b02015-07-29 15:05:46 +03002579
Tariq Toukan506753b2016-08-18 21:09:03 +03002580 if (was_opened && reset)
Achiad Shochat98e81b02015-07-29 15:05:46 +03002581 err = mlx5e_open_locked(netdev);
2582
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002583 mutex_unlock(&priv->state_lock);
2584
2585 return err;
2586}
2587
Eran Ben Elishaef9814d2015-12-29 14:58:31 +02002588static int mlx5e_ioctl(struct net_device *dev, struct ifreq *ifr, int cmd)
2589{
2590 switch (cmd) {
2591 case SIOCSHWTSTAMP:
2592 return mlx5e_hwstamp_set(dev, ifr);
2593 case SIOCGHWTSTAMP:
2594 return mlx5e_hwstamp_get(dev, ifr);
2595 default:
2596 return -EOPNOTSUPP;
2597 }
2598}
2599
Saeed Mahameed66e49de2015-12-01 18:03:25 +02002600static int mlx5e_set_vf_mac(struct net_device *dev, int vf, u8 *mac)
2601{
2602 struct mlx5e_priv *priv = netdev_priv(dev);
2603 struct mlx5_core_dev *mdev = priv->mdev;
2604
2605 return mlx5_eswitch_set_vport_mac(mdev->priv.eswitch, vf + 1, mac);
2606}
2607
2608static int mlx5e_set_vf_vlan(struct net_device *dev, int vf, u16 vlan, u8 qos)
2609{
2610 struct mlx5e_priv *priv = netdev_priv(dev);
2611 struct mlx5_core_dev *mdev = priv->mdev;
2612
2613 return mlx5_eswitch_set_vport_vlan(mdev->priv.eswitch, vf + 1,
2614 vlan, qos);
2615}
2616
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +03002617static int mlx5e_set_vf_spoofchk(struct net_device *dev, int vf, bool setting)
2618{
2619 struct mlx5e_priv *priv = netdev_priv(dev);
2620 struct mlx5_core_dev *mdev = priv->mdev;
2621
2622 return mlx5_eswitch_set_vport_spoofchk(mdev->priv.eswitch, vf + 1, setting);
2623}
2624
Mohamad Haj Yahia1edc57e2016-05-03 17:14:04 +03002625static int mlx5e_set_vf_trust(struct net_device *dev, int vf, bool setting)
2626{
2627 struct mlx5e_priv *priv = netdev_priv(dev);
2628 struct mlx5_core_dev *mdev = priv->mdev;
2629
2630 return mlx5_eswitch_set_vport_trust(mdev->priv.eswitch, vf + 1, setting);
2631}

Saeed Mahameed66e49de2015-12-01 18:03:25 +02002632static int mlx5_vport_link2ifla(u8 esw_link)
2633{
2634 switch (esw_link) {
2635 case MLX5_ESW_VPORT_ADMIN_STATE_DOWN:
2636 return IFLA_VF_LINK_STATE_DISABLE;
2637 case MLX5_ESW_VPORT_ADMIN_STATE_UP:
2638 return IFLA_VF_LINK_STATE_ENABLE;
2639 }
2640 return IFLA_VF_LINK_STATE_AUTO;
2641}
2642
2643static int mlx5_ifla_link2vport(u8 ifla_link)
2644{
2645 switch (ifla_link) {
2646 case IFLA_VF_LINK_STATE_DISABLE:
2647 return MLX5_ESW_VPORT_ADMIN_STATE_DOWN;
2648 case IFLA_VF_LINK_STATE_ENABLE:
2649 return MLX5_ESW_VPORT_ADMIN_STATE_UP;
2650 }
2651 return MLX5_ESW_VPORT_ADMIN_STATE_AUTO;
2652}
2653
2654static int mlx5e_set_vf_link_state(struct net_device *dev, int vf,
2655 int link_state)
2656{
2657 struct mlx5e_priv *priv = netdev_priv(dev);
2658 struct mlx5_core_dev *mdev = priv->mdev;
2659
2660 return mlx5_eswitch_set_vport_state(mdev->priv.eswitch, vf + 1,
2661 mlx5_ifla_link2vport(link_state));
2662}
2663
2664static int mlx5e_get_vf_config(struct net_device *dev,
2665 int vf, struct ifla_vf_info *ivi)
2666{
2667 struct mlx5e_priv *priv = netdev_priv(dev);
2668 struct mlx5_core_dev *mdev = priv->mdev;
2669 int err;
2670
2671 err = mlx5_eswitch_get_vport_config(mdev->priv.eswitch, vf + 1, ivi);
2672 if (err)
2673 return err;
2674 ivi->linkstate = mlx5_vport_link2ifla(ivi->linkstate);
2675 return 0;
2676}
2677
2678static int mlx5e_get_vf_stats(struct net_device *dev,
2679 int vf, struct ifla_vf_stats *vf_stats)
2680{
2681 struct mlx5e_priv *priv = netdev_priv(dev);
2682 struct mlx5_core_dev *mdev = priv->mdev;
2683
2684 return mlx5_eswitch_get_vport_stats(mdev->priv.eswitch, vf + 1,
2685 vf_stats);
2686}
2687
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002688static void mlx5e_add_vxlan_port(struct net_device *netdev,
Alexander Duyck974c3f32016-06-16 12:22:38 -07002689 struct udp_tunnel_info *ti)
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002690{
2691 struct mlx5e_priv *priv = netdev_priv(netdev);
2692
Alexander Duyck974c3f32016-06-16 12:22:38 -07002693 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2694 return;
2695
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002696 if (!mlx5e_vxlan_allowed(priv->mdev))
2697 return;
2698
Alexander Duyck974c3f32016-06-16 12:22:38 -07002699 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 1);
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002700}
2701
2702static void mlx5e_del_vxlan_port(struct net_device *netdev,
Alexander Duyck974c3f32016-06-16 12:22:38 -07002703 struct udp_tunnel_info *ti)
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002704{
2705 struct mlx5e_priv *priv = netdev_priv(netdev);
2706
Alexander Duyck974c3f32016-06-16 12:22:38 -07002707 if (ti->type != UDP_TUNNEL_TYPE_VXLAN)
2708 return;
2709
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002710 if (!mlx5e_vxlan_allowed(priv->mdev))
2711 return;
2712
Alexander Duyck974c3f32016-06-16 12:22:38 -07002713 mlx5e_vxlan_queue_work(priv, ti->sa_family, be16_to_cpu(ti->port), 0);
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002714}
2715
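/* HW can only offload checksum/GSO for tunneled packets whose outer UDP
 * destination port was registered through mlx5e_add_vxlan_port(); for any
 * other encapsulation, mask those features to fall back to software.
 */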
2716static netdev_features_t mlx5e_vxlan_features_check(struct mlx5e_priv *priv,
2717 struct sk_buff *skb,
2718 netdev_features_t features)
2719{
2720 struct udphdr *udph;
2721 u16 proto;
2722 u16 port = 0;
2723
2724 switch (vlan_get_protocol(skb)) {
2725 case htons(ETH_P_IP):
2726 proto = ip_hdr(skb)->protocol;
2727 break;
2728 case htons(ETH_P_IPV6):
2729 proto = ipv6_hdr(skb)->nexthdr;
2730 break;
2731 default:
2732 goto out;
2733 }
2734
2735 if (proto == IPPROTO_UDP) {
2736 udph = udp_hdr(skb);
2737 port = be16_to_cpu(udph->dest);
2738 }
2739
2740 /* Verify if UDP port is being offloaded by HW */
2741 if (port && mlx5e_vxlan_lookup_port(priv, port))
2742 return features;
2743
2744out:
2745 /* Disable CSUM and GSO if the udp dport is not offloaded by HW */
2746 return features & ~(NETIF_F_CSUM_MASK | NETIF_F_GSO_MASK);
2747}
2748
2749static netdev_features_t mlx5e_features_check(struct sk_buff *skb,
2750 struct net_device *netdev,
2751 netdev_features_t features)
2752{
2753 struct mlx5e_priv *priv = netdev_priv(netdev);
2754
2755 features = vlan_features_check(skb, features);
2756 features = vxlan_features_check(skb, features);
2757
2758 /* Validate if the tunneled packet is being offloaded by HW */
2759 if (skb->encapsulation &&
2760 (features & NETIF_F_CSUM_MASK || features & NETIF_F_GSO_MASK))
2761 return mlx5e_vxlan_features_check(priv, skb, features);
2762
2763 return features;
2764}
2765
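/* ndo_tx_timeout handler: mark every stalled SQ for flush and, if the
 * interface is up, kick the recovery work on priv->tx_timeout_work.
 */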
Daniel Jurgens3947ca12016-06-30 17:34:45 +03002766static void mlx5e_tx_timeout(struct net_device *dev)
2767{
2768 struct mlx5e_priv *priv = netdev_priv(dev);
2769 bool sched_work = false;
2770 int i;
2771
2772 netdev_err(dev, "TX timeout detected\n");
2773
2774 for (i = 0; i < priv->params.num_channels * priv->params.num_tc; i++) {
2775 struct mlx5e_sq *sq = priv->txq_to_sq_map[i];
2776
Daniel Jurgens2c1ccc92016-07-13 00:06:59 +03002777 if (!netif_xmit_stopped(netdev_get_tx_queue(dev, i)))
Daniel Jurgens3947ca12016-06-30 17:34:45 +03002778 continue;
2779 sched_work = true;
Saeed Mahameed6e8dd6d2016-08-29 01:13:45 +03002780 set_bit(MLX5E_SQ_STATE_FLUSH, &sq->state);
Daniel Jurgens3947ca12016-06-30 17:34:45 +03002781 netdev_err(dev, "TX timeout on queue: %d, SQ: 0x%x, CQ: 0x%x, SQ Cons: 0x%x SQ Prod: 0x%x\n",
2782 i, sq->sqn, sq->cq.mcq.cqn, sq->cc, sq->pc);
2783 }
2784
2785 if (sched_work && test_bit(MLX5E_STATE_OPENED, &priv->state))
2786 schedule_work(&priv->tx_timeout_work);
2787}
2788
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002789static const struct net_device_ops mlx5e_netdev_ops_basic = {
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002790 .ndo_open = mlx5e_open,
2791 .ndo_stop = mlx5e_close,
2792 .ndo_start_xmit = mlx5e_xmit,
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002793 .ndo_setup_tc = mlx5e_ndo_setup_tc,
2794 .ndo_select_queue = mlx5e_select_queue,
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002795 .ndo_get_stats64 = mlx5e_get_stats,
2796 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2797 .ndo_set_mac_address = mlx5e_set_mac,
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002798 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2799 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002800 .ndo_set_features = mlx5e_set_features,
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002801 .ndo_change_mtu = mlx5e_change_mtu,
2802 .ndo_do_ioctl = mlx5e_ioctl,
Yevgeny Petrilin507f0c82016-06-23 17:02:38 +03002803 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
Maor Gottlieb45bf454a2016-04-29 01:36:42 +03002804#ifdef CONFIG_RFS_ACCEL
2805 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
2806#endif
Daniel Jurgens3947ca12016-06-30 17:34:45 +03002807 .ndo_tx_timeout = mlx5e_tx_timeout,
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002808};
2809
2810static const struct net_device_ops mlx5e_netdev_ops_sriov = {
2811 .ndo_open = mlx5e_open,
2812 .ndo_stop = mlx5e_close,
2813 .ndo_start_xmit = mlx5e_xmit,
Saeed Mahameed08fb1da2016-02-22 18:17:26 +02002814 .ndo_setup_tc = mlx5e_ndo_setup_tc,
2815 .ndo_select_queue = mlx5e_select_queue,
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002816 .ndo_get_stats64 = mlx5e_get_stats,
2817 .ndo_set_rx_mode = mlx5e_set_rx_mode,
2818 .ndo_set_mac_address = mlx5e_set_mac,
2819 .ndo_vlan_rx_add_vid = mlx5e_vlan_rx_add_vid,
2820 .ndo_vlan_rx_kill_vid = mlx5e_vlan_rx_kill_vid,
2821 .ndo_set_features = mlx5e_set_features,
2822 .ndo_change_mtu = mlx5e_change_mtu,
2823 .ndo_do_ioctl = mlx5e_ioctl,
Alexander Duyck974c3f32016-06-16 12:22:38 -07002824 .ndo_udp_tunnel_add = mlx5e_add_vxlan_port,
2825 .ndo_udp_tunnel_del = mlx5e_del_vxlan_port,
Yevgeny Petrilin507f0c82016-06-23 17:02:38 +03002826 .ndo_set_tx_maxrate = mlx5e_set_tx_maxrate,
Matthew Finlayb3f63c32016-02-22 18:17:32 +02002827 .ndo_features_check = mlx5e_features_check,
Maor Gottlieb45bf454a2016-04-29 01:36:42 +03002828#ifdef CONFIG_RFS_ACCEL
2829 .ndo_rx_flow_steer = mlx5e_rx_flow_steer,
2830#endif
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002831 .ndo_set_vf_mac = mlx5e_set_vf_mac,
2832 .ndo_set_vf_vlan = mlx5e_set_vf_vlan,
Mohamad Haj Yahiaf9423802016-05-03 17:13:59 +03002833 .ndo_set_vf_spoofchk = mlx5e_set_vf_spoofchk,
Mohamad Haj Yahia1edc57e2016-05-03 17:14:04 +03002834 .ndo_set_vf_trust = mlx5e_set_vf_trust,
Saeed Mahameedb0eed402016-02-09 14:57:44 +02002835 .ndo_get_vf_config = mlx5e_get_vf_config,
2836 .ndo_set_vf_link_state = mlx5e_set_vf_link_state,
2837 .ndo_get_vf_stats = mlx5e_get_vf_stats,
Daniel Jurgens3947ca12016-06-30 17:34:45 +03002838 .ndo_tx_timeout = mlx5e_tx_timeout,
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002839};
2840
2841static int mlx5e_check_required_hca_cap(struct mlx5_core_dev *mdev)
2842{
2843 if (MLX5_CAP_GEN(mdev, port_type) != MLX5_CAP_PORT_TYPE_ETH)
2844 return -ENOTSUPP;
2845 if (!MLX5_CAP_GEN(mdev, eth_net_offloads) ||
2846 !MLX5_CAP_GEN(mdev, nic_flow_table) ||
2847 !MLX5_CAP_ETH(mdev, csum_cap) ||
2848 !MLX5_CAP_ETH(mdev, max_lso_cap) ||
2849 !MLX5_CAP_ETH(mdev, vlan_cap) ||
Gal Pressman796a27e2015-06-11 14:47:30 +03002850 !MLX5_CAP_ETH(mdev, rss_ind_tbl_cap) ||
2851 MLX5_CAP_FLOWTABLE(mdev,
2852 flow_table_properties_nic_receive.max_ft_level)
2853 < 3) {
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002854 mlx5_core_warn(mdev,
2855 "Not creating net device, some required device capabilities are missing\n");
2856 return -ENOTSUPP;
2857 }
Tariq Toukan66189962015-11-12 19:35:26 +02002858 if (!MLX5_CAP_ETH(mdev, self_lb_en_modifiable))
2859 mlx5_core_warn(mdev, "Self loop back prevention is not supported\n");
Gal Pressman7524a5d2016-03-02 00:13:37 +02002860 if (!MLX5_CAP_GEN(mdev, cq_moderation))
2861		mlx5_core_warn(mdev, "CQ moderation is not supported\n");
Tariq Toukan66189962015-11-12 19:35:26 +02002862
Amir Vadaif62b8bb82015-05-28 22:28:48 +03002863 return 0;
2864}
2865
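/* Max inline data is bounded by half of the BlueFlame register
 * (bf_buf_size) minus the non-inline part of the TX WQE; the +2 re-adds
 * the inline_hdr_start bytes already counted in
 * sizeof(struct mlx5e_tx_wqe).
 */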
Achiad Shochat58d52292015-07-23 23:35:58 +03002866u16 mlx5e_get_max_inline_cap(struct mlx5_core_dev *mdev)
2867{
2868 int bf_buf_size = (1 << MLX5_CAP_GEN(mdev, log_bf_reg_size)) / 2;
2869
2870 return bf_buf_size -
2871 sizeof(struct mlx5e_tx_wqe) +
2872 2 /*sizeof(mlx5e_tx_wqe.inline_hdr_start)*/;
2873}
2874
#ifdef CONFIG_MLX5_CORE_EN_DCB
static void mlx5e_ets_init(struct mlx5e_priv *priv)
{
        int i;

        priv->params.ets.ets_cap = mlx5_max_tc(priv->mdev) + 1;
        for (i = 0; i < priv->params.ets.ets_cap; i++) {
                priv->params.ets.tc_tx_bw[i] = MLX5E_MAX_BW_ALLOC;
                priv->params.ets.tc_tsa[i] = IEEE_8021QAZ_TSA_VENDOR;
                priv->params.ets.prio_tc[i] = i;
        }

        /* tclass[prio=0]=1, tclass[prio=1]=0, tclass[prio=i]=i (for i>1) */
        priv->params.ets.prio_tc[0] = 1;
        priv->params.ets.prio_tc[1] = 0;
}
#endif

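/* Build the default RSS indirection table by spreading entries
 * round-robin over the channels, first capping the channel count to the
 * number of CPU cores on the device's NUMA node so that, by default, RX
 * traffic lands on local-node cores.
 */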
void mlx5e_build_default_indir_rqt(struct mlx5_core_dev *mdev,
                                   u32 *indirection_rqt, int len,
                                   int num_channels)
{
        int node = mdev->priv.numa_node;
        int node_num_of_cores;
        int i;

        if (node == -1)
                node = first_online_node;

        node_num_of_cores = cpumask_weight(cpumask_of_node(node));

        if (node_num_of_cores)
                num_channels = min_t(int, num_channels, node_num_of_cores);

        for (i = 0; i < len; i++)
                indirection_rqt[i] = i % num_channels;
}

static bool mlx5e_check_fragmented_striding_rq_cap(struct mlx5_core_dev *mdev)
{
        return MLX5_CAP_GEN(mdev, striding_rq) &&
               MLX5_CAP_GEN(mdev, umr_ptr_rlky) &&
               MLX5_CAP_ETH(mdev, reg_umr_sq);
}

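/* Report PCIe bandwidth in Mb/s as the raw per-lane signalling rate
 * times the negotiated link width. Note this uses the raw GT/s figure;
 * encoding overhead (8b/10b or 128b/130b) is not accounted for, which
 * is close enough for the coarse heuristic below.
 */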
static int mlx5e_get_pci_bw(struct mlx5_core_dev *mdev, u32 *pci_bw)
{
        enum pcie_link_width width;
        enum pci_bus_speed speed;
        int err = 0;

        err = pcie_get_minimum_link(mdev->pdev, &speed, &width);
        if (err)
                return err;

        if (speed == PCI_SPEED_UNKNOWN || width == PCIE_LNK_WIDTH_UNKNOWN)
                return -EINVAL;

        switch (speed) {
        case PCIE_SPEED_2_5GT:
                *pci_bw = 2500 * width;
                break;
        case PCIE_SPEED_5_0GT:
                *pci_bw = 5000 * width;
                break;
        case PCIE_SPEED_8_0GT:
                *pci_bw = 8000 * width;
                break;
        default:
                return -EINVAL;
        }

        return 0;
}

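/* Default-enable CQE compression only when the PCIe link is likely the
 * bottleneck: below 40Gb/s and below the port's link speed.
 */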
static bool cqe_compress_heuristic(u32 link_speed, u32 pci_bw)
{
        return (link_speed && pci_bw &&
                (pci_bw < 40000) && (pci_bw < link_speed));
}

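/* Install the default RX CQ moderation profile for the chosen period
 * mode; CQE-based moderation gets its own usec default.
 */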
void mlx5e_set_rx_cq_mode_params(struct mlx5e_params *params, u8 cq_period_mode)
{
        params->rx_cq_period_mode = cq_period_mode;

        params->rx_cq_moderation.pkts =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_PKTS;
        params->rx_cq_moderation.usec =
                MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC;

        if (cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE)
                params->rx_cq_moderation.usec =
                        MLX5E_PARAMS_DEFAULT_RX_CQ_MODERATION_USEC_FROM_CQE;
}

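/* Resolve the minimal TX inline mode the device requires: a fixed L2
 * inline, a value read from the NIC vport context, or none at all when
 * the device reports that inlining is not required.
 */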
static void mlx5e_query_min_inline(struct mlx5_core_dev *mdev,
                                   u8 *min_inline_mode)
{
        switch (MLX5_CAP_ETH(mdev, wqe_inline_mode)) {
        case MLX5E_INLINE_MODE_L2:
                *min_inline_mode = MLX5_INLINE_MODE_L2;
                break;
        case MLX5E_INLINE_MODE_VPORT_CONTEXT:
                mlx5_query_nic_vport_min_inline(mdev,
                                                min_inline_mode);
                break;
        case MLX5_INLINE_MODE_NOT_REQUIRED:
                *min_inline_mode = MLX5_INLINE_MODE_NONE;
                break;
        }
}

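/* Populate the default priv parameters for a NIC netdev: RQ type
 * (striding RQ when supported), CQE compression via the PCIe heuristic
 * above, CQ moderation, RSS/LRO defaults and the deferred work items.
 * Runs once per netdev, before any HW resources are created.
 */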
static void mlx5e_build_nic_netdev_priv(struct mlx5_core_dev *mdev,
                                        struct net_device *netdev,
                                        const struct mlx5e_profile *profile,
                                        void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        u32 link_speed = 0;
        u32 pci_bw = 0;
        u8 cq_period_mode = MLX5_CAP_GEN(mdev, cq_period_start_from_cqe) ?
                            MLX5_CQ_PERIOD_MODE_START_FROM_CQE :
                            MLX5_CQ_PERIOD_MODE_START_FROM_EQE;

        priv->params.log_sq_size =
                MLX5E_PARAMS_DEFAULT_LOG_SQ_SIZE;
        priv->params.rq_wq_type = mlx5e_check_fragmented_striding_rq_cap(mdev) ?
                MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ :
                MLX5_WQ_TYPE_LINKED_LIST;

        /* set CQE compression */
        priv->params.rx_cqe_compress_admin = false;
        if (MLX5_CAP_GEN(mdev, cqe_compression) &&
            MLX5_CAP_GEN(mdev, vport_group_manager)) {
                mlx5e_get_max_linkspeed(mdev, &link_speed);
                mlx5e_get_pci_bw(mdev, &pci_bw);
                mlx5_core_dbg(mdev, "Max link speed = %d, PCI BW = %d\n",
                              link_speed, pci_bw);
                priv->params.rx_cqe_compress_admin =
                        cqe_compress_heuristic(link_speed, pci_bw);
        }

        priv->params.rx_cqe_compress = priv->params.rx_cqe_compress_admin;

        switch (priv->params.rq_wq_type) {
        case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ:
                priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE_MPW;
                priv->params.mpwqe_log_stride_sz =
                        priv->params.rx_cqe_compress ?
                        MLX5_MPWRQ_LOG_STRIDE_SIZE_CQE_COMPRESS :
                        MLX5_MPWRQ_LOG_STRIDE_SIZE;
                priv->params.mpwqe_log_num_strides = MLX5_MPWRQ_LOG_WQE_SZ -
                        priv->params.mpwqe_log_stride_sz;
                priv->params.lro_en = true;
                break;
        default: /* MLX5_WQ_TYPE_LINKED_LIST */
                priv->params.log_rq_size = MLX5E_PARAMS_DEFAULT_LOG_RQ_SIZE;
        }

        mlx5_core_info(mdev,
                       "MLX5E: StrdRq(%d) RqSz(%ld) StrdSz(%ld) RxCqeCmprss(%d)\n",
                       priv->params.rq_wq_type == MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ,
                       BIT(priv->params.log_rq_size),
                       BIT(priv->params.mpwqe_log_stride_sz),
                       priv->params.rx_cqe_compress_admin);

        priv->params.min_rx_wqes = mlx5_min_rx_wqes(priv->params.rq_wq_type,
                                                    BIT(priv->params.log_rq_size));

        priv->params.rx_am_enabled = MLX5_CAP_GEN(mdev, cq_moderation);
        mlx5e_set_rx_cq_mode_params(&priv->params, cq_period_mode);

        priv->params.tx_cq_moderation.usec =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_USEC;
        priv->params.tx_cq_moderation.pkts =
                MLX5E_PARAMS_DEFAULT_TX_CQ_MODERATION_PKTS;
        priv->params.tx_max_inline = mlx5e_get_max_inline_cap(mdev);
        mlx5e_query_min_inline(mdev, &priv->params.tx_min_inline_mode);
        priv->params.num_tc = 1;
        priv->params.rss_hfunc = ETH_RSS_HASH_XOR;

        netdev_rss_key_fill(priv->params.toeplitz_hash_key,
                            sizeof(priv->params.toeplitz_hash_key));

        mlx5e_build_default_indir_rqt(mdev, priv->params.indirection_rqt,
                                      MLX5E_INDIR_RQT_SIZE, profile->max_nch(mdev));

        priv->params.lro_wqe_sz =
                MLX5E_PARAMS_DEFAULT_LRO_WQE_SZ;

        /* Initialize pflags */
        MLX5E_SET_PRIV_FLAG(priv, MLX5E_PFLAG_RX_CQE_BASED_MODER,
                            priv->params.rx_cq_period_mode == MLX5_CQ_PERIOD_MODE_START_FROM_CQE);

        priv->mdev = mdev;
        priv->netdev = netdev;
        priv->params.num_channels = profile->max_nch(mdev);
        priv->profile = profile;
        priv->ppriv = ppriv;

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_ets_init(priv);
#endif

        mutex_init(&priv->state_lock);

        INIT_WORK(&priv->update_carrier_work, mlx5e_update_carrier_work);
        INIT_WORK(&priv->set_rx_mode_work, mlx5e_set_rx_mode_work);
        INIT_WORK(&priv->tx_timeout_work, mlx5e_tx_timeout_work);
        INIT_DELAYED_WORK(&priv->update_stats_work, mlx5e_update_stats_work);
}

static void mlx5e_set_netdev_dev_addr(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5_query_nic_vport_mac_address(priv->mdev, 0, netdev->dev_addr);
        if (is_zero_ether_addr(netdev->dev_addr) &&
            !MLX5_CAP_GEN(priv->mdev, vport_group_manager)) {
                eth_hw_addr_random(netdev);
                mlx5_core_info(priv->mdev, "Assigned random MAC address %pM\n", netdev->dev_addr);
        }
}

static const struct switchdev_ops mlx5e_switchdev_ops = {
        .switchdev_port_attr_get = mlx5e_attr_get,
};

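/* Wire the netdev up: the SR-IOV ndo set (plus DCB and switchdev ops)
 * when this function is the vport group manager, ethtool ops, and the
 * feature flags derived from device caps (LRO, VXLAN tunnel offloads,
 * FCS/RXALL, HW TC offload and aRFS).
 */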
static void mlx5e_build_nic_netdev(struct net_device *netdev)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);
        struct mlx5_core_dev *mdev = priv->mdev;
        bool fcs_supported;
        bool fcs_enabled;

        SET_NETDEV_DEV(netdev, &mdev->pdev->dev);

        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
                netdev->netdev_ops = &mlx5e_netdev_ops_sriov;
#ifdef CONFIG_MLX5_CORE_EN_DCB
                netdev->dcbnl_ops = &mlx5e_dcbnl_ops;
#endif
        } else {
                netdev->netdev_ops = &mlx5e_netdev_ops_basic;
        }

        netdev->watchdog_timeo = 15 * HZ;

        netdev->ethtool_ops = &mlx5e_ethtool_ops;

        netdev->vlan_features |= NETIF_F_SG;
        netdev->vlan_features |= NETIF_F_IP_CSUM;
        netdev->vlan_features |= NETIF_F_IPV6_CSUM;
        netdev->vlan_features |= NETIF_F_GRO;
        netdev->vlan_features |= NETIF_F_TSO;
        netdev->vlan_features |= NETIF_F_TSO6;
        netdev->vlan_features |= NETIF_F_RXCSUM;
        netdev->vlan_features |= NETIF_F_RXHASH;

        if (!!MLX5_CAP_ETH(mdev, lro_cap))
                netdev->vlan_features |= NETIF_F_LRO;

        netdev->hw_features = netdev->vlan_features;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_TX;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_RX;
        netdev->hw_features |= NETIF_F_HW_VLAN_CTAG_FILTER;

        if (mlx5e_vxlan_allowed(mdev)) {
                netdev->hw_features |= NETIF_F_GSO_UDP_TUNNEL |
                                       NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                       NETIF_F_GSO_PARTIAL;
                netdev->hw_enc_features |= NETIF_F_IP_CSUM;
                netdev->hw_enc_features |= NETIF_F_IPV6_CSUM;
                netdev->hw_enc_features |= NETIF_F_TSO;
                netdev->hw_enc_features |= NETIF_F_TSO6;
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL;
                netdev->hw_enc_features |= NETIF_F_GSO_UDP_TUNNEL_CSUM |
                                           NETIF_F_GSO_PARTIAL;
                netdev->gso_partial_features = NETIF_F_GSO_UDP_TUNNEL_CSUM;
        }

        mlx5_query_port_fcs(mdev, &fcs_supported, &fcs_enabled);

        if (fcs_supported)
                netdev->hw_features |= NETIF_F_RXALL;

        netdev->features = netdev->hw_features;
        if (!priv->params.lro_en)
                netdev->features &= ~NETIF_F_LRO;

        if (fcs_enabled)
                netdev->features &= ~NETIF_F_RXALL;

#define FT_CAP(f) MLX5_CAP_FLOWTABLE(mdev, flow_table_properties_nic_receive.f)
        if (FT_CAP(flow_modify_en) &&
            FT_CAP(modify_root) &&
            FT_CAP(identified_miss_table_mode) &&
            FT_CAP(flow_table_modify)) {
                netdev->hw_features |= NETIF_F_HW_TC;
#ifdef CONFIG_RFS_ACCEL
                netdev->hw_features |= NETIF_F_NTUPLE;
#endif
        }

        netdev->features |= NETIF_F_HIGHDMA;

        netdev->priv_flags |= IFF_UNICAST_FLT;

        mlx5e_set_netdev_dev_addr(netdev);

#ifdef CONFIG_NET_SWITCHDEV
        if (MLX5_CAP_GEN(mdev, vport_group_manager))
                netdev->switchdev_ops = &mlx5e_switchdev_ops;
#endif
}

static void mlx5e_create_q_counter(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;

        err = mlx5_core_alloc_q_counter(mdev, &priv->q_counter);
        if (err) {
                mlx5_core_warn(mdev, "alloc queue counter failed, %d\n", err);
                priv->q_counter = 0;
        }
}

static void mlx5e_destroy_q_counter(struct mlx5e_priv *priv)
{
        if (!priv->q_counter)
                return;

        mlx5_core_dealloc_q_counter(priv->mdev, priv->q_counter);
}

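/* Create the UMR memory key the striding RQ uses to map its receive
 * buffers. The page count covers the worst case of all channels at the
 * maximum MPWQE RQ size, clamped to what a single UMR post appears able
 * to address (ALIGN(U16_MAX, 4) * 2 pages).
 */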
static int mlx5e_create_umr_mkey(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_create_mkey_mbox_in *in;
        struct mlx5_mkey_seg *mkc;
        int inlen = sizeof(*in);
        u64 npages = MLX5E_REQUIRED_MTTS(priv->profile->max_nch(mdev),
                                         BIT(MLX5E_PARAMS_MAXIMUM_LOG_RQ_SIZE_MPW));
        int err;

        in = mlx5_vzalloc(inlen);
        if (!in)
                return -ENOMEM;

        mkc = &in->seg;
        mkc->status = MLX5_MKEY_STATUS_FREE;
        mkc->flags = MLX5_PERM_UMR_EN |
                     MLX5_PERM_LOCAL_READ |
                     MLX5_PERM_LOCAL_WRITE |
                     MLX5_ACCESS_MODE_MTT;

        npages = min_t(u32, ALIGN(U16_MAX, 4) * 2, npages);

        mkc->qpn_mkey7_0 = cpu_to_be32(0xffffff << 8);
        mkc->flags_pd = cpu_to_be32(mdev->mlx5e_res.pdn);
        mkc->len = cpu_to_be64(npages << PAGE_SHIFT);
        mkc->xlt_oct_size = cpu_to_be32(MLX5_MTT_OCTW(npages));
        mkc->log2_page_size = PAGE_SHIFT;

        err = mlx5_core_create_mkey(mdev, &priv->umr_mkey, in, inlen, NULL,
                                    NULL, NULL);

        kvfree(in);

        return err;
}

static void mlx5e_nic_init(struct mlx5_core_dev *mdev,
                           struct net_device *netdev,
                           const struct mlx5e_profile *profile,
                           void *ppriv)
{
        struct mlx5e_priv *priv = netdev_priv(netdev);

        mlx5e_build_nic_netdev_priv(mdev, netdev, profile, ppriv);
        mlx5e_build_nic_netdev(netdev);
        mlx5e_vxlan_init(priv);
}

static void mlx5e_nic_cleanup(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;

        mlx5e_vxlan_cleanup(priv);

        if (MLX5_CAP_GEN(mdev, vport_group_manager))
                mlx5_eswitch_unregister_vport_rep(esw, 0);
}

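/* NIC RX setup order matters: RQTs (indirect then per-channel direct),
 * TIRs on top of the RQTs, flow steering on top of the TIRs, then TC
 * offload state. The error path unwinds in exact reverse.
 */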
static int mlx5e_init_nic_rx(struct mlx5e_priv *priv)
{
        struct mlx5_core_dev *mdev = priv->mdev;
        int err;
        int i;

        err = mlx5e_create_indirect_rqts(priv);
        if (err) {
                mlx5_core_warn(mdev, "create indirect rqts failed, %d\n", err);
                return err;
        }

        err = mlx5e_create_direct_rqts(priv);
        if (err) {
                mlx5_core_warn(mdev, "create direct rqts failed, %d\n", err);
                goto err_destroy_indirect_rqts;
        }

        err = mlx5e_create_indirect_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create indirect tirs failed, %d\n", err);
                goto err_destroy_direct_rqts;
        }

        err = mlx5e_create_direct_tirs(priv);
        if (err) {
                mlx5_core_warn(mdev, "create direct tirs failed, %d\n", err);
                goto err_destroy_indirect_tirs;
        }

        err = mlx5e_create_flow_steering(priv);
        if (err) {
                mlx5_core_warn(mdev, "create flow steering failed, %d\n", err);
                goto err_destroy_direct_tirs;
        }

        err = mlx5e_tc_init(priv);
        if (err)
                goto err_destroy_flow_steering;

        return 0;

err_destroy_flow_steering:
        mlx5e_destroy_flow_steering(priv);
err_destroy_direct_tirs:
        mlx5e_destroy_direct_tirs(priv);
err_destroy_indirect_tirs:
        mlx5e_destroy_indirect_tirs(priv);
err_destroy_direct_rqts:
        for (i = 0; i < priv->profile->max_nch(mdev); i++)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
err_destroy_indirect_rqts:
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
        return err;
}

static void mlx5e_cleanup_nic_rx(struct mlx5e_priv *priv)
{
        int i;

        mlx5e_tc_cleanup(priv);
        mlx5e_destroy_flow_steering(priv);
        mlx5e_destroy_direct_tirs(priv);
        mlx5e_destroy_indirect_tirs(priv);
        for (i = 0; i < priv->profile->max_nch(priv->mdev); i++)
                mlx5e_destroy_rqt(priv, &priv->direct_tir[i].rqt);
        mlx5e_destroy_rqt(priv, &priv->indir_rqt);
}

static int mlx5e_init_nic_tx(struct mlx5e_priv *priv)
{
        int err;

        err = mlx5e_create_tises(priv);
        if (err) {
                mlx5_core_warn(priv->mdev, "create tises failed, %d\n", err);
                return err;
        }

#ifdef CONFIG_MLX5_CORE_EN_DCB
        mlx5e_dcbnl_ieee_setets_core(priv, &priv->params.ets);
#endif
        return 0;
}

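/* Late enable, after register_netdev(): replay known UDP tunnel ports
 * under rtnl so VXLAN offloads pick up existing sockets, arm async
 * events, refresh the RX mode, and register vport 0 (the PF) as an
 * eswitch representor when the eswitch is managed here.
 */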
static void mlx5e_nic_enable(struct mlx5e_priv *priv)
{
        struct net_device *netdev = priv->netdev;
        struct mlx5_core_dev *mdev = priv->mdev;
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        struct mlx5_eswitch_rep rep;

        if (mlx5e_vxlan_allowed(mdev)) {
                rtnl_lock();
                udp_tunnel_get_rx_info(netdev);
                rtnl_unlock();
        }

        mlx5e_enable_async_events(priv);
        queue_work(priv->wq, &priv->set_rx_mode_work);

        if (MLX5_CAP_GEN(mdev, vport_group_manager)) {
                mlx5_query_nic_vport_mac_address(mdev, 0, rep.hw_id);
                rep.load = mlx5e_nic_rep_load;
                rep.unload = mlx5e_nic_rep_unload;
                rep.vport = 0;
                rep.priv_data = priv;
                mlx5_eswitch_register_vport_rep(esw, &rep);
        }
}

static void mlx5e_nic_disable(struct mlx5e_priv *priv)
{
        queue_work(priv->wq, &priv->set_rx_mode_work);
        mlx5e_disable_async_events(priv);
}

static const struct mlx5e_profile mlx5e_nic_profile = {
        .init              = mlx5e_nic_init,
        .cleanup           = mlx5e_nic_cleanup,
        .init_rx           = mlx5e_init_nic_rx,
        .cleanup_rx        = mlx5e_cleanup_nic_rx,
        .init_tx           = mlx5e_init_nic_tx,
        .cleanup_tx        = mlx5e_cleanup_nic_tx,
        .enable            = mlx5e_nic_enable,
        .disable           = mlx5e_nic_disable,
        .update_stats      = mlx5e_update_stats,
        .max_nch           = mlx5e_get_max_num_channels,
        .max_tc            = MLX5E_MAX_NUM_TC,
};

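/* Profile-driven netdev creation, shared with the representor code:
 * allocate the netdev, then build up the workqueue, UMR mkey, TX
 * resources, drop RQ, RX resources and queue counter before
 * registering. Returns the priv pointer, or NULL with everything
 * unwound on failure.
 */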
void *mlx5e_create_netdev(struct mlx5_core_dev *mdev,
                          const struct mlx5e_profile *profile, void *ppriv)
{
        struct net_device *netdev;
        struct mlx5e_priv *priv;
        int nch = profile->max_nch(mdev);
        int err;

        netdev = alloc_etherdev_mqs(sizeof(struct mlx5e_priv),
                                    nch * profile->max_tc,
                                    nch);
        if (!netdev) {
                mlx5_core_err(mdev, "alloc_etherdev_mqs() failed\n");
                return NULL;
        }

        profile->init(mdev, netdev, profile, ppriv);

        netif_carrier_off(netdev);

        priv = netdev_priv(netdev);

        priv->wq = create_singlethread_workqueue("mlx5e");
        if (!priv->wq)
                goto err_free_netdev;

        err = mlx5e_create_umr_mkey(priv);
        if (err) {
                mlx5_core_err(mdev, "create umr mkey failed, %d\n", err);
                goto err_destroy_wq;
        }

        err = profile->init_tx(priv);
        if (err)
                goto err_destroy_umr_mkey;

        err = mlx5e_open_drop_rq(priv);
        if (err) {
                mlx5_core_err(mdev, "open drop rq failed, %d\n", err);
                goto err_cleanup_tx;
        }

        err = profile->init_rx(priv);
        if (err)
                goto err_close_drop_rq;

        mlx5e_create_q_counter(priv);

        mlx5e_init_l2_addr(priv);

        mlx5e_set_dev_port_mtu(netdev);

        err = register_netdev(netdev);
        if (err) {
                mlx5_core_err(mdev, "register_netdev failed, %d\n", err);
                goto err_dealloc_q_counters;
        }

        if (profile->enable)
                profile->enable(priv);

        return priv;

err_dealloc_q_counters:
        mlx5e_destroy_q_counter(priv);
        profile->cleanup_rx(priv);

err_close_drop_rq:
        mlx5e_close_drop_rq(priv);

err_cleanup_tx:
        profile->cleanup_tx(priv);

err_destroy_umr_mkey:
        mlx5_core_destroy_mkey(mdev, &priv->umr_mkey);

err_destroy_wq:
        destroy_workqueue(priv->wq);

err_free_netdev:
        free_netdev(netdev);

        return NULL;
}

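/* Register an eswitch representor for every VF vport; vport 0 (the PF)
 * is registered separately in mlx5e_nic_enable(). Each rep carries the
 * PF MAC as its hw_id.
 */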
static void mlx5e_register_vport_rep(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        int vport;
        u8 mac[ETH_ALEN];

        if (!MLX5_CAP_GEN(mdev, vport_group_manager))
                return;

        mlx5_query_nic_vport_mac_address(mdev, 0, mac);

        for (vport = 1; vport < total_vfs; vport++) {
                struct mlx5_eswitch_rep rep;

                rep.load = mlx5e_vport_rep_load;
                rep.unload = mlx5e_vport_rep_unload;
                rep.vport = vport;
                ether_addr_copy(rep.hw_id, mac);
                mlx5_eswitch_register_vport_rep(esw, &rep);
        }
}

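/* mlx5_interface .add() entry point: check caps, create the per-device
 * shared resources, register VF representors, then instantiate the NIC
 * profile netdev (with vport 0's rep as its ppriv when applicable).
 */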
static void *mlx5e_add(struct mlx5_core_dev *mdev)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        void *ppriv = NULL;
        void *ret;

        if (mlx5e_check_required_hca_cap(mdev))
                return NULL;

        if (mlx5e_create_mdev_resources(mdev))
                return NULL;

        mlx5e_register_vport_rep(mdev);

        if (MLX5_CAP_GEN(mdev, vport_group_manager))
                ppriv = &esw->offloads.vport_reps[0];

        ret = mlx5e_create_netdev(mdev, &mlx5e_nic_profile, ppriv);
        if (!ret) {
                mlx5e_destroy_mdev_resources(mdev);
                return NULL;
        }
        return ret;
}

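/* Tear down a profile netdev in reverse of mlx5e_create_netdev(). On a
 * PCI shutdown the netdev is only detached and closed, and free_netdev()
 * is skipped, instead of taking the full unregister path.
 */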
void mlx5e_destroy_netdev(struct mlx5_core_dev *mdev, struct mlx5e_priv *priv)
{
        const struct mlx5e_profile *profile = priv->profile;
        struct net_device *netdev = priv->netdev;

        set_bit(MLX5E_STATE_DESTROYING, &priv->state);
        if (profile->disable)
                profile->disable(priv);

        flush_workqueue(priv->wq);
        if (test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state)) {
                netif_device_detach(netdev);
                mlx5e_close(netdev);
        } else {
                unregister_netdev(netdev);
        }

        mlx5e_destroy_q_counter(priv);
        profile->cleanup_rx(priv);
        mlx5e_close_drop_rq(priv);
        profile->cleanup_tx(priv);
        mlx5_core_destroy_mkey(priv->mdev, &priv->umr_mkey);
        cancel_delayed_work_sync(&priv->update_stats_work);
        destroy_workqueue(priv->wq);
        if (profile->cleanup)
                profile->cleanup(priv);

        if (!test_bit(MLX5_INTERFACE_STATE_SHUTDOWN, &mdev->intf_state))
                free_netdev(netdev);
}

static void mlx5e_remove(struct mlx5_core_dev *mdev, void *vpriv)
{
        struct mlx5_eswitch *esw = mdev->priv.eswitch;
        int total_vfs = MLX5_TOTAL_VPORTS(mdev);
        struct mlx5e_priv *priv = vpriv;
        int vport;

        mlx5e_destroy_netdev(mdev, priv);

        for (vport = 1; vport < total_vfs; vport++)
                mlx5_eswitch_unregister_vport_rep(esw, vport);

        mlx5e_destroy_mdev_resources(mdev);
}

static void *mlx5e_get_netdev(void *vpriv)
{
        struct mlx5e_priv *priv = vpriv;

        return priv->netdev;
}

static struct mlx5_interface mlx5e_interface = {
        .add       = mlx5e_add,
        .remove    = mlx5e_remove,
        .event     = mlx5e_async_event,
        .protocol  = MLX5_INTERFACE_PROTOCOL_ETH,
        .get_dev   = mlx5e_get_netdev,
};

void mlx5e_init(void)
{
        mlx5e_build_ptys2ethtool_map();
        mlx5_register_interface(&mlx5e_interface);
}

void mlx5e_cleanup(void)
{
        mlx5_unregister_interface(&mlx5e_interface);
}