/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID			0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))
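/* e.g. VLAN_TCI(5, 3) yields 0x6005: VID 5 in bits 0-11 and priority 3
 * above VLAN_PRIO_SHIFT (bit 13).
 */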

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (dev != pf_bp->dev)
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}

static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	int ifindex = tcf_mirred_ifindex(tc_act);
	struct net_device *dev;
	u16 dst_fid;

	dev = __dev_get_by_index(dev_net(bp->dev), ifindex);
	if (!dev) {
		netdev_info(bp->dev, "no dev for ifindex=%d", ifindex);
		return -EINVAL;
	}

	/* find the FID from dev */
	dst_fid = bnxt_flow_get_dst_fid(bp, dev);
	if (dst_fid == BNXT_FID_INVALID) {
		netdev_info(bp->dev, "can't get fid for ifindex=%d", ifindex);
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_fid = dst_fid;
	actions->dst_dev = dev;
	return 0;
}

static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}

static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	LIST_HEAD(tc_actions);
	int rc = 0;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_to_list(tc_exts, &tc_actions);
	list_for_each_entry(tc_act, &tc_actions, list) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
		}
	}

	/* Tunnel encap/decap action must be accompanied by a redirect action */
	if ((actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP ||
	     actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP) &&
	    !(actions->flags & BNXT_TC_ACTION_FLAG_FWD)) {
		netdev_info(bp->dev,
			    "error: no redir action along with encap/decap");
		return -EINVAL;
	}

	return rc;
}

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)
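/* e.g. GET_KEY(cmd, FLOW_DISSECTOR_KEY_BASIC) returns a pointer to the
 * struct flow_dissector_key_basic at that key's offset inside cmd->key;
 * GET_MASK() returns the corresponding entry inside cmd->mask.
 */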

static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(mask->vlan_id, mask->vlan_priority));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd,
				 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);
	return rc;
}

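/* e.g. for the mask ffff:ffff:ff00:: the four 32-bit words contribute
 * 32 + 8 + 0 + 0 prefix bits, so ipv6_mask_len() returns 40.
 */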
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
				ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);

	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;

	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
		ether_addr_copy(req.src_macaddr, l2_info->smac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}

static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
			(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}

static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
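/* e.g. two flower rules matching the same src/dst MACs but different L4
 * ports share one l2_node; the second rule is then allocated in FW with
 * ref_flow_handle set to the first rule's flow_handle.
 */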
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}

	return true;
}

/* Returns the final refcount of the node on success
 * or a -ve error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	} else {
		return tunnel_node->refcount;
	}
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d", rc);
	return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 sharing flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}

static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info,
				       struct net_device *real_dst_dev)
{
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
}

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *decap_filter_handle)
{
	struct ip_tunnel_key *decap_key = &flow->tun_key;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_l2_key l2_info = { {0} };
	struct bnxt_tc_tunnel_node *decap_node;
	struct ip_tunnel_key tun_key = { 0 };
	struct bnxt_tc_l2_key *decap_l2_info;
	__le32 ref_decap_handle;
	int rc;

	/* Check if there's another flow using the same tunnel decap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
					     &tc_info->decap_ht_params,
					     decap_key);
	if (!decap_node)
		return -ENOMEM;

	flow_node->decap_node = decap_node;

	if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	/* Resolve the L2 fields for tunnel decap
	 * Resolve the route for remote vtep (saddr) of the decap key
	 * Find its next-hop mac addrs
	 */
	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
	tun_key.tp_dst = flow->tun_key.tp_dst;
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info, bp->dev);
	if (rc)
		goto put_decap;

	decap_key->ttl = tun_key.ttl;
	decap_l2_info = &decap_node->l2_info;
	ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
	ether_addr_copy(decap_l2_info->smac, l2_info.dmac);
	if (l2_info.num_vlans) {
		decap_l2_info->num_vlans = l2_info.num_vlans;
		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
		decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
	}
	flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

	/* For getting a decap_filter_handle we first need to check if
	 * there are any other decap flows that share the same tunnel L2
	 * key and if so, pass that flow's decap_filter_handle as the
	 * ref_decap_handle for this flow.
	 */
	rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
					  &ref_decap_handle);
	if (rc)
		goto put_decap;

	/* Issue the hwrm cmd to allocate a decap filter handle */
	rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
					 ref_decap_handle,
					 &decap_node->tunnel_handle);
	if (rc)
		goto put_decap_l2;

done:
	*decap_filter_handle = decap_node->tunnel_handle;
	return 0;

put_decap_l2:
	bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				&tc_info->decap_ht_params,
				flow_node->decap_node);
	return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
				     struct bnxt_tc_tunnel_node *encap_node)
{
	__le32 encap_handle = encap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, encap_node);
	if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *encap_handle)
{
	struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_tunnel_node *encap_node;
	int rc;

	/* Check if there's another flow using the same tunnel encap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info,
					 flow->actions.dst_dev);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release references to any tunnel encap/decap nodes */
	bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;
	flow->src_fid = src_fid;

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}

static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	return __bnxt_tc_del_flow(bp, flow_node);
}

static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -1;
	}

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}

static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and it wraps around beyond that width.
 */
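/* e.g. with a 4-bit counter (mask = 0xf): if *accum is 0x1e and the HW
 * now reports val = 0x2, val wrapped (0x2 < 0xe); the result is
 * high_bits(0x1e) + 0x2 + (mask + 1) = 0x22, i.e. four new units.
 */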
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)	((x) & (mask))
#define high_bits(x, mask)	((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}

/* The HW counters' width is much less than 64 bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}

static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}

static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rc = rhashtable_walk_start(iter);
	if (rc && rc != -EAGAIN) {
		i = 0;
		goto done;
	}

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			goto done;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}

void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}

int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	int rc = 0;

	if (cls_flower->common.chain_index)
		return -EOPNOTSUPP;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
		break;

	case TC_CLSFLOWER_DESTROY:
		rc = bnxt_tc_del_flow(bp, cls_flower);
		break;

	case TC_CLSFLOWER_STATS:
		rc = bnxt_tc_get_flow_stats(bp, cls_flower);
		break;
	}
	return rc;
}

static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};

/* convert counter width in bits to a mask */
#define mask(width)	((u64)~0 >> (64 - (width)))
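/* e.g. mask(36) == 0xfffffffff, matching a 36-bit HW byte counter */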

int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		return rc;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
	return rc;
}

void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = &bp->tc_info;

	if (!tc_info->enabled)
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
}