/* Broadcom NetXtreme-C/E network driver.
 *
 * Copyright (c) 2017 Broadcom Limited
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation.
 */

#include <linux/netdevice.h>
#include <linux/inetdevice.h>
#include <linux/if_vlan.h>
#include <net/flow_dissector.h>
#include <net/pkt_cls.h>
#include <net/tc_act/tc_gact.h>
#include <net/tc_act/tc_skbedit.h>
#include <net/tc_act/tc_mirred.h>
#include <net/tc_act/tc_vlan.h>
#include <net/tc_act/tc_tunnel_key.h>

#include "bnxt_hsi.h"
#include "bnxt.h"
#include "bnxt_sriov.h"
#include "bnxt_tc.h"
#include "bnxt_vfr.h"

#define BNXT_FID_INVALID	0xffff
#define VLAN_TCI(vid, prio)	((vid) | ((prio) << VLAN_PRIO_SHIFT))

/* Return the dst fid of the func for flow forwarding
 * For PFs: src_fid is the fid of the PF
 * For VF-reps: src_fid is the fid of the VF
 */
static u16 bnxt_flow_get_dst_fid(struct bnxt *pf_bp, struct net_device *dev)
{
	struct bnxt *bp;

	/* check if dev belongs to the same switch */
	if (!switchdev_port_same_parent_id(pf_bp->dev, dev)) {
		netdev_info(pf_bp->dev, "dev(ifindex=%d) not on same switch",
			    dev->ifindex);
		return BNXT_FID_INVALID;
	}

	/* Is dev a VF-rep? */
	if (bnxt_dev_is_vf_rep(dev))
		return bnxt_vf_rep_get_fid(dev);

	bp = netdev_priv(dev);
	return bp->pf.fw_fid;
}

static int bnxt_tc_parse_redir(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	struct net_device *dev = tcf_mirred_dev(tc_act);

	if (!dev) {
		netdev_info(bp->dev, "no dev in mirred action");
		return -EINVAL;
	}

	actions->flags |= BNXT_TC_ACTION_FLAG_FWD;
	actions->dst_dev = dev;
	return 0;
}

static void bnxt_tc_parse_vlan(struct bnxt *bp,
			       struct bnxt_tc_actions *actions,
			       const struct tc_action *tc_act)
{
	if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_POP) {
		actions->flags |= BNXT_TC_ACTION_FLAG_POP_VLAN;
	} else if (tcf_vlan_action(tc_act) == TCA_VLAN_ACT_PUSH) {
		actions->flags |= BNXT_TC_ACTION_FLAG_PUSH_VLAN;
		actions->push_vlan_tci = htons(tcf_vlan_push_vid(tc_act));
		actions->push_vlan_tpid = tcf_vlan_push_proto(tc_act);
	}
}

static int bnxt_tc_parse_tunnel_set(struct bnxt *bp,
				    struct bnxt_tc_actions *actions,
				    const struct tc_action *tc_act)
{
	struct ip_tunnel_info *tun_info = tcf_tunnel_info(tc_act);
	struct ip_tunnel_key *tun_key = &tun_info->key;

	if (ip_tunnel_info_af(tun_info) != AF_INET) {
		netdev_info(bp->dev, "only IPv4 tunnel-encap is supported");
		return -EOPNOTSUPP;
	}

	actions->tun_encap_key = *tun_key;
	actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP;
	return 0;
}

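/* Walk the flower rule's action list and translate each action into
 * bnxt_tc_actions flags/fields: a drop short-circuits the walk, while
 * redirect, vlan push/pop and tunnel encap/decap accumulate. The
 * destination fid is resolved last, since tunnel-encap flows always
 * forward via the PF.
 */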
static int bnxt_tc_parse_actions(struct bnxt *bp,
				 struct bnxt_tc_actions *actions,
				 struct tcf_exts *tc_exts)
{
	const struct tc_action *tc_act;
	LIST_HEAD(tc_actions);
	int rc;

	if (!tcf_exts_has_actions(tc_exts)) {
		netdev_info(bp->dev, "no actions");
		return -EINVAL;
	}

	tcf_exts_to_list(tc_exts, &tc_actions);
	list_for_each_entry(tc_act, &tc_actions, list) {
		/* Drop action */
		if (is_tcf_gact_shot(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_DROP;
			return 0; /* don't bother with other actions */
		}

		/* Redirect action */
		if (is_tcf_mirred_egress_redirect(tc_act)) {
			rc = bnxt_tc_parse_redir(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Push/pop VLAN */
		if (is_tcf_vlan(tc_act)) {
			bnxt_tc_parse_vlan(bp, actions, tc_act);
			continue;
		}

		/* Tunnel encap */
		if (is_tcf_tunnel_set(tc_act)) {
			rc = bnxt_tc_parse_tunnel_set(bp, actions, tc_act);
			if (rc)
				return rc;
			continue;
		}

		/* Tunnel decap */
		if (is_tcf_tunnel_release(tc_act)) {
			actions->flags |= BNXT_TC_ACTION_FLAG_TUNNEL_DECAP;
			continue;
		}
	}

	if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
		if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
			/* dst_fid is PF's fid */
			actions->dst_fid = bp->pf.fw_fid;
		} else {
			/* find the FID from dst_dev */
			actions->dst_fid =
				bnxt_flow_get_dst_fid(bp, actions->dst_dev);
			if (actions->dst_fid == BNXT_FID_INVALID)
				return -EINVAL;
		}
	}

	return 0;
}

#define GET_KEY(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->key)
#define GET_MASK(flow_cmd, key_type)					\
		skb_flow_dissector_target((flow_cmd)->dissector, key_type,\
					  (flow_cmd)->mask)

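/* Translate a flower match (flow-dissector keys) into the driver's
 * bnxt_tc_flow representation: L2/L3/L4 key+mask pairs, plus the outer
 * (tunnel) headers for decap matches, then parse the attached actions.
 */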
static int bnxt_tc_parse_flow(struct bnxt *bp,
			      struct tc_cls_flower_offload *tc_flow_cmd,
			      struct bnxt_tc_flow *flow)
{
	struct flow_dissector *dissector = tc_flow_cmd->dissector;
	u16 addr_type = 0;

	/* KEY_CONTROL and KEY_BASIC are needed for forming a meaningful key */
	if ((dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_CONTROL)) == 0 ||
	    (dissector->used_keys & BIT(FLOW_DISSECTOR_KEY_BASIC)) == 0) {
		netdev_info(bp->dev, "cannot form TC key: used_keys = 0x%x",
			    dissector->used_keys);
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_BASIC)) {
		struct flow_dissector_key_basic *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);
		struct flow_dissector_key_basic *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_BASIC);

		flow->l2_key.ether_type = key->n_proto;
		flow->l2_mask.ether_type = mask->n_proto;

		if (key->n_proto == htons(ETH_P_IP) ||
		    key->n_proto == htons(ETH_P_IPV6)) {
			flow->l4_key.ip_proto = key->ip_proto;
			flow->l4_mask.ip_proto = mask->ip_proto;
		}
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ETH_ADDRS)) {
		struct flow_dissector_key_eth_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);
		struct flow_dissector_key_eth_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ETH_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ETH_ADDRS;
		ether_addr_copy(flow->l2_key.dmac, key->dst);
		ether_addr_copy(flow->l2_mask.dmac, mask->dst);
		ether_addr_copy(flow->l2_key.smac, key->src);
		ether_addr_copy(flow->l2_mask.smac, mask->src);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_VLAN)) {
		struct flow_dissector_key_vlan *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);
		struct flow_dissector_key_vlan *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_VLAN);

		flow->l2_key.inner_vlan_tci =
			cpu_to_be16(VLAN_TCI(key->vlan_id, key->vlan_priority));
		flow->l2_mask.inner_vlan_tci =
			cpu_to_be16((VLAN_TCI(mask->vlan_id, mask->vlan_priority)));
		flow->l2_key.inner_vlan_tpid = htons(ETH_P_8021Q);
		flow->l2_mask.inner_vlan_tpid = htons(0xffff);
		flow->l2_key.num_vlans = 1;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV4_ADDRS;
		flow->l3_key.ipv4.daddr.s_addr = key->dst;
		flow->l3_mask.ipv4.daddr.s_addr = mask->dst;
		flow->l3_key.ipv4.saddr.s_addr = key->src;
		flow->l3_mask.ipv4.saddr.s_addr = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_IPV6_ADDRS)) {
		struct flow_dissector_key_ipv6_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);
		struct flow_dissector_key_ipv6_addrs *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_IPV6_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_IPV6_ADDRS;
		flow->l3_key.ipv6.daddr = key->dst;
		flow->l3_mask.ipv6.daddr = mask->dst;
		flow->l3_key.ipv6.saddr = key->src;
		flow->l3_mask.ipv6.saddr = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_PORTS;
		flow->l4_key.ports.dport = key->dst;
		flow->l4_mask.ports.dport = mask->dst;
		flow->l4_key.ports.sport = key->src;
		flow->l4_mask.ports.sport = mask->src;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ICMP)) {
		struct flow_dissector_key_icmp *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);
		struct flow_dissector_key_icmp *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ICMP);

		flow->flags |= BNXT_TC_FLOW_FLAGS_ICMP;
		flow->l4_key.icmp.type = key->type;
		flow->l4_key.icmp.code = key->code;
		flow->l4_mask.icmp.type = mask->type;
		flow->l4_mask.icmp.code = mask->code;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_CONTROL)) {
		struct flow_dissector_key_control *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_CONTROL);

		addr_type = key->addr_type;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS)) {
		struct flow_dissector_key_ipv4_addrs *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);
		struct flow_dissector_key_ipv4_addrs *mask =
			GET_MASK(tc_flow_cmd,
				 FLOW_DISSECTOR_KEY_ENC_IPV4_ADDRS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS;
		flow->tun_key.u.ipv4.dst = key->dst;
		flow->tun_mask.u.ipv4.dst = mask->dst;
		flow->tun_key.u.ipv4.src = key->src;
		flow->tun_mask.u.ipv4.src = mask->src;
	} else if (dissector_uses_key(dissector,
				      FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS)) {
		return -EOPNOTSUPP;
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_KEYID)) {
		struct flow_dissector_key_keyid *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);
		struct flow_dissector_key_keyid *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_KEYID);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ID;
		flow->tun_key.tun_id = key32_to_tunnel_id(key->keyid);
		flow->tun_mask.tun_id = key32_to_tunnel_id(mask->keyid);
	}

	if (dissector_uses_key(dissector, FLOW_DISSECTOR_KEY_ENC_PORTS)) {
		struct flow_dissector_key_ports *key =
			GET_KEY(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);
		struct flow_dissector_key_ports *mask =
			GET_MASK(tc_flow_cmd, FLOW_DISSECTOR_KEY_ENC_PORTS);

		flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_PORTS;
		flow->tun_key.tp_dst = key->dst;
		flow->tun_mask.tp_dst = mask->dst;
		flow->tun_key.tp_src = key->src;
		flow->tun_mask.tp_src = mask->src;
	}

	return bnxt_tc_parse_actions(bp, &flow->actions, tc_flow_cmd->exts);
}

static int bnxt_hwrm_cfa_flow_free(struct bnxt *bp, __le16 flow_handle)
{
	struct hwrm_cfa_flow_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_FREE, -1, -1);
	req.flow_handle = flow_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "Error: %s: flow_handle=0x%x rc=%d",
			    __func__, flow_handle, rc);
	return rc;
}

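/* An IPv6 mask is handed over as four 32-bit words; the overall prefix
 * length is the sum of the per-word prefix lengths.
 */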
static int ipv6_mask_len(struct in6_addr *mask)
{
	int mask_len = 0, i;

	for (i = 0; i < 4; i++)
		mask_len += inet_mask_len(mask->s6_addr32[i]);

	return mask_len;
}

static bool is_wildcard(void *mask, int len)
{
	const u8 *p = mask;
	int i;

	for (i = 0; i < len; i++) {
		if (p[i] != 0)
			return false;
	}
	return true;
}

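/* Pack a parsed flow into a CFA_FLOW_ALLOC HWRM request: match fields
 * (L2/L3/L4 and tunnel) plus action flags (fwd/drop/L2-rewrite). On
 * success the FW returns the flow_handle used by later free/stats cmds.
 */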
static int bnxt_hwrm_cfa_flow_alloc(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    __le16 ref_flow_handle,
				    __le32 tunnel_handle, __le16 *flow_handle)
{
	struct hwrm_cfa_flow_alloc_output *resp = bp->hwrm_cmd_resp_addr;
	struct bnxt_tc_actions *actions = &flow->actions;
	struct bnxt_tc_l3_key *l3_mask = &flow->l3_mask;
	struct bnxt_tc_l3_key *l3_key = &flow->l3_key;
	struct hwrm_cfa_flow_alloc_input req = { 0 };
	u16 flow_flags = 0, action_flags = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_ALLOC, -1, -1);

	req.src_fid = cpu_to_le16(flow->src_fid);
	req.ref_flow_handle = ref_flow_handle;

	if (actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP ||
	    actions->flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP) {
		req.tunnel_handle = tunnel_handle;
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_TUNNEL;
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_TUNNEL;
	}

	req.ethertype = flow->l2_key.ether_type;
	req.ip_proto = flow->l4_key.ip_proto;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_ETH_ADDRS) {
		memcpy(req.dmac, flow->l2_key.dmac, ETH_ALEN);
		memcpy(req.smac, flow->l2_key.smac, ETH_ALEN);
	}

	if (flow->l2_key.num_vlans > 0) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_NUM_VLAN_ONE;
		/* FW expects the inner_vlan_tci value to be set
		 * in outer_vlan_tci when num_vlans is 1 (which is
		 * always the case in TC.)
		 */
		req.outer_vlan_tci = flow->l2_key.inner_vlan_tci;
	}

	/* If all IP and L4 fields are wildcarded then this is an L2 flow */
	if (is_wildcard(l3_mask, sizeof(*l3_mask)) &&
	    is_wildcard(&flow->l4_mask, sizeof(flow->l4_mask))) {
		flow_flags |= CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_L2;
	} else {
		flow_flags |= flow->l2_key.ether_type == htons(ETH_P_IP) ?
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV4 :
				CFA_FLOW_ALLOC_REQ_FLAGS_FLOWTYPE_IPV6;

		if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV4_ADDRS) {
			req.ip_dst[0] = l3_key->ipv4.daddr.s_addr;
			req.ip_dst_mask_len =
				inet_mask_len(l3_mask->ipv4.daddr.s_addr);
			req.ip_src[0] = l3_key->ipv4.saddr.s_addr;
			req.ip_src_mask_len =
				inet_mask_len(l3_mask->ipv4.saddr.s_addr);
		} else if (flow->flags & BNXT_TC_FLOW_FLAGS_IPV6_ADDRS) {
			memcpy(req.ip_dst, l3_key->ipv6.daddr.s6_addr32,
			       sizeof(req.ip_dst));
			req.ip_dst_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.daddr);
			memcpy(req.ip_src, l3_key->ipv6.saddr.s6_addr32,
			       sizeof(req.ip_src));
			req.ip_src_mask_len =
					ipv6_mask_len(&l3_mask->ipv6.saddr);
		}
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) {
		req.l4_src_port = flow->l4_key.ports.sport;
		req.l4_src_port_mask = flow->l4_mask.ports.sport;
		req.l4_dst_port = flow->l4_key.ports.dport;
		req.l4_dst_port_mask = flow->l4_mask.ports.dport;
	} else if (flow->flags & BNXT_TC_FLOW_FLAGS_ICMP) {
		/* l4 ports serve as type/code when ip_proto is ICMP */
		req.l4_src_port = htons(flow->l4_key.icmp.type);
		req.l4_src_port_mask = htons(flow->l4_mask.icmp.type);
		req.l4_dst_port = htons(flow->l4_key.icmp.code);
		req.l4_dst_port_mask = htons(flow->l4_mask.icmp.code);
	}
	req.flags = cpu_to_le16(flow_flags);

	if (actions->flags & BNXT_TC_ACTION_FLAG_DROP) {
		action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_DROP;
	} else {
		if (actions->flags & BNXT_TC_ACTION_FLAG_FWD) {
			action_flags |= CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_FWD;
			req.dst_fid = cpu_to_le16(actions->dst_fid);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_PUSH_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			req.l2_rewrite_vlan_tpid = actions->push_vlan_tpid;
			req.l2_rewrite_vlan_tci = actions->push_vlan_tci;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
		if (actions->flags & BNXT_TC_ACTION_FLAG_POP_VLAN) {
			action_flags |=
			    CFA_FLOW_ALLOC_REQ_ACTION_FLAGS_L2_HEADER_REWRITE;
			/* Rewrite config with tpid = 0 implies vlan pop */
			req.l2_rewrite_vlan_tpid = 0;
			memcpy(&req.l2_rewrite_dmac, &req.dmac, ETH_ALEN);
			memcpy(&req.l2_rewrite_smac, &req.smac, ETH_ALEN);
		}
	}
	req.action_flags = cpu_to_le16(action_flags);

	mutex_lock(&bp->hwrm_cmd_lock);

	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*flow_handle = resp->flow_handle;

	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

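/* Program a VXLAN decap filter that matches the outer tunnel headers
 * (dst MAC, VNI, outer IPs, UDP dst port). Flows sharing the same outer
 * L2 key are chained to an existing filter via l2_ctxt_ref_id.
 */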
static int hwrm_cfa_decap_filter_alloc(struct bnxt *bp,
				       struct bnxt_tc_flow *flow,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 ref_decap_handle,
				       __le32 *decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_decap_filter_alloc_input req = { 0 };
	struct ip_tunnel_key *tun_key = &flow->tun_key;
	u32 enables = 0;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_ALLOC, -1, -1);

	req.flags = cpu_to_le32(CFA_DECAP_FILTER_ALLOC_REQ_FLAGS_OVS_TUNNEL);
	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_TYPE |
		   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IP_PROTOCOL;
	req.tunnel_type = CFA_DECAP_FILTER_ALLOC_REQ_TUNNEL_TYPE_VXLAN;
	req.ip_protocol = CFA_DECAP_FILTER_ALLOC_REQ_IP_PROTOCOL_UDP;

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ID) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_TUNNEL_ID;
		/* tunnel_id is wrongly defined in hsi defn. as __le32 */
		req.tunnel_id = tunnel_id_to_key32(tun_key->tun_id);
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_MACADDR;
		ether_addr_copy(req.dst_macaddr, l2_info->dmac);
	}
	if (l2_info->num_vlans) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_T_IVLAN_VID;
		req.t_ivlan_vid = l2_info->inner_vlan_tci;
	}

	enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_ETHERTYPE;
	req.ethertype = htons(ETH_P_IP);

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_IPV4_ADDRS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_SRC_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_IPADDR |
			   CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_IPADDR_TYPE;
		req.ip_addr_type = CFA_DECAP_FILTER_ALLOC_REQ_IP_ADDR_TYPE_IPV4;
		req.dst_ipaddr[0] = tun_key->u.ipv4.dst;
		req.src_ipaddr[0] = tun_key->u.ipv4.src;
	}

	if (flow->flags & BNXT_TC_FLOW_FLAGS_TUNL_PORTS) {
		enables |= CFA_DECAP_FILTER_ALLOC_REQ_ENABLES_DST_PORT;
		req.dst_port = tun_key->tp_dst;
	}

	/* Even though the decap_handle returned by hwrm_cfa_decap_filter_alloc
	 * is defined as __le32, l2_ctxt_ref_id is defined in HSI as __le16.
	 */
	req.l2_ctxt_ref_id = (__force __le16)ref_decap_handle;
	req.enables = cpu_to_le32(enables);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*decap_filter_handle = resp->decap_filter_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_decap_filter_free(struct bnxt *bp,
				      __le32 decap_filter_handle)
{
	struct hwrm_cfa_decap_filter_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_DECAP_FILTER_FREE, -1, -1);
	req.decap_filter_id = decap_filter_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}

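/* Build a VXLAN encap record from the resolved outer headers (MACs,
 * optional VLAN tag, outer IPv4 header, UDP dst port and VNI) and have
 * the FW allocate an encap record handle for it.
 */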
static int hwrm_cfa_encap_record_alloc(struct bnxt *bp,
				       struct ip_tunnel_key *encap_key,
				       struct bnxt_tc_l2_key *l2_info,
				       __le32 *encap_record_handle)
{
	struct hwrm_cfa_encap_record_alloc_output *resp =
						bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_encap_record_alloc_input req = { 0 };
	struct hwrm_cfa_encap_data_vxlan *encap =
			(struct hwrm_cfa_encap_data_vxlan *)&req.encap_data;
	struct hwrm_vxlan_ipv4_hdr *encap_ipv4 =
			(struct hwrm_vxlan_ipv4_hdr *)encap->l3;
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_ALLOC, -1, -1);

	req.encap_type = CFA_ENCAP_RECORD_ALLOC_REQ_ENCAP_TYPE_VXLAN;

	ether_addr_copy(encap->dst_mac_addr, l2_info->dmac);
	ether_addr_copy(encap->src_mac_addr, l2_info->smac);
	if (l2_info->num_vlans) {
		encap->num_vlan_tags = l2_info->num_vlans;
		encap->ovlan_tci = l2_info->inner_vlan_tci;
		encap->ovlan_tpid = l2_info->inner_vlan_tpid;
	}

	encap_ipv4->ver_hlen = 4 << VXLAN_IPV4_HDR_VER_HLEN_VERSION_SFT;
	encap_ipv4->ver_hlen |= 5 << VXLAN_IPV4_HDR_VER_HLEN_HEADER_LENGTH_SFT;
	encap_ipv4->ttl = encap_key->ttl;

	encap_ipv4->dest_ip_addr = encap_key->u.ipv4.dst;
	encap_ipv4->src_ip_addr = encap_key->u.ipv4.src;
	encap_ipv4->protocol = IPPROTO_UDP;

	encap->dst_port = encap_key->tp_dst;
	encap->vni = tunnel_id_to_key32(encap_key->tun_id);

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc)
		*encap_record_handle = resp->encap_record_id;
	else
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	mutex_unlock(&bp->hwrm_cmd_lock);

	return rc;
}

static int hwrm_cfa_encap_record_free(struct bnxt *bp,
				      __le32 encap_record_handle)
{
	struct hwrm_cfa_encap_record_free_input req = { 0 };
	int rc;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_ENCAP_RECORD_FREE, -1, -1);
	req.encap_record_id = encap_record_handle;

	rc = hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (rc)
		netdev_info(bp->dev, "%s: Error rc=%d", __func__, rc);
	return rc;
}

static int bnxt_tc_put_l2_node(struct bnxt *bp,
			       struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *l2_node = flow_node->l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the L2 shared flow list */
	list_del(&flow_node->l2_list_node);
	if (--l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->l2_table, &l2_node->node,
					    tc_info->l2_ht_params);
		if (rc)
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_remove_fast: %d",
				   __func__, rc);
		kfree_rcu(l2_node, rcu);
	}
	return 0;
}

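/* Look up an L2 node by key in the given hash table; on a miss allocate
 * and insert a new node with zero refcount. Returns NULL on alloc or
 * insert failure.
 */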
static struct bnxt_tc_l2_node *
bnxt_tc_get_l2_node(struct bnxt *bp, struct rhashtable *l2_table,
		    struct rhashtable_params ht_params,
		    struct bnxt_tc_l2_key *l2_key)
{
	struct bnxt_tc_l2_node *l2_node;
	int rc;

	l2_node = rhashtable_lookup_fast(l2_table, l2_key, ht_params);
	if (!l2_node) {
		l2_node = kzalloc(sizeof(*l2_node), GFP_KERNEL);
		if (!l2_node) {
			rc = -ENOMEM;
			return NULL;
		}

		l2_node->key = *l2_key;
		rc = rhashtable_insert_fast(l2_table, &l2_node->node,
					    ht_params);
		if (rc) {
			kfree_rcu(l2_node, rcu);
			netdev_err(bp->dev,
				   "Error: %s: rhashtable_insert_fast: %d",
				   __func__, rc);
			return NULL;
		}
		INIT_LIST_HEAD(&l2_node->common_l2_flows);
	}
	return l2_node;
}

/* Get the ref_flow_handle for a flow by checking if there are any other
 * flows that share the same L2 key as this flow.
 */
static int
bnxt_tc_get_ref_flow_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
			    struct bnxt_tc_flow_node *flow_node,
			    __le16 *ref_flow_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *l2_node;

	l2_node = bnxt_tc_get_l2_node(bp, &tc_info->l2_table,
				      tc_info->l2_ht_params,
				      &flow->l2_key);
	if (!l2_node)
		return -1;

	/* If any other flow is using this l2_node, use its flow_handle
	 * as the ref_flow_handle
	 */
	if (l2_node->refcount > 0) {
		ref_flow_node = list_first_entry(&l2_node->common_l2_flows,
						 struct bnxt_tc_flow_node,
						 l2_list_node);
		*ref_flow_handle = ref_flow_node->flow_handle;
	} else {
		*ref_flow_handle = cpu_to_le16(0xffff);
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching l2 key can use the flow_handle of this flow
	 * as their ref_flow_handle
	 */
	flow_node->l2_node = l2_node;
	list_add(&flow_node->l2_list_node, &l2_node->common_l2_flows);
	l2_node->refcount++;
	return 0;
}

/* After the flow parsing is done, this routine is used for checking
 * if there are any aspects of the flow that prevent it from being
 * offloaded.
 */
static bool bnxt_tc_can_offload(struct bnxt *bp, struct bnxt_tc_flow *flow)
{
	/* If L4 ports are specified then ip_proto must be TCP or UDP */
	if ((flow->flags & BNXT_TC_FLOW_FLAGS_PORTS) &&
	    (flow->l4_key.ip_proto != IPPROTO_TCP &&
	     flow->l4_key.ip_proto != IPPROTO_UDP)) {
		netdev_info(bp->dev, "Cannot offload non-TCP/UDP (%d) ports",
			    flow->l4_key.ip_proto);
		return false;
	}

	return true;
}

/* Returns the final refcount of the node on success
 * or a negative error code on failure
 */
static int bnxt_tc_put_tunnel_node(struct bnxt *bp,
				   struct rhashtable *tunnel_table,
				   struct rhashtable_params *ht_params,
				   struct bnxt_tc_tunnel_node *tunnel_node)
{
	int rc;

	if (--tunnel_node->refcount == 0) {
		rc = rhashtable_remove_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
			rc = -1;
		}
		kfree_rcu(tunnel_node, rcu);
		return rc;
	} else {
		return tunnel_node->refcount;
	}
}

/* Get (or add) either encap or decap tunnel node from/to the supplied
 * hash table.
 */
static struct bnxt_tc_tunnel_node *
bnxt_tc_get_tunnel_node(struct bnxt *bp, struct rhashtable *tunnel_table,
			struct rhashtable_params *ht_params,
			struct ip_tunnel_key *tun_key)
{
	struct bnxt_tc_tunnel_node *tunnel_node;
	int rc;

	tunnel_node = rhashtable_lookup_fast(tunnel_table, tun_key, *ht_params);
	if (!tunnel_node) {
		tunnel_node = kzalloc(sizeof(*tunnel_node), GFP_KERNEL);
		if (!tunnel_node) {
			rc = -ENOMEM;
			goto err;
		}

		tunnel_node->key = *tun_key;
		tunnel_node->tunnel_handle = INVALID_TUNNEL_HANDLE;
		rc = rhashtable_insert_fast(tunnel_table, &tunnel_node->node,
					    *ht_params);
		if (rc) {
			kfree_rcu(tunnel_node, rcu);
			goto err;
		}
	}
	tunnel_node->refcount++;
	return tunnel_node;
err:
	netdev_info(bp->dev, "error rc=%d", rc);
	return NULL;
}

static int bnxt_tc_get_ref_decap_handle(struct bnxt *bp,
					struct bnxt_tc_flow *flow,
					struct bnxt_tc_l2_key *l2_key,
					struct bnxt_tc_flow_node *flow_node,
					__le32 *ref_decap_handle)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *ref_flow_node;
	struct bnxt_tc_l2_node *decap_l2_node;

	decap_l2_node = bnxt_tc_get_l2_node(bp, &tc_info->decap_l2_table,
					    tc_info->decap_l2_ht_params,
					    l2_key);
	if (!decap_l2_node)
		return -1;

	/* If any other flow is using this decap_l2_node, use its decap_handle
	 * as the ref_decap_handle
	 */
	if (decap_l2_node->refcount > 0) {
		ref_flow_node =
			list_first_entry(&decap_l2_node->common_l2_flows,
					 struct bnxt_tc_flow_node,
					 decap_l2_list_node);
		*ref_decap_handle = ref_flow_node->decap_node->tunnel_handle;
	} else {
		*ref_decap_handle = INVALID_TUNNEL_HANDLE;
	}

	/* Insert the l2_node into the flow_node so that subsequent flows
	 * with a matching decap l2 key can use the decap_filter_handle of
	 * this flow as their ref_decap_handle
	 */
	flow_node->decap_l2_node = decap_l2_node;
	list_add(&flow_node->decap_l2_list_node,
		 &decap_l2_node->common_l2_flows);
	decap_l2_node->refcount++;
	return 0;
}

static void bnxt_tc_put_decap_l2_node(struct bnxt *bp,
				      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_l2_node *decap_l2_node = flow_node->decap_l2_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* remove flow_node from the decap L2 sharing flow list */
	list_del(&flow_node->decap_l2_list_node);
	if (--decap_l2_node->refcount == 0) {
		rc = rhashtable_remove_fast(&tc_info->decap_l2_table,
					    &decap_l2_node->node,
					    tc_info->decap_l2_ht_params);
		if (rc)
			netdev_err(bp->dev, "rhashtable_remove_fast rc=%d", rc);
		kfree_rcu(decap_l2_node, rcu);
	}
}

static void bnxt_tc_put_decap_handle(struct bnxt *bp,
				     struct bnxt_tc_flow_node *flow_node)
{
	__le32 decap_handle = flow_node->decap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	if (flow_node->decap_l2_node)
		bnxt_tc_put_decap_l2_node(bp, flow_node);

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				     &tc_info->decap_ht_params,
				     flow_node->decap_node);
	if (!rc && decap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_decap_filter_free(bp, decap_handle);
}

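/* Resolve the outer L2 headers for a tunnel: route the tunnel dst IP,
 * require that the route egresses via the PF netdev (directly or through
 * a VLAN upper), then use the neighbour's MAC as dmac and the egress
 * device's MAC as smac, filling in saddr/ttl from the route.
 */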
static int bnxt_tc_resolve_tunnel_hdrs(struct bnxt *bp,
				       struct ip_tunnel_key *tun_key,
				       struct bnxt_tc_l2_key *l2_info)
{
#ifdef CONFIG_INET
	struct net_device *real_dst_dev = bp->dev;
	struct flowi4 flow = { {0} };
	struct net_device *dst_dev;
	struct neighbour *nbr;
	struct rtable *rt;
	int rc;

	flow.flowi4_proto = IPPROTO_UDP;
	flow.fl4_dport = tun_key->tp_dst;
	flow.daddr = tun_key->u.ipv4.dst;

	rt = ip_route_output_key(dev_net(real_dst_dev), &flow);
	if (IS_ERR(rt)) {
		netdev_info(bp->dev, "no route to %pI4b", &flow.daddr);
		return -EOPNOTSUPP;
	}

	/* The route must either point to the real_dst_dev or a dst_dev that
	 * uses the real_dst_dev.
	 */
	dst_dev = rt->dst.dev;
	if (is_vlan_dev(dst_dev)) {
#if IS_ENABLED(CONFIG_VLAN_8021Q)
		struct vlan_dev_priv *vlan = vlan_dev_priv(dst_dev);

		if (vlan->real_dev != real_dst_dev) {
			netdev_info(bp->dev,
				    "dst_dev(%s) doesn't use PF-if(%s)",
				    netdev_name(dst_dev),
				    netdev_name(real_dst_dev));
			rc = -EOPNOTSUPP;
			goto put_rt;
		}
		l2_info->inner_vlan_tci = htons(vlan->vlan_id);
		l2_info->inner_vlan_tpid = vlan->vlan_proto;
		l2_info->num_vlans = 1;
#endif
	} else if (dst_dev != real_dst_dev) {
		netdev_info(bp->dev,
			    "dst_dev(%s) for %pI4b is not PF-if(%s)",
			    netdev_name(dst_dev), &flow.daddr,
			    netdev_name(real_dst_dev));
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	nbr = dst_neigh_lookup(&rt->dst, &flow.daddr);
	if (!nbr) {
		netdev_info(bp->dev, "can't lookup neighbor for %pI4b",
			    &flow.daddr);
		rc = -EOPNOTSUPP;
		goto put_rt;
	}

	tun_key->u.ipv4.src = flow.saddr;
	tun_key->ttl = ip4_dst_hoplimit(&rt->dst);
	neigh_ha_snapshot(l2_info->dmac, nbr, dst_dev);
	ether_addr_copy(l2_info->smac, dst_dev->dev_addr);
	neigh_release(nbr);
	ip_rt_put(rt);

	return 0;
put_rt:
	ip_rt_put(rt);
	return rc;
#else
	return -EOPNOTSUPP;
#endif
}

static int bnxt_tc_get_decap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *decap_filter_handle)
{
	struct ip_tunnel_key *decap_key = &flow->tun_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_l2_key l2_info = { {0} };
	struct bnxt_tc_tunnel_node *decap_node;
	struct ip_tunnel_key tun_key = { 0 };
	struct bnxt_tc_l2_key *decap_l2_info;
	__le32 ref_decap_handle;
	int rc;

	/* Check if there's another flow using the same tunnel decap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	decap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->decap_table,
					     &tc_info->decap_ht_params,
					     decap_key);
	if (!decap_node)
		return -ENOMEM;

	flow_node->decap_node = decap_node;

	if (decap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	/* Resolve the L2 fields for tunnel decap
	 * Resolve the route for remote vtep (saddr) of the decap key
	 * Find its next-hop mac addrs
	 */
	tun_key.u.ipv4.dst = flow->tun_key.u.ipv4.src;
	tun_key.tp_dst = flow->tun_key.tp_dst;
	rc = bnxt_tc_resolve_tunnel_hdrs(bp, &tun_key, &l2_info);
	if (rc)
		goto put_decap;

	decap_l2_info = &decap_node->l2_info;
	/* decap smac is wildcarded */
	ether_addr_copy(decap_l2_info->dmac, l2_info.smac);
	if (l2_info.num_vlans) {
		decap_l2_info->num_vlans = l2_info.num_vlans;
		decap_l2_info->inner_vlan_tpid = l2_info.inner_vlan_tpid;
		decap_l2_info->inner_vlan_tci = l2_info.inner_vlan_tci;
	}
	flow->flags |= BNXT_TC_FLOW_FLAGS_TUNL_ETH_ADDRS;

	/* For getting a decap_filter_handle we first need to check if
	 * there are any other decap flows that share the same tunnel L2
	 * key and if so, pass that flow's decap_filter_handle as the
	 * ref_decap_handle for this flow.
	 */
	rc = bnxt_tc_get_ref_decap_handle(bp, flow, decap_l2_info, flow_node,
					  &ref_decap_handle);
	if (rc)
		goto put_decap;

	/* Issue the hwrm cmd to allocate a decap filter handle */
	rc = hwrm_cfa_decap_filter_alloc(bp, flow, decap_l2_info,
					 ref_decap_handle,
					 &decap_node->tunnel_handle);
	if (rc)
		goto put_decap_l2;

done:
	*decap_filter_handle = decap_node->tunnel_handle;
	return 0;

put_decap_l2:
	bnxt_tc_put_decap_l2_node(bp, flow_node);
put_decap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->decap_table,
				&tc_info->decap_ht_params,
				flow_node->decap_node);
	return rc;
}

static void bnxt_tc_put_encap_handle(struct bnxt *bp,
				     struct bnxt_tc_tunnel_node *encap_node)
{
	__le32 encap_handle = encap_node->tunnel_handle;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	rc = bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				     &tc_info->encap_ht_params, encap_node);
	if (!rc && encap_handle != INVALID_TUNNEL_HANDLE)
		hwrm_cfa_encap_record_free(bp, encap_handle);
}

/* Lookup the tunnel encap table and check if there's an encap_handle
 * alloc'd already.
 * If not, query L2 info via a route lookup and issue an encap_record_alloc
 * cmd to FW.
 */
static int bnxt_tc_get_encap_handle(struct bnxt *bp, struct bnxt_tc_flow *flow,
				    struct bnxt_tc_flow_node *flow_node,
				    __le32 *encap_handle)
{
	struct ip_tunnel_key *encap_key = &flow->actions.tun_encap_key;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_tunnel_node *encap_node;
	int rc;

	/* Check if there's another flow using the same tunnel encap.
	 * If not, add this tunnel to the table and resolve the other
	 * tunnel header fields
	 */
	encap_node = bnxt_tc_get_tunnel_node(bp, &tc_info->encap_table,
					     &tc_info->encap_ht_params,
					     encap_key);
	if (!encap_node)
		return -ENOMEM;

	flow_node->encap_node = encap_node;

	if (encap_node->tunnel_handle != INVALID_TUNNEL_HANDLE)
		goto done;

	rc = bnxt_tc_resolve_tunnel_hdrs(bp, encap_key, &encap_node->l2_info);
	if (rc)
		goto put_encap;

	/* Allocate a new tunnel encap record */
	rc = hwrm_cfa_encap_record_alloc(bp, encap_key, &encap_node->l2_info,
					 &encap_node->tunnel_handle);
	if (rc)
		goto put_encap;

done:
	*encap_handle = encap_node->tunnel_handle;
	return 0;

put_encap:
	bnxt_tc_put_tunnel_node(bp, &tc_info->encap_table,
				&tc_info->encap_ht_params, encap_node);
	return rc;
}

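/* A flow carries at most one of the tunnel encap/decap action flags;
 * these two helpers route get/put of the tunnel handle to the matching
 * decap-filter or encap-record path.
 */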
static void bnxt_tc_put_tunnel_handle(struct bnxt *bp,
				      struct bnxt_tc_flow *flow,
				      struct bnxt_tc_flow_node *flow_node)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		bnxt_tc_put_decap_handle(bp, flow_node);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		bnxt_tc_put_encap_handle(bp, flow_node->encap_node);
}

static int bnxt_tc_get_tunnel_handle(struct bnxt *bp,
				     struct bnxt_tc_flow *flow,
				     struct bnxt_tc_flow_node *flow_node,
				     __le32 *tunnel_handle)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		return bnxt_tc_get_decap_handle(bp, flow, flow_node,
						tunnel_handle);
	else if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_ENCAP)
		return bnxt_tc_get_encap_handle(bp, flow, flow_node,
						tunnel_handle);
	else
		return 0;
}

static int __bnxt_tc_del_flow(struct bnxt *bp,
			      struct bnxt_tc_flow_node *flow_node)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc;

	/* send HWRM cmd to free the flow-id */
	bnxt_hwrm_cfa_flow_free(bp, flow_node->flow_handle);

	mutex_lock(&tc_info->lock);

	/* release references to any tunnel encap/decap nodes */
	bnxt_tc_put_tunnel_handle(bp, &flow_node->flow, flow_node);

	/* release reference to l2 node */
	bnxt_tc_put_l2_node(bp, flow_node);

	mutex_unlock(&tc_info->lock);

	rc = rhashtable_remove_fast(&tc_info->flow_table, &flow_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		netdev_err(bp->dev, "Error: %s: rhashtable_remove_fast rc=%d",
			   __func__, rc);

	kfree_rcu(flow_node, rcu);
	return 0;
}

static void bnxt_tc_set_src_fid(struct bnxt *bp, struct bnxt_tc_flow *flow,
				u16 src_fid)
{
	if (flow->actions.flags & BNXT_TC_ACTION_FLAG_TUNNEL_DECAP)
		flow->src_fid = bp->pf.fw_fid;
	else
		flow->src_fid = src_fid;
}

/* Add a new flow or replace an existing flow.
 * Notes on locking:
 * There are essentially two critical sections here.
 * 1. while adding a new flow
 *    a) lookup l2-key
 *    b) issue HWRM cmd and get flow_handle
 *    c) link l2-key with flow
 * 2. while deleting a flow
 *    a) unlinking l2-key from flow
 * A lock is needed to protect these two critical sections.
 *
 * The hash-tables are already protected by the rhashtable API.
 */
static int bnxt_tc_add_flow(struct bnxt *bp, u16 src_fid,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_node *new_node, *old_node;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow *flow;
	__le32 tunnel_handle = 0;
	__le16 ref_flow_handle;
	int rc;

	/* allocate memory for the new flow and its node */
	new_node = kzalloc(sizeof(*new_node), GFP_KERNEL);
	if (!new_node) {
		rc = -ENOMEM;
		goto done;
	}
	new_node->cookie = tc_flow_cmd->cookie;
	flow = &new_node->flow;

	rc = bnxt_tc_parse_flow(bp, tc_flow_cmd, flow);
	if (rc)
		goto free_node;

	bnxt_tc_set_src_fid(bp, flow, src_fid);

	if (!bnxt_tc_can_offload(bp, flow)) {
		rc = -ENOSPC;
		goto free_node;
	}

	/* If a flow exists with the same cookie, delete it */
	old_node = rhashtable_lookup_fast(&tc_info->flow_table,
					  &tc_flow_cmd->cookie,
					  tc_info->flow_ht_params);
	if (old_node)
		__bnxt_tc_del_flow(bp, old_node);

	/* Check if the L2 part of the flow has been offloaded already.
	 * If so, bump up its refcnt and get its reference handle.
	 */
	mutex_lock(&tc_info->lock);
	rc = bnxt_tc_get_ref_flow_handle(bp, flow, new_node, &ref_flow_handle);
	if (rc)
		goto unlock;

	/* If the flow involves tunnel encap/decap, get tunnel_handle */
	rc = bnxt_tc_get_tunnel_handle(bp, flow, new_node, &tunnel_handle);
	if (rc)
		goto put_l2;

	/* send HWRM cmd to alloc the flow */
	rc = bnxt_hwrm_cfa_flow_alloc(bp, flow, ref_flow_handle,
				      tunnel_handle, &new_node->flow_handle);
	if (rc)
		goto put_tunnel;

	flow->lastused = jiffies;
	spin_lock_init(&flow->stats_lock);
	/* add new flow to flow-table */
	rc = rhashtable_insert_fast(&tc_info->flow_table, &new_node->node,
				    tc_info->flow_ht_params);
	if (rc)
		goto hwrm_flow_free;

	mutex_unlock(&tc_info->lock);
	return 0;

hwrm_flow_free:
	bnxt_hwrm_cfa_flow_free(bp, new_node->flow_handle);
put_tunnel:
	bnxt_tc_put_tunnel_handle(bp, flow, new_node);
put_l2:
	bnxt_tc_put_l2_node(bp, new_node);
unlock:
	mutex_unlock(&tc_info->lock);
free_node:
	kfree_rcu(new_node, rcu);
done:
	netdev_err(bp->dev, "Error: %s: cookie=0x%lx error=%d",
		   __func__, tc_flow_cmd->cookie, rc);
	return rc;
}

static int bnxt_tc_del_flow(struct bnxt *bp,
			    struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "ERROR: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -EINVAL;
	}

	return __bnxt_tc_del_flow(bp, flow_node);
}

static int bnxt_tc_get_flow_stats(struct bnxt *bp,
				  struct tc_cls_flower_offload *tc_flow_cmd)
{
	struct bnxt_tc_flow_stats stats, *curr_stats, *prev_stats;
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct bnxt_tc_flow_node *flow_node;
	struct bnxt_tc_flow *flow;
	unsigned long lastused;

	flow_node = rhashtable_lookup_fast(&tc_info->flow_table,
					   &tc_flow_cmd->cookie,
					   tc_info->flow_ht_params);
	if (!flow_node) {
		netdev_info(bp->dev, "Error: no flow_node for cookie %lx",
			    tc_flow_cmd->cookie);
		return -1;
	}

	flow = &flow_node->flow;
	curr_stats = &flow->stats;
	prev_stats = &flow->prev_stats;

	spin_lock(&flow->stats_lock);
	stats.packets = curr_stats->packets - prev_stats->packets;
	stats.bytes = curr_stats->bytes - prev_stats->bytes;
	*prev_stats = *curr_stats;
	lastused = flow->lastused;
	spin_unlock(&flow->stats_lock);

	tcf_exts_stats_update(tc_flow_cmd->exts, stats.bytes, stats.packets,
			      lastused);
	return 0;
}

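/* Query HW counters for a batch of flows with a single CFA_FLOW_STATS
 * request; the response carries per-flow packet and byte counts in the
 * same order as the flow handles in the request.
 */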
static int
bnxt_hwrm_cfa_flow_stats_get(struct bnxt *bp, int num_flows,
			     struct bnxt_tc_stats_batch stats_batch[])
{
	struct hwrm_cfa_flow_stats_output *resp = bp->hwrm_cmd_resp_addr;
	struct hwrm_cfa_flow_stats_input req = { 0 };
	__le16 *req_flow_handles = &req.flow_handle_0;
	int rc, i;

	bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_CFA_FLOW_STATS, -1, -1);
	req.num_flows = cpu_to_le16(num_flows);
	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;

		req_flow_handles[i] = flow_node->flow_handle;
	}

	mutex_lock(&bp->hwrm_cmd_lock);
	rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT);
	if (!rc) {
		__le64 *resp_packets = &resp->packet_0;
		__le64 *resp_bytes = &resp->byte_0;

		for (i = 0; i < num_flows; i++) {
			stats_batch[i].hw_stats.packets =
						le64_to_cpu(resp_packets[i]);
			stats_batch[i].hw_stats.bytes =
						le64_to_cpu(resp_bytes[i]);
		}
	} else {
		netdev_info(bp->dev, "error rc=%d", rc);
	}

	mutex_unlock(&bp->hwrm_cmd_lock);
	return rc;
}

/* Add val to accum while handling a possible wraparound
 * of val. Even though val is of type u64, its actual width
 * is denoted by mask and will wrap-around beyond that width.
 */
static void accumulate_val(u64 *accum, u64 val, u64 mask)
{
#define low_bits(x, mask)		((x) & (mask))
#define high_bits(x, mask)		((x) & ~(mask))
	bool wrapped = val < low_bits(*accum, mask);

	*accum = high_bits(*accum, mask) + val;
	if (wrapped)
		*accum += (mask + 1);
}

/* The HW counters' width is much less than 64bits.
 * Handle possible wrap-around while updating the stat counters
 */
static void bnxt_flow_stats_accum(struct bnxt_tc_info *tc_info,
				  struct bnxt_tc_flow_stats *acc_stats,
				  struct bnxt_tc_flow_stats *hw_stats)
{
	accumulate_val(&acc_stats->bytes, hw_stats->bytes, tc_info->bytes_mask);
	accumulate_val(&acc_stats->packets, hw_stats->packets,
		       tc_info->packets_mask);
}

static int
bnxt_tc_flow_stats_batch_update(struct bnxt *bp, int num_flows,
				struct bnxt_tc_stats_batch stats_batch[])
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int rc, i;

	rc = bnxt_hwrm_cfa_flow_stats_get(bp, num_flows, stats_batch);
	if (rc)
		return rc;

	for (i = 0; i < num_flows; i++) {
		struct bnxt_tc_flow_node *flow_node = stats_batch[i].flow_node;
		struct bnxt_tc_flow *flow = &flow_node->flow;

		spin_lock(&flow->stats_lock);
		bnxt_flow_stats_accum(tc_info, &flow->stats,
				      &stats_batch[i].hw_stats);
		if (flow->stats.packets != flow->prev_stats.packets)
			flow->lastused = jiffies;
		spin_unlock(&flow->stats_lock);
	}

	return 0;
}

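/* Collect up to BNXT_FLOW_STATS_BATCH_MAX flow nodes from the flow-table
 * walker into stats_batch[]. -EAGAIN from the walker restarts the batch;
 * a NULL flow_node means the walk is complete.
 */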
static int
bnxt_tc_flow_stats_batch_prep(struct bnxt *bp,
			      struct bnxt_tc_stats_batch stats_batch[],
			      int *num_flows)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	struct rhashtable_iter *iter = &tc_info->iter;
	void *flow_node;
	int rc, i;

	rhashtable_walk_start(iter);

	rc = 0;
	for (i = 0; i < BNXT_FLOW_STATS_BATCH_MAX; i++) {
		flow_node = rhashtable_walk_next(iter);
		if (IS_ERR(flow_node)) {
			i = 0;
			if (PTR_ERR(flow_node) == -EAGAIN) {
				continue;
			} else {
				rc = PTR_ERR(flow_node);
				goto done;
			}
		}

		/* No more flows */
		if (!flow_node)
			goto done;

		stats_batch[i].flow_node = flow_node;
	}
done:
	rhashtable_walk_stop(iter);
	*num_flows = i;
	return rc;
}

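/* Periodic stats poller: walk the flow table in batches, query the FW
 * for HW counters and fold them into each flow's accumulated stats.
 */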
void bnxt_tc_flow_stats_work(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;
	int num_flows, rc;

	num_flows = atomic_read(&tc_info->flow_table.nelems);
	if (!num_flows)
		return;

	rhashtable_walk_enter(&tc_info->flow_table, &tc_info->iter);

	for (;;) {
		rc = bnxt_tc_flow_stats_batch_prep(bp, tc_info->stats_batch,
						   &num_flows);
		if (rc) {
			if (rc == -EAGAIN)
				continue;
			break;
		}

		if (!num_flows)
			break;

		bnxt_tc_flow_stats_batch_update(bp, num_flows,
						tc_info->stats_batch);
	}

	rhashtable_walk_exit(&tc_info->iter);
}

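/* Entry point for tc-flower offload commands: dispatch REPLACE, DESTROY
 * and STATS requests from the stack.
 */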
int bnxt_tc_setup_flower(struct bnxt *bp, u16 src_fid,
			 struct tc_cls_flower_offload *cls_flower)
{
	int rc = 0;

	switch (cls_flower->command) {
	case TC_CLSFLOWER_REPLACE:
		rc = bnxt_tc_add_flow(bp, src_fid, cls_flower);
		break;

	case TC_CLSFLOWER_DESTROY:
		rc = bnxt_tc_del_flow(bp, cls_flower);
		break;

	case TC_CLSFLOWER_STATS:
		rc = bnxt_tc_get_flow_stats(bp, cls_flower);
		break;
	}
	return rc;
}

static const struct rhashtable_params bnxt_tc_flow_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_flow_node, node),
	.key_offset = offsetof(struct bnxt_tc_flow_node, cookie),
	.key_len = sizeof(((struct bnxt_tc_flow_node *)0)->cookie),
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_decap_l2_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_l2_node, node),
	.key_offset = offsetof(struct bnxt_tc_l2_node, key),
	.key_len = BNXT_TC_L2_KEY_LEN,
	.automatic_shrinking = true
};

static const struct rhashtable_params bnxt_tc_tunnel_ht_params = {
	.head_offset = offsetof(struct bnxt_tc_tunnel_node, node),
	.key_offset = offsetof(struct bnxt_tc_tunnel_node, key),
	.key_len = sizeof(struct ip_tunnel_key),
	.automatic_shrinking = true
};

/* convert counter width in bits to a mask */
#define mask(width)		((u64)~0 >> (64 - (width)))

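/* One-time TC-flower setup: needs FW with HWRM spec 0x10803 or newer;
 * allocates bnxt_tc_info and its five rhashtables (flow, l2, decap-l2,
 * decap, encap), then advertises NETIF_F_HW_TC.
 */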
int bnxt_init_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info;
	int rc;

	if (bp->hwrm_spec_code < 0x10803) {
		netdev_warn(bp->dev,
			    "Firmware does not support TC flower offload.\n");
		return -ENOTSUPP;
	}

	tc_info = kzalloc(sizeof(*tc_info), GFP_KERNEL);
	if (!tc_info)
		return -ENOMEM;
	mutex_init(&tc_info->lock);

	/* Counter widths are programmed by FW */
	tc_info->bytes_mask = mask(36);
	tc_info->packets_mask = mask(28);

	tc_info->flow_ht_params = bnxt_tc_flow_ht_params;
	rc = rhashtable_init(&tc_info->flow_table, &tc_info->flow_ht_params);
	if (rc)
		goto free_tc_info;

	tc_info->l2_ht_params = bnxt_tc_l2_ht_params;
	rc = rhashtable_init(&tc_info->l2_table, &tc_info->l2_ht_params);
	if (rc)
		goto destroy_flow_table;

	tc_info->decap_l2_ht_params = bnxt_tc_decap_l2_ht_params;
	rc = rhashtable_init(&tc_info->decap_l2_table,
			     &tc_info->decap_l2_ht_params);
	if (rc)
		goto destroy_l2_table;

	tc_info->decap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->decap_table,
			     &tc_info->decap_ht_params);
	if (rc)
		goto destroy_decap_l2_table;

	tc_info->encap_ht_params = bnxt_tc_tunnel_ht_params;
	rc = rhashtable_init(&tc_info->encap_table,
			     &tc_info->encap_ht_params);
	if (rc)
		goto destroy_decap_table;

	tc_info->enabled = true;
	bp->dev->hw_features |= NETIF_F_HW_TC;
	bp->dev->features |= NETIF_F_HW_TC;
	bp->tc_info = tc_info;
	return 0;

destroy_decap_table:
	rhashtable_destroy(&tc_info->decap_table);
destroy_decap_l2_table:
	rhashtable_destroy(&tc_info->decap_l2_table);
destroy_l2_table:
	rhashtable_destroy(&tc_info->l2_table);
destroy_flow_table:
	rhashtable_destroy(&tc_info->flow_table);
free_tc_info:
	kfree(tc_info);
	return rc;
}

void bnxt_shutdown_tc(struct bnxt *bp)
{
	struct bnxt_tc_info *tc_info = bp->tc_info;

	if (!bnxt_tc_flower_enabled(bp))
		return;

	rhashtable_destroy(&tc_info->flow_table);
	rhashtable_destroy(&tc_info->l2_table);
	rhashtable_destroy(&tc_info->decap_l2_table);
	rhashtable_destroy(&tc_info->decap_table);
	rhashtable_destroy(&tc_info->encap_table);
	kfree(tc_info);
	bp->tc_info = NULL;
}