/*
 * Copyright (c) 2016 Mellanox Technologies Ltd. All rights reserved.
 * Copyright (c) 2015 System Fabric Works, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *	Redistribution and use in source and binary forms, with or
 *	without modification, are permitted provided that the following
 *	conditions are met:
 *
 *	- Redistributions of source code must retain the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer.
 *
 *	- Redistributions in binary form must reproduce the above
 *	  copyright notice, this list of conditions and the following
 *	  disclaimer in the documentation and/or other materials
 *	  provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/skbuff.h>
#include <linux/if_arp.h>
#include <linux/netdevice.h>
#include <linux/if.h>
#include <linux/if_vlan.h>
#include <net/udp_tunnel.h>
#include <net/sch_generic.h>
#include <linux/netfilter.h>
#include <rdma/ib_addr.h>

#include "rxe.h"
#include "rxe_net.h"
#include "rxe_loc.h"

static LIST_HEAD(rxe_dev_list);
static DEFINE_SPINLOCK(dev_list_lock); /* spinlock for device list */

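/* Find the rxe device, if any, that is stacked on top of a given net_device. */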
struct rxe_dev *net_to_rxe(struct net_device *ndev)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (rxe->ndev == ndev) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);

	return found;
}

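/* Find a registered rxe device by its IB device name. */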
struct rxe_dev *get_rxe_by_name(const char *name)
{
	struct rxe_dev *rxe;
	struct rxe_dev *found = NULL;

	spin_lock_bh(&dev_list_lock);
	list_for_each_entry(rxe, &rxe_dev_list, list) {
		if (!strcmp(name, rxe->ib_dev.name)) {
			found = rxe;
			break;
		}
	}
	spin_unlock_bh(&dev_list_lock);
	return found;
}

static struct rxe_recv_sockets recv_sockets;

struct device *rxe_dma_device(struct rxe_dev *rxe)
{
	struct net_device *ndev;

	ndev = rxe->ndev;

	if (is_vlan_dev(ndev))
		ndev = vlan_dev_real_dev(ndev);

	return ndev->dev.parent;
}

int rxe_mcast_add(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_add(rxe->ndev, ll_addr);

	return err;
}

int rxe_mcast_delete(struct rxe_dev *rxe, union ib_gid *mgid)
{
	int err;
	unsigned char ll_addr[ETH_ALEN];

	ipv6_eth_mc_map((struct in6_addr *)mgid->raw, ll_addr);
	err = dev_mc_del(rxe->ndev, ll_addr);

	return err;
}

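/* Resolve an IPv4 route for a UDP flow from saddr to daddr out of ndev. */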
static struct dst_entry *rxe_find_route4(struct net_device *ndev,
					 struct in_addr *saddr,
					 struct in_addr *daddr)
{
	struct rtable *rt;
	struct flowi4 fl = { { 0 } };

	memset(&fl, 0, sizeof(fl));
	fl.flowi4_oif = ndev->ifindex;
	memcpy(&fl.saddr, saddr, sizeof(*saddr));
	memcpy(&fl.daddr, daddr, sizeof(*daddr));
	fl.flowi4_proto = IPPROTO_UDP;

	rt = ip_route_output_key(&init_net, &fl);
	if (IS_ERR(rt)) {
		pr_err_ratelimited("no route to %pI4\n", &daddr->s_addr);
		return NULL;
	}

	return &rt->dst;
}

#if IS_ENABLED(CONFIG_IPV6)
static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	struct dst_entry *ndst;
	struct flowi6 fl6 = { { 0 } };

	memset(&fl6, 0, sizeof(fl6));
	fl6.flowi6_oif = ndev->ifindex;
	memcpy(&fl6.saddr, saddr, sizeof(*saddr));
	memcpy(&fl6.daddr, daddr, sizeof(*daddr));
	fl6.flowi6_proto = IPPROTO_UDP;

	if (unlikely(ipv6_stub->ipv6_dst_lookup(sock_net(recv_sockets.sk6->sk),
						recv_sockets.sk6->sk, &ndst,
						&fl6))) {
		pr_err_ratelimited("no route to %pI6\n", daddr);
		goto put;
	}

	if (unlikely(ndst->error)) {
		pr_err("no route to %pI6\n", daddr);
		goto put;
	}

	return ndst;
put:
	dst_release(ndst);
	return NULL;
}

#else

static struct dst_entry *rxe_find_route6(struct net_device *ndev,
					 struct in6_addr *saddr,
					 struct in6_addr *daddr)
{
	return NULL;
}

#endif

/*
 * Derive the net_device from the av.
 * For physical devices, this will just return rxe->ndev.
 * But for VLAN devices, it will return the vlan dev.
 * Caller should dev_put() the returned net_device.
 */
static struct net_device *rxe_netdev_from_av(struct rxe_dev *rxe,
					     int port_num,
					     struct rxe_av *av)
{
	union ib_gid gid;
	struct ib_gid_attr attr;
	struct net_device *ndev = rxe->ndev;

	if (ib_get_cached_gid(&rxe->ib_dev, port_num, av->grh.sgid_index,
			      &gid, &attr) == 0 &&
	    attr.ndev && attr.ndev != ndev)
		ndev = attr.ndev;
	else
		/* Only to ensure that caller may call dev_put() */
		dev_hold(ndev);

	return ndev;
}

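/*
 * Look up the dst entry for the destination described by the av.
 * For RC QPs a dst cached on the QP's socket is reused if it is still
 * valid (dst_check()); otherwise a fresh route lookup is done.
 */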
static struct dst_entry *rxe_find_route(struct rxe_dev *rxe,
					struct rxe_qp *qp,
					struct rxe_av *av)
{
	struct dst_entry *dst = NULL;
	struct net_device *ndev;

	ndev = rxe_netdev_from_av(rxe, qp->attr.port_num, av);

	if (qp_type(qp) == IB_QPT_RC)
		dst = sk_dst_get(qp->sk->sk);

	if (!dst || !dst_check(dst, qp->dst_cookie)) {
		if (dst)
			dst_release(dst);

		if (av->network_type == RDMA_NETWORK_IPV4) {
			struct in_addr *saddr;
			struct in_addr *daddr;

			saddr = &av->sgid_addr._sockaddr_in.sin_addr;
			daddr = &av->dgid_addr._sockaddr_in.sin_addr;
			dst = rxe_find_route4(ndev, saddr, daddr);
		} else if (av->network_type == RDMA_NETWORK_IPV6) {
			struct in6_addr *saddr6;
			struct in6_addr *daddr6;

			saddr6 = &av->sgid_addr._sockaddr_in6.sin6_addr;
			daddr6 = &av->dgid_addr._sockaddr_in6.sin6_addr;
			dst = rxe_find_route6(ndev, saddr6, daddr6);
#if IS_ENABLED(CONFIG_IPV6)
			if (dst)
				qp->dst_cookie =
					rt6_get_cookie((struct rt6_info *)dst);
#endif
		}
	}

	dev_put(ndev);
	return dst;
}

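/*
 * encap_rcv handler for the RoCEv2 UDP tunnel sockets.  Matches the
 * receiving net_device (or its real device for VLANs) to an rxe device,
 * fills in the per-packet rxe_pkt_info and passes the skb to rxe_rcv().
 */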
static int rxe_udp_encap_recv(struct sock *sk, struct sk_buff *skb)
{
	struct udphdr *udph;
	struct net_device *ndev = skb->dev;
	struct net_device *rdev = ndev;
	struct rxe_dev *rxe = net_to_rxe(ndev);
	struct rxe_pkt_info *pkt = SKB_TO_PKT(skb);

	if (!rxe && is_vlan_dev(rdev)) {
		rdev = vlan_dev_real_dev(ndev);
		rxe = net_to_rxe(rdev);
	}
	if (!rxe)
		goto drop;

	if (skb_linearize(skb)) {
		pr_err("skb_linearize failed\n");
		goto drop;
	}

	udph = udp_hdr(skb);
	pkt->rxe = rxe;
	pkt->port_num = 1;
	pkt->hdr = (u8 *)(udph + 1);
	pkt->mask = RXE_GRH_MASK;
	pkt->paylen = be16_to_cpu(udph->len) - sizeof(*udph);

	return rxe_rcv(skb);
drop:
	kfree_skb(skb);
	return 0;
}

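/*
 * Create a kernel UDP socket on the given port and register
 * rxe_udp_encap_recv() as its encapsulation receive handler.
 */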
static struct socket *rxe_setup_udp_tunnel(struct net *net, __be16 port,
					   bool ipv6)
{
	int err;
	struct socket *sock;
	struct udp_port_cfg udp_cfg = { };
	struct udp_tunnel_sock_cfg tnl_cfg = { };

	if (ipv6) {
		udp_cfg.family = AF_INET6;
		udp_cfg.ipv6_v6only = 1;
	} else {
		udp_cfg.family = AF_INET;
	}

	udp_cfg.local_udp_port = port;

	/* Create UDP socket */
	err = udp_sock_create(net, &udp_cfg, &sock);
	if (err < 0) {
		pr_err("failed to create udp socket. err = %d\n", err);
		return ERR_PTR(err);
	}

	tnl_cfg.encap_type = 1;
	tnl_cfg.encap_rcv = rxe_udp_encap_recv;

	/* Setup UDP tunnel */
	setup_udp_tunnel_sock(net, sock, &tnl_cfg);

	return sock;
}

void rxe_release_udp_tunnel(struct socket *sk)
{
	if (sk)
		udp_tunnel_sock_release(sk);
}

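/* Push and fill in the UDP header; the UDP checksum is left at zero. */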
static void prepare_udp_hdr(struct sk_buff *skb, __be16 src_port,
			    __be16 dst_port)
{
	struct udphdr *udph;

	__skb_push(skb, sizeof(*udph));
	skb_reset_transport_header(skb);
	udph = udp_hdr(skb);

	udph->dest = dst_port;
	udph->source = src_port;
	udph->len = htons(skb->len);
	udph->check = 0;
}

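/*
 * Build the outer IPv4 header for an outgoing packet and attach the
 * routing dst to the skb.  prepare_ipv6_hdr() below is the IPv6 variant.
 */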
static void prepare_ipv4_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     __be32 saddr, __be32 daddr, __u8 proto,
			     __u8 tos, __u8 ttl, __be16 df, bool xnet)
{
	struct iphdr *iph;

	skb_scrub_packet(skb, xnet);

	skb_clear_hash(skb);
	skb_dst_set(skb, dst_clone(dst));
	memset(IPCB(skb), 0, sizeof(*IPCB(skb)));

	skb_push(skb, sizeof(struct iphdr));
	skb_reset_network_header(skb);

	iph = ip_hdr(skb);

	iph->version = IPVERSION;
	iph->ihl = sizeof(struct iphdr) >> 2;
	iph->frag_off = df;
	iph->protocol = proto;
	iph->tos = tos;
	iph->daddr = daddr;
	iph->saddr = saddr;
	iph->ttl = ttl;
	__ip_select_ident(dev_net(dst->dev), iph,
			  skb_shinfo(skb)->gso_segs ?: 1);
	iph->tot_len = htons(skb->len);
	ip_send_check(iph);
}

static void prepare_ipv6_hdr(struct dst_entry *dst, struct sk_buff *skb,
			     struct in6_addr *saddr, struct in6_addr *daddr,
			     __u8 proto, __u8 prio, __u8 ttl)
{
	struct ipv6hdr *ip6h;

	memset(&(IPCB(skb)->opt), 0, sizeof(IPCB(skb)->opt));
	IPCB(skb)->flags &= ~(IPSKB_XFRM_TUNNEL_SIZE | IPSKB_XFRM_TRANSFORMED
			    | IPSKB_REROUTED);
	skb_dst_set(skb, dst_clone(dst));

	__skb_push(skb, sizeof(*ip6h));
	skb_reset_network_header(skb);
	ip6h = ipv6_hdr(skb);
	ip6_flow_hdr(ip6h, prio, htonl(0));
	ip6h->payload_len = htons(skb->len);
	ip6h->nexthdr = proto;
	ip6h->hop_limit = ttl;
	ip6h->daddr = *daddr;
	ip6h->saddr = *saddr;
	ip6h->payload_len = htons(skb->len - sizeof(*ip6h));
}

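/*
 * Resolve a route and build the UDP and IPv4 headers for an outgoing
 * packet.  For RC QPs the resolved dst is cached on the QP's socket.
 */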
static int prepare4(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	bool xnet = false;
	__be16 df = htons(IP_DF);
	struct in_addr *saddr = &av->sgid_addr._sockaddr_in.sin_addr;
	struct in_addr *daddr = &av->dgid_addr._sockaddr_in.sin_addr;

	dst = rxe_find_route(rxe, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv4_hdr(dst, skb, saddr->s_addr, daddr->s_addr, IPPROTO_UDP,
			 av->grh.traffic_class, av->grh.hop_limit, df, xnet);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_set(qp->sk->sk, dst);
	else
		dst_release(dst);

	return 0;
}

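/* IPv6 counterpart of prepare4(). */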
static int prepare6(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		    struct sk_buff *skb, struct rxe_av *av)
{
	struct rxe_qp *qp = pkt->qp;
	struct dst_entry *dst;
	struct in6_addr *saddr = &av->sgid_addr._sockaddr_in6.sin6_addr;
	struct in6_addr *daddr = &av->dgid_addr._sockaddr_in6.sin6_addr;

	dst = rxe_find_route(rxe, qp, av);
	if (!dst) {
		pr_err("Host not reachable\n");
		return -EHOSTUNREACH;
	}

	if (!memcmp(saddr, daddr, sizeof(*daddr)))
		pkt->mask |= RXE_LOOPBACK_MASK;

	prepare_udp_hdr(skb, htons(RXE_ROCE_V2_SPORT),
			htons(ROCE_V2_UDP_DPORT));

	prepare_ipv6_hdr(dst, skb, saddr, daddr, IPPROTO_UDP,
			 av->grh.traffic_class,
			 av->grh.hop_limit);

	if (qp_type(qp) == IB_QPT_RC)
		sk_dst_set(qp->sk->sk, dst);
	else
		dst_release(dst);

	return 0;
}

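/*
 * Add the transport and network headers to an outgoing packet based on
 * the address vector, then compute the ICRC seed over the packet headers.
 */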
int rxe_prepare(struct rxe_dev *rxe, struct rxe_pkt_info *pkt,
		struct sk_buff *skb, u32 *crc)
{
	int err = 0;
	struct rxe_av *av = rxe_get_av(pkt);

	if (av->network_type == RDMA_NETWORK_IPV4)
		err = prepare4(rxe, pkt, skb, av);
	else if (av->network_type == RDMA_NETWORK_IPV6)
		err = prepare6(rxe, pkt, skb, av);

	*crc = rxe_icrc_hdr(pkt, skb);

	return err;
}

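/*
 * skb destructor for transmitted packets: drops the QP reference taken
 * in rxe_send() and restarts the requester task if the QP was waiting
 * for in-flight skbs to drain.
 */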
static void rxe_skb_tx_dtor(struct sk_buff *skb)
{
	struct sock *sk = skb->sk;
	struct rxe_qp *qp = sk->sk_user_data;
	int skb_out = atomic_dec_return(&qp->skb_out);

	if (unlikely(qp->need_req_skb &&
		     skb_out < RXE_INFLIGHT_SKBS_PER_QP_LOW))
		rxe_run_task(&qp->req.task, 1);

	rxe_drop_ref(qp);
}

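/*
 * Hand a fully prepared packet to the IPv4 or IPv6 stack for
 * transmission.  A QP reference and the skb_out count are held until
 * the skb destructor runs.
 */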
int rxe_send(struct rxe_pkt_info *pkt, struct sk_buff *skb)
{
	struct rxe_av *av;
	int err;

	av = rxe_get_av(pkt);

	skb->destructor = rxe_skb_tx_dtor;
	skb->sk = pkt->qp->sk->sk;

	rxe_add_ref(pkt->qp);
	atomic_inc(&pkt->qp->skb_out);

	if (av->network_type == RDMA_NETWORK_IPV4) {
		err = ip_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else if (av->network_type == RDMA_NETWORK_IPV6) {
		err = ip6_local_out(dev_net(skb_dst(skb)->dev), skb->sk, skb);
	} else {
		pr_err("Unknown layer 3 protocol: %d\n", av->network_type);
		atomic_dec(&pkt->qp->skb_out);
		rxe_drop_ref(pkt->qp);
		kfree_skb(skb);
		return -EINVAL;
	}

	if (unlikely(net_xmit_eval(err))) {
		pr_debug("error sending packet: %d\n", err);
		return -EAGAIN;
	}

	return 0;
}

int rxe_loopback(struct sk_buff *skb)
{
	return rxe_rcv(skb);
}

static inline int addr_same(struct rxe_dev *rxe, struct rxe_av *av)
{
	return rxe->port.port_guid == av->grh.dgid.global.interface_id;
}

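/*
 * Allocate and set up an skb for an outgoing packet: reserve headroom
 * for the Ethernet, IP and UDP headers and put paylen bytes of payload.
 */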
struct sk_buff *rxe_init_packet(struct rxe_dev *rxe, struct rxe_av *av,
				int paylen, struct rxe_pkt_info *pkt)
{
	unsigned int hdr_len;
	struct sk_buff *skb;
	struct net_device *ndev;
	const int port_num = 1;

	ndev = rxe_netdev_from_av(rxe, port_num, av);

	if (av->network_type == RDMA_NETWORK_IPV4)
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct iphdr);
	else
		hdr_len = ETH_HLEN + sizeof(struct udphdr) +
			sizeof(struct ipv6hdr);

	skb = alloc_skb(paylen + hdr_len + LL_RESERVED_SPACE(ndev),
			GFP_ATOMIC);

	if (unlikely(!skb)) {
		dev_put(ndev);
		return NULL;
	}

	skb_reserve(skb, hdr_len + LL_RESERVED_SPACE(rxe->ndev));

	skb->dev = ndev;
	if (av->network_type == RDMA_NETWORK_IPV4)
		skb->protocol = htons(ETH_P_IP);
	else
		skb->protocol = htons(ETH_P_IPV6);

	pkt->rxe = rxe;
	pkt->port_num = port_num;
	pkt->hdr = skb_put(skb, paylen);
	pkt->mask |= RXE_GRH_MASK;

	memset(pkt->hdr, 0, paylen);

	dev_put(ndev);
	return skb;
}

/*
 * this is required by rxe_cfg to match rxe devices in
 * /sys/class/infiniband up with their underlying ethernet devices
 */
const char *rxe_parent_name(struct rxe_dev *rxe, unsigned int port_num)
{
	return rxe->ndev->name;
}

enum rdma_link_layer rxe_link_layer(struct rxe_dev *rxe, unsigned int port_num)
{
	return IB_LINK_LAYER_ETHERNET;
}

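/* Create an rxe device on top of a net_device and add it to the device list. */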
struct rxe_dev *rxe_net_add(struct net_device *ndev)
{
	int err;
	struct rxe_dev *rxe = NULL;

	rxe = (struct rxe_dev *)ib_alloc_device(sizeof(*rxe));
	if (!rxe)
		return NULL;

	rxe->ndev = ndev;

	err = rxe_add(rxe, ndev->mtu);
	if (err) {
		ib_dealloc_device(&rxe->ib_dev);
		return NULL;
	}

	spin_lock_bh(&dev_list_lock);
	list_add_tail(&rxe->list, &rxe_dev_list);
	spin_unlock_bh(&dev_list_lock);
	return rxe;
}

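/* Remove every registered rxe device. */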
void rxe_remove_all(void)
{
	spin_lock_bh(&dev_list_lock);
	while (!list_empty(&rxe_dev_list)) {
		struct rxe_dev *rxe =
			list_first_entry(&rxe_dev_list, struct rxe_dev, list);

		list_del(&rxe->list);
		spin_unlock_bh(&dev_list_lock);
		rxe_remove(rxe);
		spin_lock_bh(&dev_list_lock);
	}
	spin_unlock_bh(&dev_list_lock);
}
EXPORT_SYMBOL(rxe_remove_all);

static void rxe_port_event(struct rxe_dev *rxe,
			   enum ib_event_type event)
{
	struct ib_event ev;

	ev.device = &rxe->ib_dev;
	ev.element.port_num = 1;
	ev.event = event;

	ib_dispatch_event(&ev);
}

/* Caller must hold net_info_lock */
void rxe_port_up(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_ACTIVE;
	port->attr.phys_state = IB_PHYS_STATE_LINK_UP;

	rxe_port_event(rxe, IB_EVENT_PORT_ACTIVE);
	pr_info("set %s active\n", rxe->ib_dev.name);
}

/* Caller must hold net_info_lock */
void rxe_port_down(struct rxe_dev *rxe)
{
	struct rxe_port *port;

	port = &rxe->port;
	port->attr.state = IB_PORT_DOWN;
	port->attr.phys_state = IB_PHYS_STATE_LINK_DOWN;

	rxe_port_event(rxe, IB_EVENT_PORT_ERR);
	pr_info("set %s down\n", rxe->ib_dev.name);
}

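/*
 * netdevice notifier: mirror state changes of the underlying net_device
 * (up/down, carrier changes, MTU changes, unregister) onto the rxe port.
 */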
static int rxe_notify(struct notifier_block *not_blk,
		      unsigned long event,
		      void *arg)
{
	struct net_device *ndev = netdev_notifier_info_to_dev(arg);
	struct rxe_dev *rxe = net_to_rxe(ndev);

	if (!rxe)
		goto out;

	switch (event) {
	case NETDEV_UNREGISTER:
		list_del(&rxe->list);
		rxe_remove(rxe);
		break;
	case NETDEV_UP:
		rxe_port_up(rxe);
		break;
	case NETDEV_DOWN:
		rxe_port_down(rxe);
		break;
	case NETDEV_CHANGEMTU:
		pr_info("%s changed mtu to %d\n", ndev->name, ndev->mtu);
		rxe_set_mtu(rxe, ndev->mtu);
		break;
	case NETDEV_CHANGE:
		if (netif_running(ndev) && netif_carrier_ok(ndev))
			rxe_port_up(rxe);
		else
			rxe_port_down(rxe);
		break;
	case NETDEV_REBOOT:
	case NETDEV_GOING_DOWN:
	case NETDEV_CHANGEADDR:
	case NETDEV_CHANGENAME:
	case NETDEV_FEAT_CHANGE:
	default:
		pr_info("ignoring netdev event = %ld for %s\n",
			event, ndev->name);
		break;
	}
out:
	return NOTIFY_OK;
}

struct notifier_block rxe_net_notifier = {
	.notifier_call = rxe_notify,
};

static int rxe_net_ipv4_init(void)
{
	recv_sockets.sk4 = rxe_setup_udp_tunnel(&init_net,
			htons(ROCE_V2_UDP_DPORT), false);
	if (IS_ERR(recv_sockets.sk4)) {
		recv_sockets.sk4 = NULL;
		pr_err("Failed to create IPv4 UDP tunnel\n");
		return -1;
	}

	return 0;
}

static int rxe_net_ipv6_init(void)
{
#if IS_ENABLED(CONFIG_IPV6)

	recv_sockets.sk6 = rxe_setup_udp_tunnel(&init_net,
			htons(ROCE_V2_UDP_DPORT), true);
	if (IS_ERR(recv_sockets.sk6)) {
		recv_sockets.sk6 = NULL;
		pr_err("Failed to create IPv6 UDP tunnel\n");
		return -1;
	}
#endif
	return 0;
}

void rxe_net_exit(void)
{
	rxe_release_udp_tunnel(recv_sockets.sk6);
	rxe_release_udp_tunnel(recv_sockets.sk4);
	unregister_netdevice_notifier(&rxe_net_notifier);
}

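/*
 * Set up the shared RoCEv2 UDP tunnel sockets and register the
 * netdevice notifier.
 */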
int rxe_net_init(void)
{
	int err;

	recv_sockets.sk6 = NULL;

	err = rxe_net_ipv4_init();
	if (err)
		return err;
	err = rxe_net_ipv6_init();
	if (err)
		goto err_out;
	err = register_netdevice_notifier(&rxe_net_notifier);
	if (err) {
		pr_err("Failed to register netdev notifier\n");
		goto err_out;
	}
	return 0;
err_out:
	rxe_net_exit();
	return err;
}