/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 2016 Oracle. All rights reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet_common.h"

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define VNET_MAX_RETRIES 10
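/* With udelay() starting at 1 us, doubling, and capping at 128 us in
 * vnet_send_ack() and __vnet_tx_trigger(), ten retries bound the
 * busy-wait to well under a millisecond per trigger.
 */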

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network support library");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
static void vnet_port_reset(struct vnet_port *port);

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);

int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i, err;

	err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
	if (err)
		return err;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
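	/* Pack the 6-byte MAC address into the low 48 bits of pkt.addr,
	 * most significant address byte first.
	 */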
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.cflags = 0;
	if (vio_version_after_eq(vio, 1, 7) && port->tso) {
		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		pkt.ipv4_lso_maxlen = port->tsolen;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64 localmtu;
	u8 xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *		 pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
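	/* Example (illustrative values): on a > v1.3 handshake where the
	 * peer advertises mtu 9000 but port->rmtu is 1500, localmtu below
	 * becomes min(9000, 1500) = 1500 and the ACK is rewritten to
	 * carry mtu=1500.
	 */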
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* LSO negotiation */
	if (vio_version_after_eq(vio, 1, 7))
		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
	else
		port->tso = false;
	if (port->tso) {
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
		if (port->tsolen < VNET_MINTSO) {
			port->tso = false;
			port->tsolen = 0;
			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		}
		pkt->ipv4_lso_maxlen = port->tsolen;
	} else {
		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		pkt->ipv4_lso_maxlen = 0;
	}

	/* for version >= 1.6, ACK packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void)vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	}

	viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
	       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
	       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.stype = VIO_SUBTYPE_ACK;

	return vio_ldc_send(vio, pkt, sizeof(*pkt));
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);

void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;
}
EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly. These 6 bytes are not in the descriptor
 * length, they are simply implied. This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
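/* For example, for a 60-byte frame the peer copies VNET_PACKET_SKIP (6)
 * implied bytes plus the packet, and vnet_rx_one() below rounds the
 * copy length up to (60 + 6 + 7) & ~7 = 72 bytes to satisfy the
 * 8-byte-multiple rule.
 */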
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb;
	unsigned long addr, off;

	skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
	if (unlikely(!skb))
		return NULL;

	addr = (unsigned long)skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

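/* Recompute a full TCP/UDP checksum in software for an IPv4 packet,
 * folding in the pseudo-header via csum_tcpudp_magic(). Packets that
 * are not IPv4 TCP/UDP are left untouched.
 */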
static inline void vnet_fullcsum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return;
	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_TCP,
						skb->csum);
	} else if (iph->protocol == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_UDP,
						skb->csum);
	}
}

static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	unsigned int len = desc->size;
	unsigned int copy_len;
	struct sk_buff *skb;
	int maxlen;
	int err;

	err = -EMSGSIZE;
	if (port->tso && port->tsolen > port->rmtu)
		maxlen = port->tsolen;
	else
		maxlen = port->rmtu;
	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       desc->cookies, desc->ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (vio_version_after_eq(&port->vio, 1, 8)) {
		struct vio_net_dext *dext = vio_net_ext(desc);

		skb_reset_network_header(skb);

		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);

				iph->check = 0;
				ip_send_check(iph);
			}
		}
		if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
		    skb->ip_summed == CHECKSUM_NONE) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);
				int ihl = iph->ihl * 4;

				skb_reset_transport_header(skb);
				skb_set_transport_header(skb, ihl);
				vnet_fullcsum(skb);
			}
		}
		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_level = 0;
			if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
				skb->csum_level = 1;
		}
	}

	skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_ACK,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = end,
		.state = vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(!desc);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

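/* Walk the descriptors the peer marked READY in [start, end], handing
 * each to vnet_rx_one(). ACKs are batched; if the NAPI budget runs out
 * mid-walk, the final STOPPED ACK is deferred and napi_resume is set
 * so the next poll resumes from napi_stop_idx.
 */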
static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

	end = (end == (u32)-1) ? vio_dring_prev(dr, start)
			       : vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1)) {
		ack_end = vio_dring_prev(dr, start);
		ack_start = ack_end;
	}
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}

static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}

static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	dev = VNET_PORT_TO_NET_DEVICE(port);
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* If the queue is stopped, wake it up so that we'll
 * send out another START message at the next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
				  port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}

bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);

static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;
	int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

		if (event == LDC_EVENT_RESET) {
			vnet_port_reset(port);
			vio_port_up(vio);

			/* If the device is running but its tx queue was
			 * stopped (due to flow control), restart it.
			 * This is necessary since vnet_port_reset()
			 * clears the tx drings and thus we may never get
			 * back a VIO_TYPE_DATA ACK packet - which is
			 * the normal mechanism to restart the tx queue.
			 */
			if (netif_running(dev))
				maybe_tx_wakeup(port);
		}
		port->rx_event = 0;
		return 0;
	}
	/* We may have multiple LDC events in rx_event. Unroll send_events() */
	event = (port->rx_event & LDC_EVENT_UP);
	port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
	if (event == LDC_EVENT_UP)
		goto ldc_ctrl;
	event = port->rx_event;
	if (!(event & LDC_EVENT_DATA_READY))
		return 0;

	/* we don't expect any other bits than RESET, UP, DATA_READY */
	BUG_ON(event != LDC_EVENT_DATA_READY);

	err = 0;
	tx_wakeup = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr,
							port->napi_stop_idx);
			pkt->end_idx = -1;
			goto napi_resume;
		}
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;
napi_resume:
		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!sunvnet_port_is_up_common(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

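/* Send a VIO_DRING_DATA INFO ("start") trigger for the descriptor at
 * @start, first flushing any STOPPED ACK still owed to the peer, and
 * retrying with exponential backoff while the LDC channel returns
 * -EAGAIN.
 */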
static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32)-1,
	};
	int err, delay;
	int retries = 0;

	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

void sunvnet_clean_timer_expire_common(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);
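	/* Note: when blen is already a multiple of 8 this adds a full 8
	 * bytes of slack rather than 0; the copy is only ever rounded up,
	 * into tailroom that vnet_skb_shape() has already reserved.
	 */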

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}

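/* Make the skb satisfy the LDC mapping constraints: data starting
 * VNET_PACKET_SKIP bytes past an 8-byte boundary, 8-byte aligned
 * fragment offsets, and no more fragments than available cookies.
 * If any check fails, copy into a freshly aligned skb, recomputing
 * the partial checksum along the way.
 */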
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

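/* Software fallback for GSO packets larger than the peer's TSO limit:
 * re-segment the skb into chunks of at most port->tsolen bytes (less
 * headers) and feed each segment back through
 * sunvnet_start_xmit_common().
 */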
static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
				struct vnet_port *(*vnet_tx_port)
				(struct sk_buff *, struct net_device *))
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP) {
		hlen += tcp_hdr(skb)->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		hlen += sizeof(struct udphdr);
	} else {
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev,
							   vnet_tx_port);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
			      struct vnet_port *(*vnet_tx_port)
			      (struct sk_buff *, struct net_device *))
{
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = vnet_tx_port(skb, dev);
	if (unlikely(!port)) {
		rcu_read_unlock();
		goto out_dropped;
	}

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb, vnet_tx_port);
		rcu_read_unlock();
		return err;
	}

	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			rcu_read_unlock();
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vnet_fullcsum(skb);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
					     ->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	if (port)
		rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

void sunvnet_tx_timeout_common(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

int sunvnet_open_common(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

int sunvnet_close_common(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void)vio_ldc_send(&port->vio, &info,
					   sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void)vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void)vio_ldc_send(&port->vio, &info,
					   sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void)vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
{
	struct vnet_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
			break;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);

int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);

void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (!dr->base)
		return;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		struct vio_net_desc *d;
		void *skb = port->tx_bufs[i].skb;

		if (!skb)
			continue;

		d = vio_dring_entry(dr, i);

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);
		dev_kfree_skb(skb);
		port->tx_bufs[i].skb = NULL;
		d->hdr.state = VIO_DESC_FREE;
	}
	ldc_free_exp_dring(port->vio.lp, dr->base,
			   (dr->entry_size * dr->num_entries),
			   dr->cookies, dr->ncookies);
	dr->base = NULL;
	dr->entry_size = 0;
	dr->num_entries = 0;
	dr->pending = 0;
	dr->ncookies = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);

static void vnet_port_reset(struct vnet_port *port)
{
	del_timer(&port->clean_timer);
	sunvnet_port_free_tx_bufs_common(port);
	port->rmtu = 0;
	port->tso = true;
	port->tsolen = 0;
}

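/* Allocate and export the TX descriptor ring. Each entry holds a
 * vio_net_desc plus two inline cookies and, for VIO >= 1.7, the
 * vio_net_dext extension that carries checksum/LSO flags.
 */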
static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len, elen;
	int i, err, ncookies;
	void *dring;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	elen = sizeof(struct vio_net_desc) +
	       sizeof(struct ldc_trans_cookie) * 2;
	if (vio_version_after_eq(&port->vio, 1, 7))
		elen += sizeof(struct vio_net_dext);
	len = VNET_TX_RING_SIZE * elen;

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = elen;
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = 0;
	dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, i);
		d->hdr.state = VIO_DESC_FREE;
	}
	return 0;

err_out:
	sunvnet_port_free_tx_bufs_common(port);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
{
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);
		napi_schedule(&port->napi);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
#endif

void sunvnet_port_add_txq_common(struct vnet_port *port)
{
	struct vnet *vp = port->vp;
	int n;

	n = vp->nports++;
	n = n & (VNET_MAX_TXQS - 1);
	port->q_index = n;
	netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
						port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);

void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
	port->vp->nports--;
	netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
						port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);