/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 2016 Oracle. All rights reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet_common.h"

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define	VNET_MAX_RETRIES	10

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network support library");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
static void vnet_port_reset(struct vnet_port *port);

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);

int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i, err;

	err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
	if (err)
		return err;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
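	/* Pack the 6-byte Ethernet MAC address into the low 48 bits of
	 * pkt.addr, most significant byte first, as the VIO attribute
	 * exchange carries the address in big-endian byte order.
	 */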
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.cflags = 0;
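	/* Advertise IPv4 LSO (TSO) capability to v1.7+ peers; tsolen
	 * falls back to VNET_MAXTSO when it has not been constrained.
	 */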
	if (vio_version_after_eq(vio, 1, 7) && port->tso) {
		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		pkt.ipv4_lso_maxlen = port->tsolen;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64 localmtu;
	u8 xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *		 pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* LSO negotiation */
	if (vio_version_after_eq(vio, 1, 7))
		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
	else
		port->tso = false;
	if (port->tso) {
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
		if (port->tsolen < VNET_MINTSO) {
			port->tso = false;
			port->tsolen = 0;
			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		}
		pkt->ipv4_lso_maxlen = port->tsolen;
	} else {
		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		pkt->ipv4_lso_maxlen = 0;
	}

	/* for version >= 1.6, ACK packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void)vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	}

	viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
	       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
	       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.stype = VIO_SUBTYPE_ACK;

	return vio_ldc_send(vio, pkt, sizeof(*pkt));
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);

void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;
}
EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly.  These 6 bytes are not in the descriptor
 * length, they are simply implied.  This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb;
	unsigned long addr, off;

	skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
	if (unlikely(!skb))
		return NULL;

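	/* Shift skb->data up to the next 8-byte boundary; the extra
	 * 8 + 8 bytes allocated above leave room both for this shift
	 * and for rounding the LDC copy length up to a multiple of 8.
	 */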
	addr = (unsigned long)skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

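/* Recompute a full TCP or UDP checksum in software.  Used on receive
 * when the peer flags a packet as needing a full checksum, and on
 * transmit when a CHECKSUM_PARTIAL skb must be finalized before its
 * data is copied across the LDC channel.
 */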
static inline void vnet_fullcsum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return;
	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_TCP,
						skb->csum);
	} else if (iph->protocol == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_UDP,
						skb->csum);
	}
}

static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	unsigned int len = desc->size;
	unsigned int copy_len;
	struct sk_buff *skb;
	int maxlen;
	int err;

	err = -EMSGSIZE;
	if (port->tso && port->tsolen > port->rmtu)
		maxlen = port->tsolen;
	else
		maxlen = port->rmtu;
	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

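	/* The LDC copy must be a multiple of 8 bytes, so include the
	 * implicit VNET_PACKET_SKIP bytes and round the total up; e.g.
	 * a 1514-byte frame is copied as 1520 bytes.
	 */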
	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       desc->cookies, desc->ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (vio_version_after_eq(&port->vio, 1, 8)) {
		struct vio_net_dext *dext = vio_net_ext(desc);

		skb_reset_network_header(skb);

		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);

				iph->check = 0;
				ip_send_check(iph);
			}
		}
		if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
		    skb->ip_summed == CHECKSUM_NONE) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);
				int ihl = iph->ihl * 4;

				skb_reset_transport_header(skb);
				skb_set_transport_header(skb, ihl);
				vnet_fullcsum(skb);
			}
		}
		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_level = 0;
			if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
				skb->csum_level = 1;
		}
	}

	skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_ACK,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = end,
		.state = vio_dring_state,
	};
	int err, delay;
	int retries = 0;

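	/* Retry the send with exponential backoff (1us, 2us, ..., capped
	 * at 128us) while the LDC channel keeps returning -EAGAIN, and
	 * give up after VNET_MAX_RETRIES attempts.
	 */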
	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(!desc);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;
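
	/* An end index of (u32)-1 means "no explicit end": walk the
	 * whole ring from @start.  Otherwise step @end one entry forward
	 * so the loop below can treat it as an exclusive bound.
	 */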
	end = (end == (u32)-1) ? vio_dring_prev(dr, start)
			       : vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1)) {
		ack_end = vio_dring_prev(dr, start);
		ack_start = ack_end;
	}
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}

static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}

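/* Walk the tx ring from the consumer to the producer index and report
 * whether @end refers to a descriptor that is still outstanding.
 */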
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	dev = VNET_PORT_TO_NET_DEVICE(port);
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* If the queue is stopped, wake it up so that we'll
 * send out another START message at the next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
				  port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}

bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);

static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;
	int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
	if (unlikely(event == LDC_EVENT_RESET ||
		     event == LDC_EVENT_UP)) {
		vio_link_state_change(vio, event);

		if (event == LDC_EVENT_RESET) {
			vnet_port_reset(port);
			vio_port_up(vio);

			/* If the device is running but its tx queue was
			 * stopped (due to flow control), restart it.
			 * This is necessary since vnet_port_reset()
			 * clears the tx drings and thus we may never get
			 * back a VIO_TYPE_DATA ACK packet - which is
			 * the normal mechanism to restart the tx queue.
			 */
			if (netif_running(dev))
				maybe_tx_wakeup(port);
		}
		port->rx_event = 0;
		return 0;
	}
	/* We may have multiple LDC events in rx_event. Unroll send_events() */
	event = (port->rx_event & LDC_EVENT_UP);
	port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
	if (event == LDC_EVENT_UP)
		goto ldc_ctrl;
	event = port->rx_event;
	if (!(event & LDC_EVENT_DATA_READY))
		return 0;

	/* we don't expect any other bits than RESET, UP, DATA_READY */
	BUG_ON(event != LDC_EVENT_DATA_READY);

	err = 0;
	tx_wakeup = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

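		/* A previous poll ran out of budget mid-ring.  Instead of
		 * reading a new LDC message, synthesize a DRING_DATA
		 * packet that resumes the walk at the descriptor after
		 * napi_stop_idx.
		 */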
		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr,
							port->napi_stop_idx);
			pkt->end_idx = -1;
			goto napi_resume;
		}
		err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
		if (unlikely(err < 0)) {
			if (err == -ECONNRESET)
				vio_conn_reset(vio);
			break;
		}
		if (err == 0)
			break;
		viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
		       msgbuf.tag.type,
		       msgbuf.tag.stype,
		       msgbuf.tag.stype_env,
		       msgbuf.tag.sid);
		err = vio_validate_sid(vio, &msgbuf.tag);
		if (err < 0)
			break;
napi_resume:
		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!sunvnet_port_is_up_common(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32)-1,
	};
	int err, delay;
	int retries = 0;

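	/* If a STOPPED ack for the rx ring is still owed to the peer
	 * (an earlier attempt to send it failed), deliver it before
	 * raising a new tx trigger.
	 */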
	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

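	/* Walk backwards from the producer index: READY descriptors are
	 * counted as still pending, completed ones have their skbs
	 * unlinked onto the return list and are marked FREE, and the
	 * scan stops at the first FREE slot with no skb attached.
	 */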
	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

void sunvnet_clean_timer_expire_common(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

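/* Map the skb head and all page frags for LDC transfer.  Returns the
 * total number of transfer cookies used, or a negative errno; on
 * failure any cookies already mapped are released with ldc_unmap().
 */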
static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}

static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
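	/* Copy into a freshly aligned skb if the head is not positioned
	 * VNET_PACKET_SKIP bytes past an 8-byte boundary, if there is
	 * no head/tail room for the skip and pad bytes, or if any frag
	 * failed the cookie and alignment checks above.
	 */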
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
				struct vnet_port *(*vnet_tx_port)
				(struct sk_buff *, struct net_device *))
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP) {
		hlen += tcp_hdr(skb)->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		hlen += sizeof(struct udphdr);
	} else {
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

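	/* If the peer accepts larger TSO chunks than the stack produced,
	 * recompute the segment count for port->tsolen-sized pieces; the
	 * skb is re-segmented to that size further down.
	 */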
	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev,
							   vnet_tx_port);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
			      struct vnet_port *(*vnet_tx_port)
			      (struct sk_buff *, struct net_device *))
{
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = vnet_tx_port(skb, dev);
	if (unlikely(!port)) {
		rcu_read_unlock();
		goto out_dropped;
	}

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb, vnet_tx_port);
		rcu_read_unlock();
		return err;
	}

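	/* Oversized non-GSO frames cannot be sent over this LDC channel;
	 * emulate path-MTU discovery by bouncing an ICMP (or ICMPv6)
	 * "packet too big" error back at the stack before dropping.
	 */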
	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			rcu_read_unlock();
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vnet_fullcsum(skb);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
						->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	if (port)
		rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

void sunvnet_tx_timeout_common(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

int sunvnet_open_common(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

int sunvnet_close_common(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void)vio_ldc_send(&port->vio, &info,
					   sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void)vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void)vio_ldc_send(&port->vio, &info,
					   sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void)vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
{
	struct vnet_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
			break;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);

int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);

void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (!dr->base)
		return;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		struct vio_net_desc *d;
		void *skb = port->tx_bufs[i].skb;

		if (!skb)
			continue;

		d = vio_dring_entry(dr, i);

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);
		dev_kfree_skb(skb);
		port->tx_bufs[i].skb = NULL;
		d->hdr.state = VIO_DESC_FREE;
	}
	ldc_free_exp_dring(port->vio.lp, dr->base,
			   (dr->entry_size * dr->num_entries),
			   dr->cookies, dr->ncookies);
	dr->base = NULL;
	dr->entry_size = 0;
	dr->num_entries = 0;
	dr->pending = 0;
	dr->ncookies = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);

static void vnet_port_reset(struct vnet_port *port)
{
	del_timer(&port->clean_timer);
	sunvnet_port_free_tx_bufs_common(port);
	port->rmtu = 0;
	port->tso = true;
	port->tsolen = 0;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len, elen;
	int i, err, ncookies;
	void *dring;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

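	/* Each tx descriptor carries up to two transfer cookies, plus
	 * the descriptor extension used by v1.7+ peers for LSO and
	 * checksum-offload metadata.
	 */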
	elen = sizeof(struct vio_net_desc) +
	       sizeof(struct ldc_trans_cookie) * 2;
	if (vio_version_after_eq(&port->vio, 1, 7))
		elen += sizeof(struct vio_net_dext);
	len = VNET_TX_RING_SIZE * elen;

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = elen;
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = 0;
	dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, i);
		d->hdr.state = VIO_DESC_FREE;
	}
	return 0;

err_out:
	sunvnet_port_free_tx_bufs_common(port);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
{
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);
		napi_schedule(&port->napi);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
#endif

void sunvnet_port_add_txq_common(struct vnet_port *port)
{
	struct vnet *vp = port->vp;
	int n;

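	/* Spread ports across the available tx queues; q_index simply
	 * wraps at VNET_MAX_TXQS, so multiple ports may share a queue.
	 */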
	n = vp->nports++;
	n = n & (VNET_MAX_TXQS - 1);
	port->q_index = n;
	netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
						port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);

void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
	port->vp->nports--;
	netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
						port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);