/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 2016 Oracle. All rights reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet_common.h"

/* Heuristic for the number of times to exponentially back off and
 * retry sending an LDC trigger when EAGAIN is encountered.
 */
#define VNET_MAX_RETRIES 10

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);
static void vnet_port_reset(struct vnet_port *port);

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
        return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
        struct vio_msg_tag *pkt = arg;

        pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
               pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
        pr_err("Resetting connection\n");

        ldc_disconnect(port->vio.lp);

        return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);

int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
        struct vnet_port *port = to_vnet_port(vio);
        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
        struct vio_net_attr_info pkt;
        int framelen = ETH_FRAME_LEN;
        int i, err;

        err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
        if (err)
                return err;

        memset(&pkt, 0, sizeof(pkt));
        pkt.tag.type = VIO_TYPE_CTRL;
        pkt.tag.stype = VIO_SUBTYPE_INFO;
        pkt.tag.stype_env = VIO_ATTR_INFO;
        pkt.tag.sid = vio_send_sid(vio);
        if (vio_version_before(vio, 1, 2))
                pkt.xfer_mode = VIO_DRING_MODE;
        else
                pkt.xfer_mode = VIO_NEW_DRING_MODE;
        pkt.addr_type = VNET_ADDR_ETHERMAC;
        pkt.ack_freq = 0;
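        /* The attr "addr" field carries the MAC address in its low 48
         * bits, most significant byte first.
         */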
        for (i = 0; i < 6; i++)
                pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
        if (vio_version_after(vio, 1, 3)) {
                if (port->rmtu) {
                        port->rmtu = min(VNET_MAXPACKET, port->rmtu);
                        pkt.mtu = port->rmtu;
                } else {
                        port->rmtu = VNET_MAXPACKET;
                        pkt.mtu = port->rmtu;
                }
                if (vio_version_after_eq(vio, 1, 6))
                        pkt.options = VIO_TX_DRING;
        } else if (vio_version_before(vio, 1, 3)) {
                pkt.mtu = framelen;
        } else { /* v1.3 */
                pkt.mtu = framelen + VLAN_HLEN;
        }

        pkt.cflags = 0;
        if (vio_version_after_eq(vio, 1, 7) && port->tso) {
                pkt.cflags |= VNET_LSO_IPV4_CAPAB;
                if (!port->tsolen)
                        port->tsolen = VNET_MAXTSO;
                pkt.ipv4_lso_maxlen = port->tsolen;
        }

        pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

        viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
               "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
               "cflags[0x%04x] lso_max[%u]\n",
               pkt.xfer_mode, pkt.addr_type,
               (unsigned long long)pkt.addr,
               pkt.ack_freq, pkt.plnk_updt, pkt.options,
               (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

        return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);

static int handle_attr_info(struct vio_driver_state *vio,
                            struct vio_net_attr_info *pkt)
{
        struct vnet_port *port = to_vnet_port(vio);
        u64 localmtu;
        u8 xfer_mode;

        viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
               "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
               " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
               pkt->xfer_mode, pkt->addr_type,
               (unsigned long long)pkt->addr,
               pkt->ack_freq, pkt->plnk_updt, pkt->options,
               (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
               pkt->ipv4_lso_maxlen);

        pkt->tag.sid = vio_send_sid(vio);

        xfer_mode = pkt->xfer_mode;
        /* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
        if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
                xfer_mode = VIO_NEW_DRING_MODE;

        /* MTU negotiation:
         * < v1.3 - ETH_FRAME_LEN exactly
         * > v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
         *          pkt->mtu for ACK
         * = v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
         */
        if (vio_version_before(vio, 1, 3)) {
                localmtu = ETH_FRAME_LEN;
        } else if (vio_version_after(vio, 1, 3)) {
                localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
                localmtu = min(pkt->mtu, localmtu);
                pkt->mtu = localmtu;
        } else { /* v1.3 */
                localmtu = ETH_FRAME_LEN + VLAN_HLEN;
        }
        port->rmtu = localmtu;

        /* LSO negotiation */
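        /* TSO remains enabled only if the peer also advertised
         * VNET_LSO_IPV4_CAPAB; tsolen is clamped to the peer's
         * ipv4_lso_maxlen and TSO is dropped entirely if the result
         * falls below VNET_MINTSO.
         */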
        if (vio_version_after_eq(vio, 1, 7))
                port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
        else
                port->tso = false;
        if (port->tso) {
                if (!port->tsolen)
                        port->tsolen = VNET_MAXTSO;
                port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
                if (port->tsolen < VNET_MINTSO) {
                        port->tso = false;
                        port->tsolen = 0;
                        pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
                }
                pkt->ipv4_lso_maxlen = port->tsolen;
        } else {
                pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
                pkt->ipv4_lso_maxlen = 0;
        }

        /* for versions >= 1.6, ACK with the packet mode we support */
        if (vio_version_after_eq(vio, 1, 6)) {
                pkt->xfer_mode = VIO_NEW_DRING_MODE;
                pkt->options = VIO_TX_DRING;
        }

        if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
            pkt->addr_type != VNET_ADDR_ETHERMAC ||
            pkt->mtu != localmtu) {
                viodbg(HS, "SEND NET ATTR NACK\n");

                pkt->tag.stype = VIO_SUBTYPE_NACK;

                (void) vio_ldc_send(vio, pkt, sizeof(*pkt));

                return -ECONNRESET;
        } else {
                viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
                       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
                       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
                       pkt->xfer_mode, pkt->addr_type,
                       (unsigned long long)pkt->addr,
                       pkt->ack_freq, pkt->plnk_updt, pkt->options,
                       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
                       pkt->ipv4_lso_maxlen);

                pkt->tag.stype = VIO_SUBTYPE_ACK;

                return vio_ldc_send(vio, pkt, sizeof(*pkt));
        }
}

static int handle_attr_ack(struct vio_driver_state *vio,
                           struct vio_net_attr_info *pkt)
{
        viodbg(HS, "GOT NET ATTR ACK\n");

        return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
                            struct vio_net_attr_info *pkt)
{
        viodbg(HS, "GOT NET ATTR NACK\n");

        return -ECONNRESET;
}

int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
{
        struct vio_net_attr_info *pkt = arg;

        switch (pkt->tag.stype) {
        case VIO_SUBTYPE_INFO:
                return handle_attr_info(vio, pkt);

        case VIO_SUBTYPE_ACK:
                return handle_attr_ack(vio, pkt);

        case VIO_SUBTYPE_NACK:
                return handle_attr_nack(vio, pkt);

        default:
                return -ECONNRESET;
        }
}
EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);

void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
{
        struct vio_dring_state *dr;

        dr = &vio->drings[VIO_DRIVER_RX_RING];
        dr->snd_nxt = dr->rcv_nxt = 1;

        dr = &vio->drings[VIO_DRIVER_TX_RING];
        dr->snd_nxt = dr->rcv_nxt = 1;
}
EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly. These 6 bytes are not in the descriptor
 * length, they are simply implied. This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
                                           unsigned int len)
{
        struct sk_buff *skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
        unsigned long addr, off;

        if (unlikely(!skb))
                return NULL;

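        /* Advance skb->data to the next 8-byte boundary; the extra
         * allocation above guarantees enough headroom for this.
         */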
        addr = (unsigned long) skb->data;
        off = ((addr + 7UL) & ~7UL) - addr;
        if (off)
                skb_reserve(skb, off);

        return skb;
}

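/* Compute a full TCP/UDP checksum in software, including the IPv4
 * pseudo-header, and store it in the transport header.
 */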
static inline void vnet_fullcsum(struct sk_buff *skb)
{
        struct iphdr *iph = ip_hdr(skb);
        int offset = skb_transport_offset(skb);

        if (skb->protocol != htons(ETH_P_IP))
                return;
        if (iph->protocol != IPPROTO_TCP &&
            iph->protocol != IPPROTO_UDP)
                return;
        skb->ip_summed = CHECKSUM_NONE;
        skb->csum_level = 1;
        skb->csum = 0;
        if (iph->protocol == IPPROTO_TCP) {
                struct tcphdr *ptcp = tcp_hdr(skb);

                ptcp->check = 0;
                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
                ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                skb->len - offset, IPPROTO_TCP,
                                                skb->csum);
        } else if (iph->protocol == IPPROTO_UDP) {
                struct udphdr *pudp = udp_hdr(skb);

                pudp->check = 0;
                skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
                pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                skb->len - offset, IPPROTO_UDP,
                                                skb->csum);
        }
}

static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
        unsigned int len = desc->size;
        unsigned int copy_len;
        struct sk_buff *skb;
        int maxlen;
        int err;

        err = -EMSGSIZE;
        if (port->tso && port->tsolen > port->rmtu)
                maxlen = port->tsolen;
        else
                maxlen = port->rmtu;
        if (unlikely(len < ETH_ZLEN || len > maxlen)) {
                dev->stats.rx_length_errors++;
                goto out_dropped;
        }

        skb = alloc_and_align_skb(dev, len);
        err = -ENOMEM;
        if (unlikely(!skb)) {
                dev->stats.rx_missed_errors++;
                goto out_dropped;
        }

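        /* Round the copy up to a multiple of 8 bytes, including the
         * implied VNET_PACKET_SKIP prefix, to satisfy the LDC copy
         * alignment rules (see alloc_and_align_skb() above).
         */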
        copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
        skb_put(skb, copy_len);
        err = ldc_copy(port->vio.lp, LDC_COPY_IN,
                       skb->data, copy_len, 0,
                       desc->cookies, desc->ncookies);
        if (unlikely(err < 0)) {
                dev->stats.rx_frame_errors++;
                goto out_free_skb;
        }

        skb_pull(skb, VNET_PACKET_SKIP);
        skb_trim(skb, len);
        skb->protocol = eth_type_trans(skb, dev);

        if (vio_version_after_eq(&port->vio, 1, 8)) {
                struct vio_net_dext *dext = vio_net_ext(desc);

                skb_reset_network_header(skb);

                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
                        if (skb->protocol == htons(ETH_P_IP)) {
                                struct iphdr *iph = ip_hdr(skb);

                                iph->check = 0;
                                ip_send_check(iph);
                        }
                }
                if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
                    skb->ip_summed == CHECKSUM_NONE) {
                        if (skb->protocol == htons(ETH_P_IP)) {
                                struct iphdr *iph = ip_hdr(skb);
                                int ihl = iph->ihl * 4;

                                skb_reset_transport_header(skb);
                                skb_set_transport_header(skb, ihl);
                                vnet_fullcsum(skb);
                        }
                }
                if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
                        skb->ip_summed = CHECKSUM_PARTIAL;
                        skb->csum_level = 0;
                        if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
                                skb->csum_level = 1;
                }
        }

        skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;

        dev->stats.rx_packets++;
        dev->stats.rx_bytes += len;
        napi_gro_receive(&port->napi, skb);
        return 0;

out_free_skb:
        kfree_skb(skb);

out_dropped:
        dev->stats.rx_dropped++;
        return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
                         u32 start, u32 end, u8 vio_dring_state)
{
        struct vio_dring_data hdr = {
                .tag = {
                        .type = VIO_TYPE_DATA,
                        .stype = VIO_SUBTYPE_ACK,
                        .stype_env = VIO_DRING_DATA,
                        .sid = vio_send_sid(&port->vio),
                },
                .dring_ident = dr->ident,
                .start_idx = start,
                .end_idx = end,
                .state = vio_dring_state,
        };
        int err, delay;
        int retries = 0;

        hdr.seq = dr->snd_nxt;
        delay = 1;
        do {
                err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
                if (err > 0) {
                        dr->snd_nxt++;
                        break;
                }
                udelay(delay);
                if ((delay <<= 1) > 128)
                        delay = 128;
                if (retries++ > VNET_MAX_RETRIES) {
                        pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
                                port->raddr[0], port->raddr[1],
                                port->raddr[2], port->raddr[3],
                                port->raddr[4], port->raddr[5]);
                        break;
                }
        } while (err == -EAGAIN);

        if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
                port->stop_rx_idx = end;
                port->stop_rx = true;
        } else {
                port->stop_rx_idx = 0;
                port->stop_rx = false;
        }

        return err;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
                                        struct vio_dring_state *dr,
                                        u32 index)
{
        struct vio_net_desc *desc = port->vio.desc_buf;
        int err;

        err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
                                  (index * dr->entry_size),
                                  dr->cookies, dr->ncookies);
        if (err < 0)
                return ERR_PTR(err);

        return desc;
}

static int put_rx_desc(struct vnet_port *port,
                       struct vio_dring_state *dr,
                       struct vio_net_desc *desc,
                       u32 index)
{
        int err;

        err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
                                  (index * dr->entry_size),
                                  dr->cookies, dr->ncookies);
        if (err < 0)
                return err;

        return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
                            struct vio_dring_state *dr,
                            u32 index, int *needs_ack)
{
        struct vio_net_desc *desc = get_rx_desc(port, dr, index);
        struct vio_driver_state *vio = &port->vio;
        int err;

        BUG_ON(desc == NULL);
        if (IS_ERR(desc))
                return PTR_ERR(desc);

        if (desc->hdr.state != VIO_DESC_READY)
                return 1;

        dma_rmb();

        viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
               desc->hdr.state, desc->hdr.ack,
               desc->size, desc->ncookies,
               desc->cookies[0].cookie_addr,
               desc->cookies[0].cookie_size);

        err = vnet_rx_one(port, desc);
        if (err == -ECONNRESET)
                return err;
        trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
                          index, desc->hdr.ack);
        desc->hdr.state = VIO_DESC_DONE;
        err = put_rx_desc(port, dr, desc, index);
        if (err < 0)
                return err;
        *needs_ack = desc->hdr.ack;
        return 0;
}

static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
                        u32 start, u32 end, int *npkts, int budget)
{
        struct vio_driver_state *vio = &port->vio;
        int ack_start = -1, ack_end = -1;
        bool send_ack = true;

        end = (end == (u32) -1) ? vio_dring_prev(dr, start)
                                : vio_dring_next(dr, end);

        viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

        while (start != end) {
                int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

                if (err == -ECONNRESET)
                        return err;
                if (err != 0)
                        break;
                (*npkts)++;
                if (ack_start == -1)
                        ack_start = start;
                ack_end = start;
                start = vio_dring_next(dr, start);
                if (ack && start != end) {
                        err = vnet_send_ack(port, dr, ack_start, ack_end,
                                            VIO_DRING_ACTIVE);
                        if (err == -ECONNRESET)
                                return err;
                        ack_start = -1;
                }
                if ((*npkts) >= budget) {
                        send_ack = false;
                        break;
                }
        }
        if (unlikely(ack_start == -1))
                ack_start = ack_end = vio_dring_prev(dr, start);
        if (send_ack) {
                port->napi_resume = false;
                trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
                                               port->vio._peer_sid,
                                               ack_end, *npkts);
                return vnet_send_ack(port, dr, ack_start, ack_end,
                                     VIO_DRING_STOPPED);
        } else {
                trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
                                                port->vio._peer_sid,
                                                ack_end, *npkts);
                port->napi_resume = true;
                port->napi_stop_idx = ack_end;
                return 1;
        }
}

static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
                   int budget)
{
        struct vio_dring_data *pkt = msgbuf;
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
        struct vio_driver_state *vio = &port->vio;

        viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
               pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

        if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
                return 0;
        if (unlikely(pkt->seq != dr->rcv_nxt)) {
                pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
                       pkt->seq, dr->rcv_nxt);
                return 0;
        }

        if (!port->napi_resume)
                dr->rcv_nxt++;

        /* XXX Validate pkt->start_idx and pkt->end_idx XXX */

        return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
                            npkts, budget);
}

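/* Check whether descriptor index "end" is still outstanding, i.e.
 * whether it lies within the [cons, prod) window of the TX ring.
 */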
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
        u32 idx = dr->cons;
        int found = 0;

        while (idx != dr->prod) {
                if (idx == end) {
                        found = 1;
                        break;
                }
                idx = vio_dring_next(dr, idx);
        }
        return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct vio_dring_data *pkt = msgbuf;
        struct net_device *dev;
        u32 end;
        struct vio_net_desc *desc;
        struct netdev_queue *txq;

        if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
                return 0;

        end = pkt->end_idx;
        dev = VNET_PORT_TO_NET_DEVICE(port);
        netif_tx_lock(dev);
        if (unlikely(!idx_is_pending(dr, end))) {
                netif_tx_unlock(dev);
                return 0;
        }

        /* sync for race conditions with vnet_start_xmit() and tell xmit it
         * is time to send a trigger.
         */
        trace_vnet_rx_stopped_ack(port->vio._local_sid,
                                  port->vio._peer_sid, end);
        dr->cons = vio_dring_next(dr, end);
        desc = vio_dring_entry(dr, dr->cons);
        if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
                /* vnet_start_xmit() just populated this dring but missed
                 * sending the "start" LDC message to the consumer.
                 * Send a "start" trigger on its behalf.
                 */
                if (__vnet_tx_trigger(port, dr->cons) > 0)
                        port->start_cons = false;
                else
                        port->start_cons = true;
        } else {
                port->start_cons = true;
        }
        netif_tx_unlock(dev);

        txq = netdev_get_tx_queue(dev, port->q_index);
        if (unlikely(netif_tx_queue_stopped(txq) &&
                     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
                return 1;

        return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
        /* XXX just reset or similar XXX */
        return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
        struct vio_net_mcast_info *pkt = msgbuf;
        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

        if (pkt->tag.stype != VIO_SUBTYPE_ACK)
                pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
                       dev->name,
                       pkt->tag.type,
                       pkt->tag.stype,
                       pkt->tag.stype_env,
                       pkt->tag.sid);

        return 0;
}

/* Got back a STOPPED LDC message on port. If the queue is stopped,
 * wake it up so that we'll send out another START message at the
 * next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
        struct netdev_queue *txq;

        txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
                                  port->q_index);
        __netif_tx_lock(txq, smp_processor_id());
        if (likely(netif_tx_queue_stopped(txq))) {
                struct vio_dring_state *dr;

                dr = &port->vio.drings[VIO_DRIVER_TX_RING];
                netif_tx_wake_queue(txq);
        }
        __netif_tx_unlock(txq);
}

bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
        struct vio_driver_state *vio = &vnet->vio;

        return !!(vio->hs_state & VIO_HS_COMPLETE);
}
EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);

static int vnet_event_napi(struct vnet_port *port, int budget)
{
        struct vio_driver_state *vio = &port->vio;
        int tx_wakeup, err;
        int npkts = 0;
        int event = (port->rx_event & LDC_EVENT_RESET);

ldc_ctrl:
        if (unlikely(event == LDC_EVENT_RESET ||
                     event == LDC_EVENT_UP)) {
                vio_link_state_change(vio, event);

                if (event == LDC_EVENT_RESET) {
                        vnet_port_reset(port);
                        vio_port_up(vio);
                }
                port->rx_event = 0;
                return 0;
        }
        /* We may have multiple LDC events in rx_event. Unroll send_events() */
        event = (port->rx_event & LDC_EVENT_UP);
        port->rx_event &= ~(LDC_EVENT_RESET | LDC_EVENT_UP);
        if (event == LDC_EVENT_UP)
                goto ldc_ctrl;
        event = port->rx_event;
        if (!(event & LDC_EVENT_DATA_READY))
                return 0;

        /* we don't expect any bits other than RESET, UP and DATA_READY */
        BUG_ON(event != LDC_EVENT_DATA_READY);

        tx_wakeup = err = 0;
        while (1) {
                union {
                        struct vio_msg_tag tag;
                        u64 raw[8];
                } msgbuf;

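                /* When resuming a walk cut short by the NAPI budget,
                 * synthesize the VIO_DRING_DATA message we would
                 * otherwise have read from the LDC, restarting just
                 * past napi_stop_idx.
                 */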
                if (port->napi_resume) {
                        struct vio_dring_data *pkt =
                                (struct vio_dring_data *)&msgbuf;
                        struct vio_dring_state *dr =
                                &port->vio.drings[VIO_DRIVER_RX_RING];

                        pkt->tag.type = VIO_TYPE_DATA;
                        pkt->tag.stype = VIO_SUBTYPE_INFO;
                        pkt->tag.stype_env = VIO_DRING_DATA;
                        pkt->seq = dr->rcv_nxt;
                        pkt->start_idx = vio_dring_next(dr, port->napi_stop_idx);
                        pkt->end_idx = -1;
                        goto napi_resume;
                }
                err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
                if (unlikely(err < 0)) {
                        if (err == -ECONNRESET)
                                vio_conn_reset(vio);
                        break;
                }
                if (err == 0)
                        break;
                viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
                       msgbuf.tag.type,
                       msgbuf.tag.stype,
                       msgbuf.tag.stype_env,
                       msgbuf.tag.sid);
                err = vio_validate_sid(vio, &msgbuf.tag);
                if (err < 0)
                        break;
napi_resume:
                if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
                        if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
                                if (!sunvnet_port_is_up_common(port)) {
                                        /* failures like handshake_failure()
                                         * may have cleaned up dring, but
                                         * NAPI polling may bring us here.
                                         */
                                        err = -ECONNRESET;
                                        break;
                                }
                                err = vnet_rx(port, &msgbuf, &npkts, budget);
                                if (npkts >= budget)
                                        break;
                                if (npkts == 0)
                                        break;
                        } else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
                                err = vnet_ack(port, &msgbuf);
                                if (err > 0)
                                        tx_wakeup |= err;
                        } else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
                                err = vnet_nack(port, &msgbuf);
                        }
                } else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
                        if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
                                err = handle_mcast(port, &msgbuf);
                        else
                                err = vio_control_pkt_engine(vio, &msgbuf);
                        if (err)
                                break;
                } else {
                        err = vnet_handle_unknown(port, &msgbuf);
                }
                if (err == -ECONNRESET)
                        break;
        }
        if (unlikely(tx_wakeup && err != -ECONNRESET))
                maybe_tx_wakeup(port);
        return npkts;
}

int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
        struct vnet_port *port = container_of(napi, struct vnet_port, napi);
        struct vio_driver_state *vio = &port->vio;
        int processed = vnet_event_napi(port, budget);

        if (processed < budget) {
                napi_complete(napi);
                port->rx_event &= ~LDC_EVENT_DATA_READY;
                vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
        }
        return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

void sunvnet_event_common(void *arg, int event)
{
        struct vnet_port *port = arg;
        struct vio_driver_state *vio = &port->vio;

        port->rx_event |= event;
        vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
        napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct vio_dring_data hdr = {
                .tag = {
                        .type = VIO_TYPE_DATA,
                        .stype = VIO_SUBTYPE_INFO,
                        .stype_env = VIO_DRING_DATA,
                        .sid = vio_send_sid(&port->vio),
                },
                .dring_ident = dr->ident,
                .start_idx = start,
                .end_idx = (u32) -1,
        };
        int err, delay;
        int retries = 0;

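        /* Retry any STOPPED ack for the RX ring that previously failed
         * to send (see vnet_send_ack()) before issuing a new start
         * trigger.
         */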
        if (port->stop_rx) {
                trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
                                                  port->vio._peer_sid,
                                                  port->stop_rx_idx, -1);
                err = vnet_send_ack(port,
                                    &port->vio.drings[VIO_DRIVER_RX_RING],
                                    port->stop_rx_idx, -1,
                                    VIO_DRING_STOPPED);
                if (err <= 0)
                        return err;
        }

        hdr.seq = dr->snd_nxt;
        delay = 1;
        do {
                err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
                if (err > 0) {
                        dr->snd_nxt++;
                        break;
                }
                udelay(delay);
                if ((delay <<= 1) > 128)
                        delay = 128;
                if (retries++ > VNET_MAX_RETRIES)
                        break;
        } while (err == -EAGAIN);
        trace_vnet_tx_trigger(port->vio._local_sid,
                              port->vio._peer_sid, start, err);

        return err;
}

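/* Walk the TX ring backwards from prod, unmapping and collecting the
 * skbs of completed (DONE) descriptors into a list for the caller to
 * free; *pending returns the number of descriptors still READY.
 */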
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
                                          unsigned *pending)
{
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct sk_buff *skb = NULL;
        int i, txi;

        *pending = 0;

        txi = dr->prod;
        for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
                struct vio_net_desc *d;

                --txi;
                if (txi < 0)
                        txi = VNET_TX_RING_SIZE - 1;

                d = vio_dring_entry(dr, txi);

                if (d->hdr.state == VIO_DESC_READY) {
                        (*pending)++;
                        continue;
                }
                if (port->tx_bufs[txi].skb) {
                        if (d->hdr.state != VIO_DESC_DONE)
                                pr_notice("invalid ring buffer state %d\n",
                                          d->hdr.state);
                        BUG_ON(port->tx_bufs[txi].skb->next);

                        port->tx_bufs[txi].skb->next = skb;
                        skb = port->tx_bufs[txi].skb;
                        port->tx_bufs[txi].skb = NULL;

                        ldc_unmap(port->vio.lp,
                                  port->tx_bufs[txi].cookies,
                                  port->tx_bufs[txi].ncookies);
                } else if (d->hdr.state == VIO_DESC_FREE) {
                        break;
                }
                d->hdr.state = VIO_DESC_FREE;
        }
        return skb;
}

static inline void vnet_free_skbs(struct sk_buff *skb)
{
        struct sk_buff *next;

        while (skb) {
                next = skb->next;
                skb->next = NULL;
                dev_kfree_skb(skb);
                skb = next;
        }
}

void sunvnet_clean_timer_expire_common(unsigned long port0)
{
        struct vnet_port *port = (struct vnet_port *)port0;
        struct sk_buff *freeskbs;
        unsigned pending;

        netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
        freeskbs = vnet_clean_tx_ring(port, &pending);
        netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

        vnet_free_skbs(freeskbs);

        if (pending)
                (void)mod_timer(&port->clean_timer,
                                jiffies + VNET_CLEAN_TIMEOUT);
        else
                del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
                               struct ldc_trans_cookie *cookies, int ncookies,
                               unsigned int map_perm)
{
        int i, nc, err, blen;

        /* header */
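        /* Map at least ETH_ZLEN bytes, including the VNET_PACKET_SKIP
         * prefix, rounded up so the LDC copy length is a multiple of 8
         * (this may over-pad by 8 bytes when already aligned).
         */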
        blen = skb_headlen(skb);
        if (blen < ETH_ZLEN)
                blen = ETH_ZLEN;
        blen += VNET_PACKET_SKIP;
        blen += 8 - (blen & 7);

        err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
                             ncookies, map_perm);
        if (err < 0)
                return err;
        nc = err;

        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];
                u8 *vaddr;

                if (nc < ncookies) {
                        vaddr = kmap_atomic(skb_frag_page(f));
                        blen = skb_frag_size(f);
                        blen += 8 - (blen & 7);
                        err = ldc_map_single(lp, vaddr + f->page_offset,
                                             blen, cookies + nc, ncookies - nc,
                                             map_perm);
                        kunmap_atomic(vaddr);
                } else {
                        err = -EMSGSIZE;
                }

                if (err < 0) {
                        ldc_unmap(lp, cookies, nc);
                        return err;
                }
                nc += err;
        }
        return nc;
}

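/* Ensure the skb meets the LDC mapping constraints: data starting
 * VNET_PACKET_SKIP bytes past an 8-byte boundary, tailroom for the
 * pad, and no more fragments than available cookies. Otherwise copy
 * it into a freshly aligned skb, recomputing any pending partial
 * checksum along the way.
 */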
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
        struct sk_buff *nskb;
        int i, len, pad, docopy;

        len = skb->len;
        pad = 0;
        if (len < ETH_ZLEN) {
                pad += ETH_ZLEN - skb->len;
                len += pad;
        }
        len += VNET_PACKET_SKIP;
        pad += 8 - (len & 7);

        /* make sure we have enough cookies and alignment in every frag */
        docopy = skb_shinfo(skb)->nr_frags >= ncookies;
        for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
                skb_frag_t *f = &skb_shinfo(skb)->frags[i];

                docopy |= f->page_offset & 7;
        }
        if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
            skb_tailroom(skb) < pad ||
            skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
                int start = 0, offset;
                __wsum csum;

                len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
                nskb = alloc_and_align_skb(skb->dev, len);
                if (nskb == NULL) {
                        dev_kfree_skb(skb);
                        return NULL;
                }
                skb_reserve(nskb, VNET_PACKET_SKIP);

                nskb->protocol = skb->protocol;
                offset = skb_mac_header(skb) - skb->data;
                skb_set_mac_header(nskb, offset);
                offset = skb_network_header(skb) - skb->data;
                skb_set_network_header(nskb, offset);
                offset = skb_transport_header(skb) - skb->data;
                skb_set_transport_header(nskb, offset);

                offset = 0;
                nskb->csum_offset = skb->csum_offset;
                nskb->ip_summed = skb->ip_summed;

                if (skb->ip_summed == CHECKSUM_PARTIAL)
                        start = skb_checksum_start_offset(skb);
                if (start) {
                        struct iphdr *iph = ip_hdr(nskb);
                        int offset = start + nskb->csum_offset;

                        if (skb_copy_bits(skb, 0, nskb->data, start)) {
                                dev_kfree_skb(nskb);
                                dev_kfree_skb(skb);
                                return NULL;
                        }
                        *(__sum16 *)(skb->data + offset) = 0;
                        csum = skb_copy_and_csum_bits(skb, start,
                                                      nskb->data + start,
                                                      skb->len - start, 0);
                        if (iph->protocol == IPPROTO_TCP ||
                            iph->protocol == IPPROTO_UDP) {
                                csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
                                                         skb->len - start,
                                                         iph->protocol, csum);
                        }
                        *(__sum16 *)(nskb->data + offset) = csum;

                        nskb->ip_summed = CHECKSUM_NONE;
                } else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
                        dev_kfree_skb(nskb);
                        dev_kfree_skb(skb);
                        return NULL;
                }
                (void)skb_put(nskb, skb->len);
                if (skb_is_gso(skb)) {
                        skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
                        skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
                }
                nskb->queue_mapping = skb->queue_mapping;
                dev_kfree_skb(skb);
                skb = nskb;
        }
        return skb;
}

static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
                                struct vnet_port *(*vnet_tx_port)
                                (struct sk_buff *, struct net_device *))
{
        struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
        struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        struct sk_buff *segs;
        int maclen, datalen;
        int status;
        int gso_size, gso_type, gso_segs;
        int hlen = skb_transport_header(skb) - skb_mac_header(skb);
        int proto = IPPROTO_IP;

        if (skb->protocol == htons(ETH_P_IP))
                proto = ip_hdr(skb)->protocol;
        else if (skb->protocol == htons(ETH_P_IPV6))
                proto = ipv6_hdr(skb)->nexthdr;

        if (proto == IPPROTO_TCP) {
                hlen += tcp_hdr(skb)->doff * 4;
        } else if (proto == IPPROTO_UDP) {
                hlen += sizeof(struct udphdr);
        } else {
                pr_err("vnet_handle_offloads GSO with unknown transport "
                       "protocol %d tproto %d\n", skb->protocol, proto);
                hlen = 128; /* XXX */
        }
        datalen = port->tsolen - hlen;

        gso_size = skb_shinfo(skb)->gso_size;
        gso_type = skb_shinfo(skb)->gso_type;
        gso_segs = skb_shinfo(skb)->gso_segs;

        if (port->tso && gso_size < datalen)
                gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

        if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
                struct netdev_queue *txq;

                txq = netdev_get_tx_queue(dev, port->q_index);
                netif_tx_stop_queue(txq);
                if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
                        return NETDEV_TX_BUSY;
                netif_tx_wake_queue(txq);
        }

        maclen = skb_network_header(skb) - skb_mac_header(skb);
        skb_pull(skb, maclen);

        if (port->tso && gso_size < datalen) {
                if (skb_unclone(skb, GFP_ATOMIC))
                        goto out_dropped;

                /* segment to TSO size */
                skb_shinfo(skb)->gso_size = datalen;
                skb_shinfo(skb)->gso_segs = gso_segs;
        }
        segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
        if (IS_ERR(segs))
                goto out_dropped;

        skb_push(skb, maclen);
        skb_reset_mac_header(skb);

        status = 0;
        while (segs) {
                struct sk_buff *curr = segs;

                segs = segs->next;
                curr->next = NULL;
                if (port->tso && curr->len > dev->mtu) {
                        skb_shinfo(curr)->gso_size = gso_size;
                        skb_shinfo(curr)->gso_type = gso_type;
                        skb_shinfo(curr)->gso_segs =
                                DIV_ROUND_UP(curr->len - hlen, gso_size);
                } else {
                        skb_shinfo(curr)->gso_size = 0;
                }

                skb_push(curr, maclen);
                skb_reset_mac_header(curr);
                memcpy(skb_mac_header(curr), skb_mac_header(skb),
                       maclen);
                curr->csum_start = skb_transport_header(curr) - curr->head;
                if (ip_hdr(curr)->protocol == IPPROTO_TCP)
                        curr->csum_offset = offsetof(struct tcphdr, check);
                else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
                        curr->csum_offset = offsetof(struct udphdr, check);

                if (!(status & NETDEV_TX_MASK))
                        status = sunvnet_start_xmit_common(curr, dev,
                                                           vnet_tx_port);
                if (status & NETDEV_TX_MASK)
                        dev_kfree_skb_any(curr);
        }

        if (!(status & NETDEV_TX_MASK))
                dev_kfree_skb_any(skb);
        return status;
out_dropped:
        dev->stats.tx_dropped++;
        dev_kfree_skb_any(skb);
        return NETDEV_TX_OK;
}

int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
                              struct vnet_port *(*vnet_tx_port)
                              (struct sk_buff *, struct net_device *))
{
        struct vnet_port *port = NULL;
        struct vio_dring_state *dr;
        struct vio_net_desc *d;
        unsigned int len;
        struct sk_buff *freeskbs = NULL;
        int i, err, txi;
        unsigned pending = 0;
        struct netdev_queue *txq;

        rcu_read_lock();
        port = vnet_tx_port(skb, dev);
        if (unlikely(!port)) {
                rcu_read_unlock();
                goto out_dropped;
        }

        if (skb_is_gso(skb) && skb->len > port->tsolen) {
                err = vnet_handle_offloads(port, skb, vnet_tx_port);
                rcu_read_unlock();
                return err;
        }

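        /* A non-GSO frame larger than the negotiated MTU cannot be
         * segmented; emulate path-MTU discovery by reflecting an ICMP
         * FRAG_NEEDED (or IPv6 PKT_TOOBIG) back at the sender, then
         * drop the frame.
         */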
        if (!skb_is_gso(skb) && skb->len > port->rmtu) {
                unsigned long localmtu = port->rmtu - ETH_HLEN;

                if (vio_version_after_eq(&port->vio, 1, 3))
                        localmtu -= VLAN_HLEN;

                if (skb->protocol == htons(ETH_P_IP)) {
                        struct flowi4 fl4;
                        struct rtable *rt = NULL;

                        memset(&fl4, 0, sizeof(fl4));
                        fl4.flowi4_oif = dev->ifindex;
                        fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
                        fl4.daddr = ip_hdr(skb)->daddr;
                        fl4.saddr = ip_hdr(skb)->saddr;

                        rt = ip_route_output_key(dev_net(dev), &fl4);
                        if (!IS_ERR(rt)) {
                                skb_dst_set(skb, &rt->dst);
                                icmp_send(skb, ICMP_DEST_UNREACH,
                                          ICMP_FRAG_NEEDED,
                                          htonl(localmtu));
                        }
                }
#if IS_ENABLED(CONFIG_IPV6)
                else if (skb->protocol == htons(ETH_P_IPV6))
                        icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
                goto out_dropped;
        }

        skb = vnet_skb_shape(skb, 2);

        if (unlikely(!skb))
                goto out_dropped;

        if (skb->ip_summed == CHECKSUM_PARTIAL)
                vnet_fullcsum(skb);

        dr = &port->vio.drings[VIO_DRIVER_TX_RING];
        i = skb_get_queue_mapping(skb);
        txq = netdev_get_tx_queue(dev, i);
        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                if (!netif_tx_queue_stopped(txq)) {
                        netif_tx_stop_queue(txq);

                        /* This is a hard error, log it. */
                        netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
                        dev->stats.tx_errors++;
                }
                rcu_read_unlock();
                return NETDEV_TX_BUSY;
        }

        d = vio_dring_cur(dr);

        txi = dr->prod;

        freeskbs = vnet_clean_tx_ring(port, &pending);

        BUG_ON(port->tx_bufs[txi].skb);

        len = skb->len;
        if (len < ETH_ZLEN)
                len = ETH_ZLEN;

        err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
                           (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
        if (err < 0) {
                netdev_info(dev, "tx buffer map error %d\n", err);
                goto out_dropped;
        }

        port->tx_bufs[txi].skb = skb;
        skb = NULL;
        port->tx_bufs[txi].ncookies = err;

        /* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
         * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
         * the protocol itself does not require it as long as the peer
         * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
         *
         * An ACK for every packet in the ring is expensive as the
         * sending of LDC messages is slow and affects performance.
         */
        d->hdr.ack = VIO_ACK_DISABLE;
        d->size = len;
        d->ncookies = port->tx_bufs[txi].ncookies;
        for (i = 0; i < d->ncookies; i++)
                d->cookies[i] = port->tx_bufs[txi].cookies[i];
        if (vio_version_after_eq(&port->vio, 1, 7)) {
                struct vio_net_dext *dext = vio_net_ext(d);

                memset(dext, 0, sizeof(*dext));
                if (skb_is_gso(port->tx_bufs[txi].skb)) {
                        dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
                                ->gso_size;
                        dext->flags |= VNET_PKT_IPV4_LSO;
                }
                if (vio_version_after_eq(&port->vio, 1, 8) &&
                    !port->switch_port) {
                        dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
                        dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
                }
        }

        /* This has to be a non-SMP write barrier because we are writing
         * to memory which is shared with the peer LDOM.
         */
        dma_wmb();

        d->hdr.state = VIO_DESC_READY;

        /* Exactly one LDC "start" trigger (for dr->cons) needs to be sent
         * to notify the consumer that some descriptors are READY.
         * After that "start" trigger, no additional triggers are needed
         * until a DRING_STOPPED is received from the consumer. The dr->cons
         * field (set up by vnet_ack()) has the value of the next dring index
         * that has not yet been ack-ed. We send a "start" trigger here
         * if, and only if, start_cons is true (reset it afterward). Conversely,
         * vnet_ack() should check if the dring corresponding to cons
         * is marked READY, but start_cons was false.
         * If so, vnet_ack() should send out the missed "start" trigger.
         *
         * Note that the dma_wmb() above makes sure the cookies et al. are
         * not globally visible before the VIO_DESC_READY, and that the
         * stores are ordered correctly by the compiler. The consumer will
         * not proceed until the VIO_DESC_READY is visible, assuring that
         * the consumer does not observe anything related to descriptors
         * out of order. The HV trap from the LDC start trigger is the
         * producer-to-consumer announcement that work is available to the
         * consumer.
         */
        if (!port->start_cons) { /* previous trigger suffices */
                trace_vnet_skip_tx_trigger(port->vio._local_sid,
                                           port->vio._peer_sid, dr->cons);
                goto ldc_start_done;
        }

        err = __vnet_tx_trigger(port, dr->cons);
        if (unlikely(err < 0)) {
                netdev_info(dev, "TX trigger error %d\n", err);
                d->hdr.state = VIO_DESC_FREE;
                skb = port->tx_bufs[txi].skb;
                port->tx_bufs[txi].skb = NULL;
                dev->stats.tx_carrier_errors++;
                goto out_dropped;
        }

ldc_start_done:
        port->start_cons = false;

        dev->stats.tx_packets++;
        dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

        dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
        if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
                netif_tx_stop_queue(txq);
                if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
                        netif_tx_wake_queue(txq);
        }

        (void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
        rcu_read_unlock();

        vnet_free_skbs(freeskbs);

        return NETDEV_TX_OK;

out_dropped:
        if (pending)
                (void)mod_timer(&port->clean_timer,
                                jiffies + VNET_CLEAN_TIMEOUT);
        else if (port)
                del_timer(&port->clean_timer);
        if (port)
                rcu_read_unlock();
        if (skb)
                dev_kfree_skb(skb);
        vnet_free_skbs(freeskbs);
        dev->stats.tx_dropped++;
        return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

void sunvnet_tx_timeout_common(struct net_device *dev)
{
        /* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

int sunvnet_open_common(struct net_device *dev)
{
        netif_carrier_on(dev);
        netif_tx_start_all_queues(dev);

        return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

int sunvnet_close_common(struct net_device *dev)
{
        netif_tx_stop_all_queues(dev);
        netif_carrier_off(dev);

        return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
        struct vnet_mcast_entry *m;

        for (m = vp->mcast_list; m; m = m->next) {
                if (ether_addr_equal(m->addr, addr))
                        return m;
        }
        return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
        struct netdev_hw_addr *ha;

        netdev_for_each_mc_addr(ha, dev) {
                struct vnet_mcast_entry *m;

                m = __vnet_mc_find(vp, ha->addr);
                if (m) {
                        m->hit = 1;
                        continue;
                }

                if (!m) {
                        m = kzalloc(sizeof(*m), GFP_ATOMIC);
                        if (!m)
                                continue;
                        memcpy(m->addr, ha->addr, ETH_ALEN);
                        m->hit = 1;

                        m->next = vp->mcast_list;
                        vp->mcast_list = m;
                }
        }
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
        struct vio_net_mcast_info info;
        struct vnet_mcast_entry *m, **pp;
        int n_addrs;

        memset(&info, 0, sizeof(info));

        info.tag.type = VIO_TYPE_CTRL;
        info.tag.stype = VIO_SUBTYPE_INFO;
        info.tag.stype_env = VNET_MCAST_INFO;
        info.tag.sid = vio_send_sid(&port->vio);
        info.set = 1;

        n_addrs = 0;
        for (m = vp->mcast_list; m; m = m->next) {
                if (m->sent)
                        continue;
                m->sent = 1;
                memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
                       m->addr, ETH_ALEN);
                if (++n_addrs == VNET_NUM_MCAST) {
                        info.count = n_addrs;

                        (void) vio_ldc_send(&port->vio, &info,
                                            sizeof(info));
                        n_addrs = 0;
                }
        }
        if (n_addrs) {
                info.count = n_addrs;
                (void) vio_ldc_send(&port->vio, &info, sizeof(info));
        }

        info.set = 0;

        n_addrs = 0;
        pp = &vp->mcast_list;
        while ((m = *pp) != NULL) {
                if (m->hit) {
                        m->hit = 0;
                        pp = &m->next;
                        continue;
                }

                memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
                       m->addr, ETH_ALEN);
                if (++n_addrs == VNET_NUM_MCAST) {
                        info.count = n_addrs;
                        (void) vio_ldc_send(&port->vio, &info,
                                            sizeof(info));
                        n_addrs = 0;
                }

                *pp = m->next;
                kfree(m);
        }
        if (n_addrs) {
                info.count = n_addrs;
                (void) vio_ldc_send(&port->vio, &info, sizeof(info));
        }
}

void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
{
        struct vnet_port *port;

        rcu_read_lock();
        list_for_each_entry_rcu(port, &vp->port_list, list) {
                if (port->switch_port) {
                        __update_mc_list(vp, dev);
                        __send_mc_list(vp, port);
                        break;
                }
        }
        rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);

int sunvnet_change_mtu_common(struct net_device *dev, int new_mtu)
{
        if (new_mtu < 68 || new_mtu > 65535)
                return -EINVAL;

        dev->mtu = new_mtu;
        return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_change_mtu_common);

int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
        return -EINVAL;
}
EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);

void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
{
        struct vio_dring_state *dr;
        int i;

        dr = &port->vio.drings[VIO_DRIVER_TX_RING];

        if (dr->base == NULL)
                return;

        for (i = 0; i < VNET_TX_RING_SIZE; i++) {
                struct vio_net_desc *d;
                void *skb = port->tx_bufs[i].skb;

                if (!skb)
                        continue;

                d = vio_dring_entry(dr, i);

                ldc_unmap(port->vio.lp,
                          port->tx_bufs[i].cookies,
                          port->tx_bufs[i].ncookies);
                dev_kfree_skb(skb);
                port->tx_bufs[i].skb = NULL;
                d->hdr.state = VIO_DESC_FREE;
        }
        ldc_free_exp_dring(port->vio.lp, dr->base,
                           (dr->entry_size * dr->num_entries),
                           dr->cookies, dr->ncookies);
        dr->base = NULL;
        dr->entry_size = 0;
        dr->num_entries = 0;
        dr->pending = 0;
        dr->ncookies = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);

static void vnet_port_reset(struct vnet_port *port)
{
        del_timer(&port->clean_timer);
        sunvnet_port_free_tx_bufs_common(port);
        port->rmtu = 0;
        port->tso = true;
        port->tsolen = 0;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
        struct vio_dring_state *dr;
        unsigned long len, elen;
        int i, err, ncookies;
        void *dring;

        dr = &port->vio.drings[VIO_DRIVER_TX_RING];

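        /* Each TX descriptor carries at most two cookies; protocol
         * version 1.7 and later appends the vio_net_dext extension
         * area to every entry.
         */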
        elen = sizeof(struct vio_net_desc) +
               sizeof(struct ldc_trans_cookie) * 2;
        if (vio_version_after_eq(&port->vio, 1, 7))
                elen += sizeof(struct vio_net_dext);
        len = VNET_TX_RING_SIZE * elen;

        ncookies = VIO_MAX_RING_COOKIES;
        dring = ldc_alloc_exp_dring(port->vio.lp, len,
                                    dr->cookies, &ncookies,
                                    (LDC_MAP_SHADOW |
                                     LDC_MAP_DIRECT |
                                     LDC_MAP_RW));
        if (IS_ERR(dring)) {
                err = PTR_ERR(dring);
                goto err_out;
        }

        dr->base = dring;
        dr->entry_size = elen;
        dr->num_entries = VNET_TX_RING_SIZE;
        dr->prod = dr->cons = 0;
        port->start_cons = true; /* need an initial trigger */
        dr->pending = VNET_TX_RING_SIZE;
        dr->ncookies = ncookies;

        for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
                struct vio_net_desc *d;

                d = vio_dring_entry(dr, i);
                d->hdr.state = VIO_DESC_FREE;
        }
        return 0;

err_out:
        sunvnet_port_free_tx_bufs_common(port);

        return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
{
        struct vnet_port *port;
        unsigned long flags;

        spin_lock_irqsave(&vp->lock, flags);
        if (!list_empty(&vp->port_list)) {
                port = list_entry(vp->port_list.next, struct vnet_port, list);
                napi_schedule(&port->napi);
        }
        spin_unlock_irqrestore(&vp->lock, flags);
}
EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
#endif

void sunvnet_port_add_txq_common(struct vnet_port *port)
{
        struct vnet *vp = port->vp;
        int n;

        n = vp->nports++;
        n = n & (VNET_MAX_TXQS - 1);
        port->q_index = n;
        netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
                                                port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);

void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
        port->vp->nports--;
        netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
                                                port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);