/* sunvnet.c: Sun LDOM Virtual Network Driver.
 *
 * Copyright (C) 2007, 2008 David S. Miller <davem@davemloft.net>
 * Copyright (C) 2016-2017 Oracle. All rights reserved.
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/netdevice.h>
#include <linux/ethtool.h>
#include <linux/etherdevice.h>
#include <linux/mutex.h>
#include <linux/highmem.h>
#include <linux/if_vlan.h>
#define CREATE_TRACE_POINTS
#include <trace/events/sunvnet.h>

#if IS_ENABLED(CONFIG_IPV6)
#include <linux/icmpv6.h>
#endif

#include <net/ip.h>
#include <net/icmp.h>
#include <net/route.h>

#include <asm/vio.h>
#include <asm/ldc.h>

#include "sunvnet_common.h"

/* Heuristic for the number of times to exponentially backoff and
 * retry sending an LDC trigger when EAGAIN is encountered
 */
#define VNET_MAX_RETRIES 10
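
/* With the doubling udelay() used by the send loops below (1, 2, 4, ...
 * microseconds, capped at 128), ten retries bound a stuck trigger to
 * on the order of a millisecond of busy-waiting before we give up.
 */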

MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
MODULE_DESCRIPTION("Sun LDOM virtual network support library");
MODULE_LICENSE("GPL");
MODULE_VERSION("1.1");

static int __vnet_tx_trigger(struct vnet_port *port, u32 start);

static inline u32 vnet_tx_dring_avail(struct vio_dring_state *dr)
{
	return vio_dring_avail(dr, VNET_TX_RING_SIZE);
}

static int vnet_handle_unknown(struct vnet_port *port, void *arg)
{
	struct vio_msg_tag *pkt = arg;

	pr_err("Received unknown msg [%02x:%02x:%04x:%08x]\n",
	       pkt->type, pkt->stype, pkt->stype_env, pkt->sid);
	pr_err("Resetting connection\n");

	ldc_disconnect(port->vio.lp);

	return -ECONNRESET;
}

static int vnet_port_alloc_tx_ring(struct vnet_port *port);

int sunvnet_send_attr_common(struct vio_driver_state *vio)
{
	struct vnet_port *port = to_vnet_port(vio);
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_net_attr_info pkt;
	int framelen = ETH_FRAME_LEN;
	int i, err;

	err = vnet_port_alloc_tx_ring(to_vnet_port(vio));
	if (err)
		return err;

	memset(&pkt, 0, sizeof(pkt));
	pkt.tag.type = VIO_TYPE_CTRL;
	pkt.tag.stype = VIO_SUBTYPE_INFO;
	pkt.tag.stype_env = VIO_ATTR_INFO;
	pkt.tag.sid = vio_send_sid(vio);
	if (vio_version_before(vio, 1, 2))
		pkt.xfer_mode = VIO_DRING_MODE;
	else
		pkt.xfer_mode = VIO_NEW_DRING_MODE;
	pkt.addr_type = VNET_ADDR_ETHERMAC;
	pkt.ack_freq = 0;
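	/* Pack the 6-byte MAC address big-endian into the low 48 bits of
	 * pkt.addr; e.g. 02:04:06:08:0a:0c becomes 0x020406080a0c.
	 */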
	for (i = 0; i < 6; i++)
		pkt.addr |= (u64)dev->dev_addr[i] << ((5 - i) * 8);
	if (vio_version_after(vio, 1, 3)) {
		if (port->rmtu) {
			port->rmtu = min(VNET_MAXPACKET, port->rmtu);
			pkt.mtu = port->rmtu;
		} else {
			port->rmtu = VNET_MAXPACKET;
			pkt.mtu = port->rmtu;
		}
		if (vio_version_after_eq(vio, 1, 6))
			pkt.options = VIO_TX_DRING;
	} else if (vio_version_before(vio, 1, 3)) {
		pkt.mtu = framelen;
	} else { /* v1.3 */
		pkt.mtu = framelen + VLAN_HLEN;
	}

	pkt.cflags = 0;
	if (vio_version_after_eq(vio, 1, 7) && port->tso) {
		pkt.cflags |= VNET_LSO_IPV4_CAPAB;
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		pkt.ipv4_lso_maxlen = port->tsolen;
	}

	pkt.plnk_updt = PHYSLINK_UPDATE_NONE;

	viodbg(HS, "SEND NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       "cflags[0x%04x] lso_max[%u]\n",
	       pkt.xfer_mode, pkt.addr_type,
	       (unsigned long long)pkt.addr,
	       pkt.ack_freq, pkt.plnk_updt, pkt.options,
	       (unsigned long long)pkt.mtu, pkt.cflags, pkt.ipv4_lso_maxlen);

	return vio_ldc_send(vio, &pkt, sizeof(pkt));
}
EXPORT_SYMBOL_GPL(sunvnet_send_attr_common);

static int handle_attr_info(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	struct vnet_port *port = to_vnet_port(vio);
	u64 localmtu;
	u8 xfer_mode;

	viodbg(HS, "GOT NET ATTR xmode[0x%x] atype[0x%x] addr[%llx] "
	       "ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] mtu[%llu] "
	       " (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.sid = vio_send_sid(vio);

	xfer_mode = pkt->xfer_mode;
	/* for version < 1.2, VIO_DRING_MODE = 0x3 and no bitmask */
	if (vio_version_before(vio, 1, 2) && xfer_mode == VIO_DRING_MODE)
		xfer_mode = VIO_NEW_DRING_MODE;

	/* MTU negotiation:
	 *	< v1.3 - ETH_FRAME_LEN exactly
	 *	> v1.3 - MIN(pkt.mtu, VNET_MAXPACKET, port->rmtu) and change
	 *		 pkt->mtu for ACK
	 *	= v1.3 - ETH_FRAME_LEN + VLAN_HLEN exactly
	 */
	if (vio_version_before(vio, 1, 3)) {
		localmtu = ETH_FRAME_LEN;
	} else if (vio_version_after(vio, 1, 3)) {
		localmtu = port->rmtu ? port->rmtu : VNET_MAXPACKET;
		localmtu = min(pkt->mtu, localmtu);
		pkt->mtu = localmtu;
	} else { /* v1.3 */
		localmtu = ETH_FRAME_LEN + VLAN_HLEN;
	}
	port->rmtu = localmtu;

	/* LSO negotiation */
	if (vio_version_after_eq(vio, 1, 7))
		port->tso &= !!(pkt->cflags & VNET_LSO_IPV4_CAPAB);
	else
		port->tso = false;
	if (port->tso) {
		if (!port->tsolen)
			port->tsolen = VNET_MAXTSO;
		port->tsolen = min(port->tsolen, pkt->ipv4_lso_maxlen);
		if (port->tsolen < VNET_MINTSO) {
			port->tso = false;
			port->tsolen = 0;
			pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		}
		pkt->ipv4_lso_maxlen = port->tsolen;
	} else {
		pkt->cflags &= ~VNET_LSO_IPV4_CAPAB;
		pkt->ipv4_lso_maxlen = 0;
		port->tsolen = 0;
	}

	/* for version >= 1.6, ACK packet mode we support */
	if (vio_version_after_eq(vio, 1, 6)) {
		pkt->xfer_mode = VIO_NEW_DRING_MODE;
		pkt->options = VIO_TX_DRING;
	}

	if (!(xfer_mode & VIO_NEW_DRING_MODE) ||
	    pkt->addr_type != VNET_ADDR_ETHERMAC ||
	    pkt->mtu != localmtu) {
		viodbg(HS, "SEND NET ATTR NACK\n");

		pkt->tag.stype = VIO_SUBTYPE_NACK;

		(void)vio_ldc_send(vio, pkt, sizeof(*pkt));

		return -ECONNRESET;
	}

	viodbg(HS, "SEND NET ATTR ACK xmode[0x%x] atype[0x%x] "
	       "addr[%llx] ackfreq[%u] plnk_updt[0x%02x] opts[0x%02x] "
	       "mtu[%llu] (rmtu[%llu]) cflags[0x%04x] lso_max[%u]\n",
	       pkt->xfer_mode, pkt->addr_type,
	       (unsigned long long)pkt->addr,
	       pkt->ack_freq, pkt->plnk_updt, pkt->options,
	       (unsigned long long)pkt->mtu, port->rmtu, pkt->cflags,
	       pkt->ipv4_lso_maxlen);

	pkt->tag.stype = VIO_SUBTYPE_ACK;

	return vio_ldc_send(vio, pkt, sizeof(*pkt));
}

static int handle_attr_ack(struct vio_driver_state *vio,
			   struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR ACK\n");

	return 0;
}

static int handle_attr_nack(struct vio_driver_state *vio,
			    struct vio_net_attr_info *pkt)
{
	viodbg(HS, "GOT NET ATTR NACK\n");

	return -ECONNRESET;
}

int sunvnet_handle_attr_common(struct vio_driver_state *vio, void *arg)
{
	struct vio_net_attr_info *pkt = arg;

	switch (pkt->tag.stype) {
	case VIO_SUBTYPE_INFO:
		return handle_attr_info(vio, pkt);

	case VIO_SUBTYPE_ACK:
		return handle_attr_ack(vio, pkt);

	case VIO_SUBTYPE_NACK:
		return handle_attr_nack(vio, pkt);

	default:
		return -ECONNRESET;
	}
}
EXPORT_SYMBOL_GPL(sunvnet_handle_attr_common);

void sunvnet_handshake_complete_common(struct vio_driver_state *vio)
{
	struct vio_dring_state *dr;

	dr = &vio->drings[VIO_DRIVER_RX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;

	dr = &vio->drings[VIO_DRIVER_TX_RING];
	dr->rcv_nxt = 1;
	dr->snd_nxt = 1;
}
EXPORT_SYMBOL_GPL(sunvnet_handshake_complete_common);

/* The hypervisor interface that implements copying to/from imported
 * memory from another domain requires that copies are done to 8-byte
 * aligned buffers, and that the lengths of such copies are also 8-byte
 * multiples.
 *
 * So we align skb->data to an 8-byte multiple and pad-out the data
 * area so we can round the copy length up to the next multiple of
 * 8 for the copy.
 *
 * The transmitter puts the actual start of the packet 6 bytes into
 * the buffer it sends over, so that the IP headers after the ethernet
 * header are aligned properly. These 6 bytes are not in the descriptor
 * length, they are simply implied. This offset is represented using
 * the VNET_PACKET_SKIP macro.
 */
static struct sk_buff *alloc_and_align_skb(struct net_device *dev,
					   unsigned int len)
{
	struct sk_buff *skb;
	unsigned long addr, off;

	skb = netdev_alloc_skb(dev, len + VNET_PACKET_SKIP + 8 + 8);
	if (unlikely(!skb))
		return NULL;

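	/* Shift skb->data up to the next 8-byte boundary; the extra
	 * 8 + 8 bytes in the allocation leave room both for this shift
	 * and for rounding the copy length up in the callers.
	 */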
	addr = (unsigned long)skb->data;
	off = ((addr + 7UL) & ~7UL) - addr;
	if (off)
		skb_reserve(skb, off);

	return skb;
}

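/* Compute a full software checksum for an IPv4 TCP/UDP frame,
 * including the pseudo-header fold done by csum_tcpudp_magic().
 */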
static inline void vnet_fullcsum(struct sk_buff *skb)
{
	struct iphdr *iph = ip_hdr(skb);
	int offset = skb_transport_offset(skb);

	if (skb->protocol != htons(ETH_P_IP))
		return;
	if (iph->protocol != IPPROTO_TCP &&
	    iph->protocol != IPPROTO_UDP)
		return;
	skb->ip_summed = CHECKSUM_NONE;
	skb->csum_level = 1;
	skb->csum = 0;
	if (iph->protocol == IPPROTO_TCP) {
		struct tcphdr *ptcp = tcp_hdr(skb);

		ptcp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		ptcp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_TCP,
						skb->csum);
	} else if (iph->protocol == IPPROTO_UDP) {
		struct udphdr *pudp = udp_hdr(skb);

		pudp->check = 0;
		skb->csum = skb_checksum(skb, offset, skb->len - offset, 0);
		pudp->check = csum_tcpudp_magic(iph->saddr, iph->daddr,
						skb->len - offset, IPPROTO_UDP,
						skb->csum);
	}
}

static int vnet_rx_one(struct vnet_port *port, struct vio_net_desc *desc)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	unsigned int len = desc->size;
	unsigned int copy_len;
	struct sk_buff *skb;
	int maxlen;
	int err;

	err = -EMSGSIZE;
	if (port->tso && port->tsolen > port->rmtu)
		maxlen = port->tsolen;
	else
		maxlen = port->rmtu;
	if (unlikely(len < ETH_ZLEN || len > maxlen)) {
		dev->stats.rx_length_errors++;
		goto out_dropped;
	}

	skb = alloc_and_align_skb(dev, len);
	err = -ENOMEM;
	if (unlikely(!skb)) {
		dev->stats.rx_missed_errors++;
		goto out_dropped;
	}

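	/* Round the copy up to an 8-byte multiple as the hypervisor copy
	 * interface requires; e.g. a 60-byte frame plus the 6-byte
	 * VNET_PACKET_SKIP comes in as a 72-byte copy, then is trimmed.
	 */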
	copy_len = (len + VNET_PACKET_SKIP + 7U) & ~7U;
	skb_put(skb, copy_len);
	err = ldc_copy(port->vio.lp, LDC_COPY_IN,
		       skb->data, copy_len, 0,
		       desc->cookies, desc->ncookies);
	if (unlikely(err < 0)) {
		dev->stats.rx_frame_errors++;
		goto out_free_skb;
	}

	skb_pull(skb, VNET_PACKET_SKIP);
	skb_trim(skb, len);
	skb->protocol = eth_type_trans(skb, dev);

	if (vio_version_after_eq(&port->vio, 1, 8)) {
		struct vio_net_dext *dext = vio_net_ext(desc);

		skb_reset_network_header(skb);

		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);

				iph->check = 0;
				ip_send_check(iph);
			}
		}
		if ((dext->flags & VNET_PKT_HCK_FULLCKSUM) &&
		    skb->ip_summed == CHECKSUM_NONE) {
			if (skb->protocol == htons(ETH_P_IP)) {
				struct iphdr *iph = ip_hdr(skb);
				int ihl = iph->ihl * 4;

				skb_reset_transport_header(skb);
				skb_set_transport_header(skb, ihl);
				vnet_fullcsum(skb);
			}
		}
		if (dext->flags & VNET_PKT_HCK_IPV4_HDRCKSUM_OK) {
			skb->ip_summed = CHECKSUM_PARTIAL;
			skb->csum_level = 0;
			if (dext->flags & VNET_PKT_HCK_FULLCKSUM_OK)
				skb->csum_level = 1;
		}
	}

	skb->ip_summed = port->switch_port ? CHECKSUM_NONE : CHECKSUM_PARTIAL;

	dev->stats.rx_packets++;
	dev->stats.rx_bytes += len;
	napi_gro_receive(&port->napi, skb);
	return 0;

out_free_skb:
	kfree_skb(skb);

out_dropped:
	dev->stats.rx_dropped++;
	return err;
}

static int vnet_send_ack(struct vnet_port *port, struct vio_dring_state *dr,
			 u32 start, u32 end, u8 vio_dring_state)
{
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_ACK,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = end,
		.state = vio_dring_state,
	};
	int err, delay;
	int retries = 0;

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES) {
			pr_info("ECONNRESET %x:%x:%x:%x:%x:%x\n",
				port->raddr[0], port->raddr[1],
				port->raddr[2], port->raddr[3],
				port->raddr[4], port->raddr[5]);
			break;
		}
	} while (err == -EAGAIN);

	if (err <= 0 && vio_dring_state == VIO_DRING_STOPPED) {
		port->stop_rx_idx = end;
		port->stop_rx = true;
	} else {
		port->stop_rx_idx = 0;
		port->stop_rx = false;
	}

	return err;
}

static struct vio_net_desc *get_rx_desc(struct vnet_port *port,
					struct vio_dring_state *dr,
					u32 index)
{
	struct vio_net_desc *desc = port->vio.desc_buf;
	int err;

	err = ldc_get_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return ERR_PTR(err);

	return desc;
}

static int put_rx_desc(struct vnet_port *port,
		       struct vio_dring_state *dr,
		       struct vio_net_desc *desc,
		       u32 index)
{
	int err;

	err = ldc_put_dring_entry(port->vio.lp, desc, dr->entry_size,
				  (index * dr->entry_size),
				  dr->cookies, dr->ncookies);
	if (err < 0)
		return err;

	return 0;
}

static int vnet_walk_rx_one(struct vnet_port *port,
			    struct vio_dring_state *dr,
			    u32 index, int *needs_ack)
{
	struct vio_net_desc *desc = get_rx_desc(port, dr, index);
	struct vio_driver_state *vio = &port->vio;
	int err;

	BUG_ON(!desc);
	if (IS_ERR(desc))
		return PTR_ERR(desc);

	if (desc->hdr.state != VIO_DESC_READY)
		return 1;

	dma_rmb();

	viodbg(DATA, "vio_walk_rx_one desc[%02x:%02x:%08x:%08x:%llx:%llx]\n",
	       desc->hdr.state, desc->hdr.ack,
	       desc->size, desc->ncookies,
	       desc->cookies[0].cookie_addr,
	       desc->cookies[0].cookie_size);

	err = vnet_rx_one(port, desc);
	if (err == -ECONNRESET)
		return err;
	trace_vnet_rx_one(port->vio._local_sid, port->vio._peer_sid,
			  index, desc->hdr.ack);
	desc->hdr.state = VIO_DESC_DONE;
	err = put_rx_desc(port, dr, desc, index);
	if (err < 0)
		return err;
	*needs_ack = desc->hdr.ack;
	return 0;
}

static int vnet_walk_rx(struct vnet_port *port, struct vio_dring_state *dr,
			u32 start, u32 end, int *npkts, int budget)
{
	struct vio_driver_state *vio = &port->vio;
	int ack_start = -1, ack_end = -1;
	bool send_ack = true;

	end = (end == (u32)-1) ? vio_dring_prev(dr, start)
			       : vio_dring_next(dr, end);

	viodbg(DATA, "vnet_walk_rx start[%08x] end[%08x]\n", start, end);

	while (start != end) {
		int ack = 0, err = vnet_walk_rx_one(port, dr, start, &ack);

		if (err == -ECONNRESET)
			return err;
		if (err != 0)
			break;
		(*npkts)++;
		if (ack_start == -1)
			ack_start = start;
		ack_end = start;
		start = vio_dring_next(dr, start);
		if (ack && start != end) {
			err = vnet_send_ack(port, dr, ack_start, ack_end,
					    VIO_DRING_ACTIVE);
			if (err == -ECONNRESET)
				return err;
			ack_start = -1;
		}
		if ((*npkts) >= budget) {
			send_ack = false;
			break;
		}
	}
	if (unlikely(ack_start == -1)) {
		ack_end = vio_dring_prev(dr, start);
		ack_start = ack_end;
	}
	if (send_ack) {
		port->napi_resume = false;
		trace_vnet_tx_send_stopped_ack(port->vio._local_sid,
					       port->vio._peer_sid,
					       ack_end, *npkts);
		return vnet_send_ack(port, dr, ack_start, ack_end,
				     VIO_DRING_STOPPED);
	} else {
		trace_vnet_tx_defer_stopped_ack(port->vio._local_sid,
						port->vio._peer_sid,
						ack_end, *npkts);
		port->napi_resume = true;
		port->napi_stop_idx = ack_end;
		return 1;
	}
}

static int vnet_rx(struct vnet_port *port, void *msgbuf, int *npkts,
		   int budget)
{
	struct vio_dring_data *pkt = msgbuf;
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_RX_RING];
	struct vio_driver_state *vio = &port->vio;

	viodbg(DATA, "vnet_rx stype_env[%04x] seq[%016llx] rcv_nxt[%016llx]\n",
	       pkt->tag.stype_env, pkt->seq, dr->rcv_nxt);

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;
	if (unlikely(pkt->seq != dr->rcv_nxt)) {
		pr_err("RX out of sequence seq[0x%llx] rcv_nxt[0x%llx]\n",
		       pkt->seq, dr->rcv_nxt);
		return 0;
	}

	if (!port->napi_resume)
		dr->rcv_nxt++;

	/* XXX Validate pkt->start_idx and pkt->end_idx XXX */

	return vnet_walk_rx(port, dr, pkt->start_idx, pkt->end_idx,
			    npkts, budget);
}

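/* Return true if @end still lies inside the window of descriptors
 * [dr->cons, dr->prod) that the peer has not yet acknowledged.
 */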
static int idx_is_pending(struct vio_dring_state *dr, u32 end)
{
	u32 idx = dr->cons;
	int found = 0;

	while (idx != dr->prod) {
		if (idx == end) {
			found = 1;
			break;
		}
		idx = vio_dring_next(dr, idx);
	}
	return found;
}

static int vnet_ack(struct vnet_port *port, void *msgbuf)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data *pkt = msgbuf;
	struct net_device *dev;
	u32 end;
	struct vio_net_desc *desc;
	struct netdev_queue *txq;

	if (unlikely(pkt->tag.stype_env != VIO_DRING_DATA))
		return 0;

	end = pkt->end_idx;
	dev = VNET_PORT_TO_NET_DEVICE(port);
	netif_tx_lock(dev);
	if (unlikely(!idx_is_pending(dr, end))) {
		netif_tx_unlock(dev);
		return 0;
	}

	/* sync for race conditions with vnet_start_xmit() and tell xmit it
	 * is time to send a trigger.
	 */
	trace_vnet_rx_stopped_ack(port->vio._local_sid,
				  port->vio._peer_sid, end);
	dr->cons = vio_dring_next(dr, end);
	desc = vio_dring_entry(dr, dr->cons);
	if (desc->hdr.state == VIO_DESC_READY && !port->start_cons) {
		/* vnet_start_xmit() just populated this dring but missed
		 * sending the "start" LDC message to the consumer.
		 * Send a "start" trigger on its behalf.
		 */
		if (__vnet_tx_trigger(port, dr->cons) > 0)
			port->start_cons = false;
		else
			port->start_cons = true;
	} else {
		port->start_cons = true;
	}
	netif_tx_unlock(dev);

	txq = netdev_get_tx_queue(dev, port->q_index);
	if (unlikely(netif_tx_queue_stopped(txq) &&
		     vnet_tx_dring_avail(dr) >= VNET_TX_WAKEUP_THRESH(dr)))
		return 1;

	return 0;
}

static int vnet_nack(struct vnet_port *port, void *msgbuf)
{
	/* XXX just reset or similar XXX */
	return 0;
}

static int handle_mcast(struct vnet_port *port, void *msgbuf)
{
	struct vio_net_mcast_info *pkt = msgbuf;
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);

	if (pkt->tag.stype != VIO_SUBTYPE_ACK)
		pr_err("%s: Got unexpected MCAST reply [%02x:%02x:%04x:%08x]\n",
		       dev->name,
		       pkt->tag.type,
		       pkt->tag.stype,
		       pkt->tag.stype_env,
		       pkt->tag.sid);

	return 0;
}

/* If the queue is stopped, wake it up so that we'll
 * send out another START message at the next TX.
 */
static void maybe_tx_wakeup(struct vnet_port *port)
{
	struct netdev_queue *txq;

	txq = netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
				  port->q_index);
	__netif_tx_lock(txq, smp_processor_id());
	if (likely(netif_tx_queue_stopped(txq)))
		netif_tx_wake_queue(txq);
	__netif_tx_unlock(txq);
}

bool sunvnet_port_is_up_common(struct vnet_port *vnet)
{
	struct vio_driver_state *vio = &vnet->vio;

	return !!(vio->hs_state & VIO_HS_COMPLETE);
}
EXPORT_SYMBOL_GPL(sunvnet_port_is_up_common);

static int vnet_event_napi(struct vnet_port *port, int budget)
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_driver_state *vio = &port->vio;
	int tx_wakeup, err;
	int npkts = 0;

	/* we don't expect any other bits */
	BUG_ON(port->rx_event & ~(LDC_EVENT_DATA_READY |
				  LDC_EVENT_RESET |
				  LDC_EVENT_UP));

	/* RESET takes precedence over any other event */
	if (port->rx_event & LDC_EVENT_RESET) {
		/* a link went down */

		if (port->vsw == 1) {
			netif_tx_stop_all_queues(dev);
			netif_carrier_off(dev);
		}

		vio_link_state_change(vio, LDC_EVENT_RESET);
		vnet_port_reset(port);
		vio_port_up(vio);

		/* If the device is running but its tx queue was
		 * stopped (due to flow control), restart it.
		 * This is necessary since vnet_port_reset()
		 * clears the tx drings and thus we may never get
		 * back a VIO_TYPE_DATA ACK packet - which is
		 * the normal mechanism to restart the tx queue.
		 */
		if (netif_running(dev))
			maybe_tx_wakeup(port);

		port->rx_event = 0;
		return 0;
	}

	if (port->rx_event & LDC_EVENT_UP) {
		/* a link came up */

		if (port->vsw == 1) {
			netif_carrier_on(port->dev);
			netif_tx_start_all_queues(port->dev);
		}

		vio_link_state_change(vio, LDC_EVENT_UP);
		port->rx_event = 0;
		return 0;
	}

	err = 0;
	tx_wakeup = 0;
	while (1) {
		union {
			struct vio_msg_tag tag;
			u64 raw[8];
		} msgbuf;

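		/* Out of budget on an earlier pass: synthesize the
		 * VIO_DRING_DATA message we would otherwise have read
		 * from the channel, so the walk resumes right after
		 * napi_stop_idx.
		 */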
		if (port->napi_resume) {
			struct vio_dring_data *pkt =
				(struct vio_dring_data *)&msgbuf;
			struct vio_dring_state *dr =
				&port->vio.drings[VIO_DRIVER_RX_RING];

			pkt->tag.type = VIO_TYPE_DATA;
			pkt->tag.stype = VIO_SUBTYPE_INFO;
			pkt->tag.stype_env = VIO_DRING_DATA;
			pkt->seq = dr->rcv_nxt;
			pkt->start_idx = vio_dring_next(dr,
							port->napi_stop_idx);
			pkt->end_idx = -1;
		} else {
			err = ldc_read(vio->lp, &msgbuf, sizeof(msgbuf));
			if (unlikely(err < 0)) {
				if (err == -ECONNRESET)
					vio_conn_reset(vio);
				break;
			}
			if (err == 0)
				break;
			viodbg(DATA, "TAG [%02x:%02x:%04x:%08x]\n",
			       msgbuf.tag.type,
			       msgbuf.tag.stype,
			       msgbuf.tag.stype_env,
			       msgbuf.tag.sid);
			err = vio_validate_sid(vio, &msgbuf.tag);
			if (err < 0)
				break;
		}

		if (likely(msgbuf.tag.type == VIO_TYPE_DATA)) {
			if (msgbuf.tag.stype == VIO_SUBTYPE_INFO) {
				if (!sunvnet_port_is_up_common(port)) {
					/* failures like handshake_failure()
					 * may have cleaned up dring, but
					 * NAPI polling may bring us here.
					 */
					err = -ECONNRESET;
					break;
				}
				err = vnet_rx(port, &msgbuf, &npkts, budget);
				if (npkts >= budget)
					break;
				if (npkts == 0)
					break;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_ACK) {
				err = vnet_ack(port, &msgbuf);
				if (err > 0)
					tx_wakeup |= err;
			} else if (msgbuf.tag.stype == VIO_SUBTYPE_NACK) {
				err = vnet_nack(port, &msgbuf);
			}
		} else if (msgbuf.tag.type == VIO_TYPE_CTRL) {
			if (msgbuf.tag.stype_env == VNET_MCAST_INFO)
				err = handle_mcast(port, &msgbuf);
			else
				err = vio_control_pkt_engine(vio, &msgbuf);
			if (err)
				break;
		} else {
			err = vnet_handle_unknown(port, &msgbuf);
		}
		if (err == -ECONNRESET)
			break;
	}
	if (unlikely(tx_wakeup && err != -ECONNRESET))
		maybe_tx_wakeup(port);
	return npkts;
}

int sunvnet_poll_common(struct napi_struct *napi, int budget)
{
	struct vnet_port *port = container_of(napi, struct vnet_port, napi);
	struct vio_driver_state *vio = &port->vio;
	int processed = vnet_event_napi(port, budget);

	if (processed < budget) {
		napi_complete_done(napi, processed);
		port->rx_event &= ~LDC_EVENT_DATA_READY;
		vio_set_intr(vio->vdev->rx_ino, HV_INTR_ENABLED);
	}
	return processed;
}
EXPORT_SYMBOL_GPL(sunvnet_poll_common);

void sunvnet_event_common(void *arg, int event)
{
	struct vnet_port *port = arg;
	struct vio_driver_state *vio = &port->vio;

	port->rx_event |= event;
	vio_set_intr(vio->vdev->rx_ino, HV_INTR_DISABLED);
	napi_schedule(&port->napi);
}
EXPORT_SYMBOL_GPL(sunvnet_event_common);

static int __vnet_tx_trigger(struct vnet_port *port, u32 start)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct vio_dring_data hdr = {
		.tag = {
			.type = VIO_TYPE_DATA,
			.stype = VIO_SUBTYPE_INFO,
			.stype_env = VIO_DRING_DATA,
			.sid = vio_send_sid(&port->vio),
		},
		.dring_ident = dr->ident,
		.start_idx = start,
		.end_idx = (u32)-1,
	};
	int err, delay;
	int retries = 0;

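	/* If an earlier VIO_DRING_STOPPED ack failed to go out
	 * (port->stop_rx is set), retry it before raising a new start
	 * trigger, so the peer's view of our rx ring stays consistent.
	 */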
	if (port->stop_rx) {
		trace_vnet_tx_pending_stopped_ack(port->vio._local_sid,
						  port->vio._peer_sid,
						  port->stop_rx_idx, -1);
		err = vnet_send_ack(port,
				    &port->vio.drings[VIO_DRIVER_RX_RING],
				    port->stop_rx_idx, -1,
				    VIO_DRING_STOPPED);
		if (err <= 0)
			return err;
	}

	hdr.seq = dr->snd_nxt;
	delay = 1;
	do {
		err = vio_ldc_send(&port->vio, &hdr, sizeof(hdr));
		if (err > 0) {
			dr->snd_nxt++;
			break;
		}
		udelay(delay);
		if ((delay <<= 1) > 128)
			delay = 128;
		if (retries++ > VNET_MAX_RETRIES)
			break;
	} while (err == -EAGAIN);
	trace_vnet_tx_trigger(port->vio._local_sid,
			      port->vio._peer_sid, start, err);

	return err;
}

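/* Walk the tx ring backward from the producer index, unlink the skbs
 * of descriptors the peer has finished with, and chain them so the
 * caller can free them after dropping the tx lock.  Descriptors still
 * READY are only counted into *pending.
 */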
static struct sk_buff *vnet_clean_tx_ring(struct vnet_port *port,
					  unsigned *pending)
{
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *skb = NULL;
	int i, txi;

	*pending = 0;

	txi = dr->prod;
	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		--txi;
		if (txi < 0)
			txi = VNET_TX_RING_SIZE - 1;

		d = vio_dring_entry(dr, txi);

		if (d->hdr.state == VIO_DESC_READY) {
			(*pending)++;
			continue;
		}
		if (port->tx_bufs[txi].skb) {
			if (d->hdr.state != VIO_DESC_DONE)
				pr_notice("invalid ring buffer state %d\n",
					  d->hdr.state);
			BUG_ON(port->tx_bufs[txi].skb->next);

			port->tx_bufs[txi].skb->next = skb;
			skb = port->tx_bufs[txi].skb;
			port->tx_bufs[txi].skb = NULL;

			ldc_unmap(port->vio.lp,
				  port->tx_bufs[txi].cookies,
				  port->tx_bufs[txi].ncookies);
		} else if (d->hdr.state == VIO_DESC_FREE) {
			break;
		}
		d->hdr.state = VIO_DESC_FREE;
	}
	return skb;
}

static inline void vnet_free_skbs(struct sk_buff *skb)
{
	struct sk_buff *next;

	while (skb) {
		next = skb->next;
		skb->next = NULL;
		dev_kfree_skb(skb);
		skb = next;
	}
}

void sunvnet_clean_timer_expire_common(unsigned long port0)
{
	struct vnet_port *port = (struct vnet_port *)port0;
	struct sk_buff *freeskbs;
	unsigned pending;

	netif_tx_lock(VNET_PORT_TO_NET_DEVICE(port));
	freeskbs = vnet_clean_tx_ring(port, &pending);
	netif_tx_unlock(VNET_PORT_TO_NET_DEVICE(port));

	vnet_free_skbs(freeskbs);

	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else
		del_timer(&port->clean_timer);
}
EXPORT_SYMBOL_GPL(sunvnet_clean_timer_expire_common);

static inline int vnet_skb_map(struct ldc_channel *lp, struct sk_buff *skb,
			       struct ldc_trans_cookie *cookies, int ncookies,
			       unsigned int map_perm)
{
	int i, nc, err, blen;

	/* header */
	blen = skb_headlen(skb);
	if (blen < ETH_ZLEN)
		blen = ETH_ZLEN;
	blen += VNET_PACKET_SKIP;
	blen += 8 - (blen & 7);

	err = ldc_map_single(lp, skb->data - VNET_PACKET_SKIP, blen, cookies,
			     ncookies, map_perm);
	if (err < 0)
		return err;
	nc = err;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];
		u8 *vaddr;

		if (nc < ncookies) {
			vaddr = kmap_atomic(skb_frag_page(f));
			blen = skb_frag_size(f);
			blen += 8 - (blen & 7);
			err = ldc_map_single(lp, vaddr + f->page_offset,
					     blen, cookies + nc, ncookies - nc,
					     map_perm);
			kunmap_atomic(vaddr);
		} else {
			err = -EMSGSIZE;
		}

		if (err < 0) {
			ldc_unmap(lp, cookies, nc);
			return err;
		}
		nc += err;
	}
	return nc;
}

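/* Ensure the skb meets the LDC mapping rules: data sitting
 * VNET_PACKET_SKIP bytes past an 8-byte boundary, 8-byte aligned
 * fragments, and no more fragments than we have cookies.  If any rule
 * is violated, copy into a freshly aligned skb, recomputing the
 * checksum when the original asked for CHECKSUM_PARTIAL.
 */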
static inline struct sk_buff *vnet_skb_shape(struct sk_buff *skb, int ncookies)
{
	struct sk_buff *nskb;
	int i, len, pad, docopy;

	len = skb->len;
	pad = 0;
	if (len < ETH_ZLEN) {
		pad += ETH_ZLEN - skb->len;
		len += pad;
	}
	len += VNET_PACKET_SKIP;
	pad += 8 - (len & 7);

	/* make sure we have enough cookies and alignment in every frag */
	docopy = skb_shinfo(skb)->nr_frags >= ncookies;
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		skb_frag_t *f = &skb_shinfo(skb)->frags[i];

		docopy |= f->page_offset & 7;
	}
	if (((unsigned long)skb->data & 7) != VNET_PACKET_SKIP ||
	    skb_tailroom(skb) < pad ||
	    skb_headroom(skb) < VNET_PACKET_SKIP || docopy) {
		int start = 0, offset;
		__wsum csum;

		len = skb->len > ETH_ZLEN ? skb->len : ETH_ZLEN;
		nskb = alloc_and_align_skb(skb->dev, len);
		if (!nskb) {
			dev_kfree_skb(skb);
			return NULL;
		}
		skb_reserve(nskb, VNET_PACKET_SKIP);

		nskb->protocol = skb->protocol;
		offset = skb_mac_header(skb) - skb->data;
		skb_set_mac_header(nskb, offset);
		offset = skb_network_header(skb) - skb->data;
		skb_set_network_header(nskb, offset);
		offset = skb_transport_header(skb) - skb->data;
		skb_set_transport_header(nskb, offset);

		offset = 0;
		nskb->csum_offset = skb->csum_offset;
		nskb->ip_summed = skb->ip_summed;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			start = skb_checksum_start_offset(skb);
		if (start) {
			struct iphdr *iph = ip_hdr(nskb);
			int offset = start + nskb->csum_offset;

			if (skb_copy_bits(skb, 0, nskb->data, start)) {
				dev_kfree_skb(nskb);
				dev_kfree_skb(skb);
				return NULL;
			}
			*(__sum16 *)(skb->data + offset) = 0;
			csum = skb_copy_and_csum_bits(skb, start,
						      nskb->data + start,
						      skb->len - start, 0);
			if (iph->protocol == IPPROTO_TCP ||
			    iph->protocol == IPPROTO_UDP) {
				csum = csum_tcpudp_magic(iph->saddr, iph->daddr,
							 skb->len - start,
							 iph->protocol, csum);
			}
			*(__sum16 *)(nskb->data + offset) = csum;

			nskb->ip_summed = CHECKSUM_NONE;
		} else if (skb_copy_bits(skb, 0, nskb->data, skb->len)) {
			dev_kfree_skb(nskb);
			dev_kfree_skb(skb);
			return NULL;
		}
		(void)skb_put(nskb, skb->len);
		if (skb_is_gso(skb)) {
			skb_shinfo(nskb)->gso_size = skb_shinfo(skb)->gso_size;
			skb_shinfo(nskb)->gso_type = skb_shinfo(skb)->gso_type;
		}
		nskb->queue_mapping = skb->queue_mapping;
		dev_kfree_skb(skb);
		skb = nskb;
	}
	return skb;
}

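/* Software-segment a GSO skb that exceeds the LSO length negotiated in
 * port->tsolen, then feed each resulting segment back through
 * sunvnet_start_xmit_common().
 */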
static int vnet_handle_offloads(struct vnet_port *port, struct sk_buff *skb,
				struct vnet_port *(*vnet_tx_port)
				(struct sk_buff *, struct net_device *))
{
	struct net_device *dev = VNET_PORT_TO_NET_DEVICE(port);
	struct vio_dring_state *dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	struct sk_buff *segs;
	int maclen, datalen;
	int status;
	int gso_size, gso_type, gso_segs;
	int hlen = skb_transport_header(skb) - skb_mac_header(skb);
	int proto = IPPROTO_IP;

	if (skb->protocol == htons(ETH_P_IP))
		proto = ip_hdr(skb)->protocol;
	else if (skb->protocol == htons(ETH_P_IPV6))
		proto = ipv6_hdr(skb)->nexthdr;

	if (proto == IPPROTO_TCP) {
		hlen += tcp_hdr(skb)->doff * 4;
	} else if (proto == IPPROTO_UDP) {
		hlen += sizeof(struct udphdr);
	} else {
		pr_err("vnet_handle_offloads GSO with unknown transport "
		       "protocol %d tproto %d\n", skb->protocol, proto);
		hlen = 128; /* XXX */
	}
	datalen = port->tsolen - hlen;

	gso_size = skb_shinfo(skb)->gso_size;
	gso_type = skb_shinfo(skb)->gso_type;
	gso_segs = skb_shinfo(skb)->gso_segs;

	if (port->tso && gso_size < datalen)
		gso_segs = DIV_ROUND_UP(skb->len - hlen, datalen);

	if (unlikely(vnet_tx_dring_avail(dr) < gso_segs)) {
		struct netdev_queue *txq;

		txq = netdev_get_tx_queue(dev, port->q_index);
		netif_tx_stop_queue(txq);
		if (vnet_tx_dring_avail(dr) < skb_shinfo(skb)->gso_segs)
			return NETDEV_TX_BUSY;
		netif_tx_wake_queue(txq);
	}

	maclen = skb_network_header(skb) - skb_mac_header(skb);
	skb_pull(skb, maclen);

	if (port->tso && gso_size < datalen) {
		if (skb_unclone(skb, GFP_ATOMIC))
			goto out_dropped;

		/* segment to TSO size */
		skb_shinfo(skb)->gso_size = datalen;
		skb_shinfo(skb)->gso_segs = gso_segs;
	}
	segs = skb_gso_segment(skb, dev->features & ~NETIF_F_TSO);
	if (IS_ERR(segs))
		goto out_dropped;

	skb_push(skb, maclen);
	skb_reset_mac_header(skb);

	status = 0;
	while (segs) {
		struct sk_buff *curr = segs;

		segs = segs->next;
		curr->next = NULL;
		if (port->tso && curr->len > dev->mtu) {
			skb_shinfo(curr)->gso_size = gso_size;
			skb_shinfo(curr)->gso_type = gso_type;
			skb_shinfo(curr)->gso_segs =
				DIV_ROUND_UP(curr->len - hlen, gso_size);
		} else {
			skb_shinfo(curr)->gso_size = 0;
		}

		skb_push(curr, maclen);
		skb_reset_mac_header(curr);
		memcpy(skb_mac_header(curr), skb_mac_header(skb),
		       maclen);
		curr->csum_start = skb_transport_header(curr) - curr->head;
		if (ip_hdr(curr)->protocol == IPPROTO_TCP)
			curr->csum_offset = offsetof(struct tcphdr, check);
		else if (ip_hdr(curr)->protocol == IPPROTO_UDP)
			curr->csum_offset = offsetof(struct udphdr, check);

		if (!(status & NETDEV_TX_MASK))
			status = sunvnet_start_xmit_common(curr, dev,
							   vnet_tx_port);
		if (status & NETDEV_TX_MASK)
			dev_kfree_skb_any(curr);
	}

	if (!(status & NETDEV_TX_MASK))
		dev_kfree_skb_any(skb);
	return status;
out_dropped:
	dev->stats.tx_dropped++;
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

int sunvnet_start_xmit_common(struct sk_buff *skb, struct net_device *dev,
			      struct vnet_port *(*vnet_tx_port)
			      (struct sk_buff *, struct net_device *))
{
	struct vnet_port *port = NULL;
	struct vio_dring_state *dr;
	struct vio_net_desc *d;
	unsigned int len;
	struct sk_buff *freeskbs = NULL;
	int i, err, txi;
	unsigned pending = 0;
	struct netdev_queue *txq;

	rcu_read_lock();
	port = vnet_tx_port(skb, dev);
	if (unlikely(!port))
		goto out_dropped;

	if (skb_is_gso(skb) && skb->len > port->tsolen) {
		err = vnet_handle_offloads(port, skb, vnet_tx_port);
		rcu_read_unlock();
		return err;
	}

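	/* This port's negotiated MTU may be smaller than the device MTU;
	 * act like a router and bounce an ICMP FRAG_NEEDED (or IPv6
	 * PKT_TOOBIG) back at the sender instead of transmitting.
	 */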
	if (!skb_is_gso(skb) && skb->len > port->rmtu) {
		unsigned long localmtu = port->rmtu - ETH_HLEN;

		if (vio_version_after_eq(&port->vio, 1, 3))
			localmtu -= VLAN_HLEN;

		if (skb->protocol == htons(ETH_P_IP)) {
			struct flowi4 fl4;
			struct rtable *rt = NULL;

			memset(&fl4, 0, sizeof(fl4));
			fl4.flowi4_oif = dev->ifindex;
			fl4.flowi4_tos = RT_TOS(ip_hdr(skb)->tos);
			fl4.daddr = ip_hdr(skb)->daddr;
			fl4.saddr = ip_hdr(skb)->saddr;

			rt = ip_route_output_key(dev_net(dev), &fl4);
			if (!IS_ERR(rt)) {
				skb_dst_set(skb, &rt->dst);
				icmp_send(skb, ICMP_DEST_UNREACH,
					  ICMP_FRAG_NEEDED,
					  htonl(localmtu));
			}
		}
#if IS_ENABLED(CONFIG_IPV6)
		else if (skb->protocol == htons(ETH_P_IPV6))
			icmpv6_send(skb, ICMPV6_PKT_TOOBIG, 0, localmtu);
#endif
		goto out_dropped;
	}

	skb = vnet_skb_shape(skb, 2);

	if (unlikely(!skb))
		goto out_dropped;

	if (skb->ip_summed == CHECKSUM_PARTIAL)
		vnet_fullcsum(skb);

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];
	i = skb_get_queue_mapping(skb);
	txq = netdev_get_tx_queue(dev, i);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		if (!netif_tx_queue_stopped(txq)) {
			netif_tx_stop_queue(txq);

			/* This is a hard error, log it. */
			netdev_err(dev, "BUG! Tx Ring full when queue awake!\n");
			dev->stats.tx_errors++;
		}
		rcu_read_unlock();
		return NETDEV_TX_BUSY;
	}

	d = vio_dring_cur(dr);

	txi = dr->prod;

	freeskbs = vnet_clean_tx_ring(port, &pending);

	BUG_ON(port->tx_bufs[txi].skb);

	len = skb->len;
	if (len < ETH_ZLEN)
		len = ETH_ZLEN;

	err = vnet_skb_map(port->vio.lp, skb, port->tx_bufs[txi].cookies, 2,
			   (LDC_MAP_SHADOW | LDC_MAP_DIRECT | LDC_MAP_RW));
	if (err < 0) {
		netdev_info(dev, "tx buffer map error %d\n", err);
		goto out_dropped;
	}

	port->tx_bufs[txi].skb = skb;
	skb = NULL;
	port->tx_bufs[txi].ncookies = err;

	/* We don't rely on the ACKs to free the skb in vnet_start_xmit(),
	 * thus it is safe to not set VIO_ACK_ENABLE for each transmission:
	 * the protocol itself does not require it as long as the peer
	 * sends a VIO_SUBTYPE_ACK for VIO_DRING_STOPPED.
	 *
	 * An ACK for every packet in the ring is expensive as the
	 * sending of LDC messages is slow and affects performance.
	 */
	d->hdr.ack = VIO_ACK_DISABLE;
	d->size = len;
	d->ncookies = port->tx_bufs[txi].ncookies;
	for (i = 0; i < d->ncookies; i++)
		d->cookies[i] = port->tx_bufs[txi].cookies[i];
	if (vio_version_after_eq(&port->vio, 1, 7)) {
		struct vio_net_dext *dext = vio_net_ext(d);

		memset(dext, 0, sizeof(*dext));
		if (skb_is_gso(port->tx_bufs[txi].skb)) {
			dext->ipv4_lso_mss = skb_shinfo(port->tx_bufs[txi].skb)
						->gso_size;
			dext->flags |= VNET_PKT_IPV4_LSO;
		}
		if (vio_version_after_eq(&port->vio, 1, 8) &&
		    !port->switch_port) {
			dext->flags |= VNET_PKT_HCK_IPV4_HDRCKSUM_OK;
			dext->flags |= VNET_PKT_HCK_FULLCKSUM_OK;
		}
	}

	/* This has to be a non-SMP write barrier because we are writing
	 * to memory which is shared with the peer LDOM.
	 */
	dma_wmb();

	d->hdr.state = VIO_DESC_READY;

	/* Exactly one ldc "start" trigger (for dr->cons) needs to be sent
	 * to notify the consumer that some descriptors are READY.
	 * After that "start" trigger, no additional triggers are needed until
	 * a DRING_STOPPED is received from the consumer. The dr->cons field
	 * (set up by vnet_ack()) has the value of the next dring index
	 * that has not yet been ack-ed. We send a "start" trigger here
	 * if, and only if, start_cons is true (reset it afterward). Conversely,
	 * vnet_ack() should check if the dring corresponding to cons
	 * is marked READY, but start_cons was false.
	 * If so, vnet_ack() should send out the missed "start" trigger.
	 *
	 * Note that the dma_wmb() above makes sure the cookies et al. are
	 * not globally visible before the VIO_DESC_READY, and that the
	 * stores are ordered correctly by the compiler. The consumer will
	 * not proceed until the VIO_DESC_READY is visible assuring that
	 * the consumer does not observe anything related to descriptors
	 * out of order. The HV trap from the LDC start trigger is the
	 * producer to consumer announcement that work is available to the
	 * consumer
	 */
	if (!port->start_cons) { /* previous trigger suffices */
		trace_vnet_skip_tx_trigger(port->vio._local_sid,
					   port->vio._peer_sid, dr->cons);
		goto ldc_start_done;
	}

	err = __vnet_tx_trigger(port, dr->cons);
	if (unlikely(err < 0)) {
		netdev_info(dev, "TX trigger error %d\n", err);
		d->hdr.state = VIO_DESC_FREE;
		skb = port->tx_bufs[txi].skb;
		port->tx_bufs[txi].skb = NULL;
		dev->stats.tx_carrier_errors++;
		goto out_dropped;
	}

ldc_start_done:
	port->start_cons = false;

	dev->stats.tx_packets++;
	dev->stats.tx_bytes += port->tx_bufs[txi].skb->len;

	dr->prod = (dr->prod + 1) & (VNET_TX_RING_SIZE - 1);
	if (unlikely(vnet_tx_dring_avail(dr) < 1)) {
		netif_tx_stop_queue(txq);
		smp_rmb();
		if (vnet_tx_dring_avail(dr) > VNET_TX_WAKEUP_THRESH(dr))
			netif_tx_wake_queue(txq);
	}

	(void)mod_timer(&port->clean_timer, jiffies + VNET_CLEAN_TIMEOUT);
	rcu_read_unlock();

	vnet_free_skbs(freeskbs);

	return NETDEV_TX_OK;

out_dropped:
	if (pending)
		(void)mod_timer(&port->clean_timer,
				jiffies + VNET_CLEAN_TIMEOUT);
	else if (port)
		del_timer(&port->clean_timer);
	rcu_read_unlock();
	if (skb)
		dev_kfree_skb(skb);
	vnet_free_skbs(freeskbs);
	dev->stats.tx_dropped++;
	return NETDEV_TX_OK;
}
EXPORT_SYMBOL_GPL(sunvnet_start_xmit_common);

void sunvnet_tx_timeout_common(struct net_device *dev)
{
	/* XXX Implement me XXX */
}
EXPORT_SYMBOL_GPL(sunvnet_tx_timeout_common);

int sunvnet_open_common(struct net_device *dev)
{
	netif_carrier_on(dev);
	netif_tx_start_all_queues(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_open_common);

int sunvnet_close_common(struct net_device *dev)
{
	netif_tx_stop_all_queues(dev);
	netif_carrier_off(dev);

	return 0;
}
EXPORT_SYMBOL_GPL(sunvnet_close_common);

static struct vnet_mcast_entry *__vnet_mc_find(struct vnet *vp, u8 *addr)
{
	struct vnet_mcast_entry *m;

	for (m = vp->mcast_list; m; m = m->next) {
		if (ether_addr_equal(m->addr, addr))
			return m;
	}
	return NULL;
}

static void __update_mc_list(struct vnet *vp, struct net_device *dev)
{
	struct netdev_hw_addr *ha;

	netdev_for_each_mc_addr(ha, dev) {
		struct vnet_mcast_entry *m;

		m = __vnet_mc_find(vp, ha->addr);
		if (m) {
			m->hit = 1;
			continue;
		}

		if (!m) {
			m = kzalloc(sizeof(*m), GFP_ATOMIC);
			if (!m)
				continue;
			memcpy(m->addr, ha->addr, ETH_ALEN);
			m->hit = 1;

			m->next = vp->mcast_list;
			vp->mcast_list = m;
		}
	}
}

static void __send_mc_list(struct vnet *vp, struct vnet_port *port)
{
	struct vio_net_mcast_info info;
	struct vnet_mcast_entry *m, **pp;
	int n_addrs;

	memset(&info, 0, sizeof(info));

	info.tag.type = VIO_TYPE_CTRL;
	info.tag.stype = VIO_SUBTYPE_INFO;
	info.tag.stype_env = VNET_MCAST_INFO;
	info.tag.sid = vio_send_sid(&port->vio);
	info.set = 1;

	n_addrs = 0;
	for (m = vp->mcast_list; m; m = m->next) {
		if (m->sent)
			continue;
		m->sent = 1;
		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;

			(void)vio_ldc_send(&port->vio, &info,
					   sizeof(info));
			n_addrs = 0;
		}
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void)vio_ldc_send(&port->vio, &info, sizeof(info));
	}

	info.set = 0;

	n_addrs = 0;
	pp = &vp->mcast_list;
	while ((m = *pp) != NULL) {
		if (m->hit) {
			m->hit = 0;
			pp = &m->next;
			continue;
		}

		memcpy(&info.mcast_addr[n_addrs * ETH_ALEN],
		       m->addr, ETH_ALEN);
		if (++n_addrs == VNET_NUM_MCAST) {
			info.count = n_addrs;
			(void)vio_ldc_send(&port->vio, &info,
					   sizeof(info));
			n_addrs = 0;
		}

		*pp = m->next;
		kfree(m);
	}
	if (n_addrs) {
		info.count = n_addrs;
		(void)vio_ldc_send(&port->vio, &info, sizeof(info));
	}
}

void sunvnet_set_rx_mode_common(struct net_device *dev, struct vnet *vp)
{
	struct vnet_port *port;

	rcu_read_lock();
	list_for_each_entry_rcu(port, &vp->port_list, list) {
		if (port->switch_port) {
			__update_mc_list(vp, dev);
			__send_mc_list(vp, port);
			break;
		}
	}
	rcu_read_unlock();
}
EXPORT_SYMBOL_GPL(sunvnet_set_rx_mode_common);

int sunvnet_set_mac_addr_common(struct net_device *dev, void *p)
{
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(sunvnet_set_mac_addr_common);

void sunvnet_port_free_tx_bufs_common(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	int i;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

	if (!dr->base)
		return;

	for (i = 0; i < VNET_TX_RING_SIZE; i++) {
		struct vio_net_desc *d;
		void *skb = port->tx_bufs[i].skb;

		if (!skb)
			continue;

		d = vio_dring_entry(dr, i);

		ldc_unmap(port->vio.lp,
			  port->tx_bufs[i].cookies,
			  port->tx_bufs[i].ncookies);
		dev_kfree_skb(skb);
		port->tx_bufs[i].skb = NULL;
		d->hdr.state = VIO_DESC_FREE;
	}
	ldc_free_exp_dring(port->vio.lp, dr->base,
			   (dr->entry_size * dr->num_entries),
			   dr->cookies, dr->ncookies);
	dr->base = NULL;
	dr->entry_size = 0;
	dr->num_entries = 0;
	dr->pending = 0;
	dr->ncookies = 0;
}
EXPORT_SYMBOL_GPL(sunvnet_port_free_tx_bufs_common);

void vnet_port_reset(struct vnet_port *port)
{
	del_timer(&port->clean_timer);
	sunvnet_port_free_tx_bufs_common(port);
	port->rmtu = 0;
	port->tso = (port->vsw == 0);  /* no tso in vsw, misbehaves in bridge */
	port->tsolen = 0;
}
EXPORT_SYMBOL_GPL(vnet_port_reset);

static int vnet_port_alloc_tx_ring(struct vnet_port *port)
{
	struct vio_dring_state *dr;
	unsigned long len, elen;
	int i, err, ncookies;
	void *dring;

	dr = &port->vio.drings[VIO_DRIVER_TX_RING];

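	/* Each descriptor carries two LDC cookies inline; handshakes at
	 * v1.7 or later also need room for the vio_net_dext extension.
	 */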
	elen = sizeof(struct vio_net_desc) +
	       sizeof(struct ldc_trans_cookie) * 2;
	if (vio_version_after_eq(&port->vio, 1, 7))
		elen += sizeof(struct vio_net_dext);
	len = VNET_TX_RING_SIZE * elen;

	ncookies = VIO_MAX_RING_COOKIES;
	dring = ldc_alloc_exp_dring(port->vio.lp, len,
				    dr->cookies, &ncookies,
				    (LDC_MAP_SHADOW |
				     LDC_MAP_DIRECT |
				     LDC_MAP_RW));
	if (IS_ERR(dring)) {
		err = PTR_ERR(dring);
		goto err_out;
	}

	dr->base = dring;
	dr->entry_size = elen;
	dr->num_entries = VNET_TX_RING_SIZE;
	dr->prod = 0;
	dr->cons = 0;
	port->start_cons = true; /* need an initial trigger */
	dr->pending = VNET_TX_RING_SIZE;
	dr->ncookies = ncookies;

	for (i = 0; i < VNET_TX_RING_SIZE; ++i) {
		struct vio_net_desc *d;

		d = vio_dring_entry(dr, i);
		d->hdr.state = VIO_DESC_FREE;
	}
	return 0;

err_out:
	sunvnet_port_free_tx_bufs_common(port);

	return err;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
void sunvnet_poll_controller_common(struct net_device *dev, struct vnet *vp)
{
	struct vnet_port *port;
	unsigned long flags;

	spin_lock_irqsave(&vp->lock, flags);
	if (!list_empty(&vp->port_list)) {
		port = list_entry(vp->port_list.next, struct vnet_port, list);
		napi_schedule(&port->napi);
	}
	spin_unlock_irqrestore(&vp->lock, flags);
}
EXPORT_SYMBOL_GPL(sunvnet_poll_controller_common);
#endif

void sunvnet_port_add_txq_common(struct vnet_port *port)
{
	struct vnet *vp = port->vp;
	int n;

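	/* Queue assignment wraps modulo VNET_MAX_TXQS, so when there are
	 * more ports than queues several ports share a tx queue.
	 */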
	n = vp->nports++;
	n = n & (VNET_MAX_TXQS - 1);
	port->q_index = n;
	netif_tx_wake_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
						port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_add_txq_common);

void sunvnet_port_rm_txq_common(struct vnet_port *port)
{
	port->vp->nports--;
	netif_tx_stop_queue(netdev_get_tx_queue(VNET_PORT_TO_NET_DEVICE(port),
						port->q_index));
}
EXPORT_SYMBOL_GPL(sunvnet_port_rm_txq_common);