/* A simple network driver using virtio.
 *
 * Copyright 2007 Rusty Russell <rusty@rustcorp.com.au> IBM Corporation
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
 */
//#define DEBUG
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/ethtool.h>
#include <linux/module.h>
#include <linux/virtio.h>
#include <linux/virtio_ids.h>
#include <linux/virtio_net.h>
#include <linux/scatterlist.h>
#include <linux/if_vlan.h>

static int napi_weight = 128;
module_param(napi_weight, int, 0444);

static int csum = 1, gso = 1;
module_param(csum, bool, 0444);
module_param(gso, bool, 0444);

/* FIXME: MTU in config. */
#define MAX_PACKET_LEN (ETH_HLEN + VLAN_HLEN + ETH_DATA_LEN)
#define GOOD_COPY_LEN	128

#define VIRTNET_SEND_COMMAND_SG_MAX	2

struct virtnet_info
{
	struct virtio_device *vdev;
	struct virtqueue *rvq, *svq, *cvq;
	struct net_device *dev;
	struct napi_struct napi;
	unsigned int status;

	/* If we need to free in a timer, this is it. */
	struct timer_list xmit_free_timer;

	/* Number of input buffers, and max we've ever had. */
	unsigned int num, max;

	/* For cleaning up after transmission. */
	struct tasklet_struct tasklet;
	bool free_in_tasklet;

	/* I like... big packets and I cannot lie! */
	bool big_packets;

	/* Host will merge rx buffers for big packets (shake it! shake it!) */
	bool mergeable_rx_bufs;

	/* Receive & send queues. */
	struct sk_buff_head recv;
	struct sk_buff_head send;

	/* Work struct for refilling if we run low on memory. */
	struct delayed_work refill;

	/* Chain pages by the private ptr. */
	struct page *pages;
};

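/* The virtio header for each packet is stashed in the skb's control
 * buffer (skb->cb). */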
static inline void *skb_vnet_hdr(struct sk_buff *skb)
{
	return (struct virtio_net_hdr *)skb->cb;
}

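/* Return a page to the driver's private free list, chained via page->private. */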
static void give_a_page(struct virtnet_info *vi, struct page *page)
{
	page->private = (unsigned long)vi->pages;
	vi->pages = page;
}

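/* Detach all fragment pages from an skb and put them back on the free list. */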
static void trim_pages(struct virtnet_info *vi, struct sk_buff *skb)
{
	unsigned int i;

	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++)
		give_a_page(vi, skb_shinfo(skb)->frags[i].page);
	skb_shinfo(skb)->nr_frags = 0;
	skb->data_len = 0;
}

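/* Grab a page from the free list, falling back to a fresh allocation. */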
static struct page *get_a_page(struct virtnet_info *vi, gfp_t gfp_mask)
{
	struct page *p = vi->pages;

	if (p)
		vi->pages = (struct page *)p->private;
	else
		p = alloc_page(gfp_mask);
	return p;
}

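/* Callback from the send virtqueue: the host has consumed some output
 * buffers, so the queue can be woken. */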
static void skb_xmit_done(struct virtqueue *svq)
{
	struct virtnet_info *vi = svq->vdev->priv;

	/* Suppress further interrupts. */
	svq->vq_ops->disable_cb(svq);

	/* We were probably waiting for more output buffers. */
	netif_wake_queue(vi->dev);

	if (vi->free_in_tasklet)
		tasklet_schedule(&vi->tasklet);
}

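/* Process one received buffer: gather mergeable fragments if needed, then
 * apply checksum and GSO metadata before handing the skb to the stack. */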
static void receive_skb(struct net_device *dev, struct sk_buff *skb,
			unsigned len)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	int err;
	int i;

	if (unlikely(len < sizeof(struct virtio_net_hdr) + ETH_HLEN)) {
		pr_debug("%s: short packet %i\n", dev->name, len);
		dev->stats.rx_length_errors++;
		goto drop;
	}

	if (vi->mergeable_rx_bufs) {
		struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
		unsigned int copy;
		char *p = page_address(skb_shinfo(skb)->frags[0].page);

		if (len > PAGE_SIZE)
			len = PAGE_SIZE;
		len -= sizeof(struct virtio_net_hdr_mrg_rxbuf);

		memcpy(hdr, p, sizeof(*mhdr));
		p += sizeof(*mhdr);

		copy = len;
		if (copy > skb_tailroom(skb))
			copy = skb_tailroom(skb);

		memcpy(skb_put(skb, copy), p, copy);

		len -= copy;

		if (!len) {
			give_a_page(vi, skb_shinfo(skb)->frags[0].page);
			skb_shinfo(skb)->nr_frags--;
		} else {
			skb_shinfo(skb)->frags[0].page_offset +=
				sizeof(*mhdr) + copy;
			skb_shinfo(skb)->frags[0].size = len;
			skb->data_len += len;
			skb->len += len;
		}

		while (--mhdr->num_buffers) {
			struct sk_buff *nskb;

			i = skb_shinfo(skb)->nr_frags;
			if (i >= MAX_SKB_FRAGS) {
				pr_debug("%s: packet too long %d\n", dev->name,
					 len);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			nskb = vi->rvq->vq_ops->get_buf(vi->rvq, &len);
			if (!nskb) {
				pr_debug("%s: rx error: %d buffers missing\n",
					 dev->name, mhdr->num_buffers);
				dev->stats.rx_length_errors++;
				goto drop;
			}

			__skb_unlink(nskb, &vi->recv);
			vi->num--;

			skb_shinfo(skb)->frags[i] = skb_shinfo(nskb)->frags[0];
			skb_shinfo(nskb)->nr_frags = 0;
			kfree_skb(nskb);

			if (len > PAGE_SIZE)
				len = PAGE_SIZE;

			skb_shinfo(skb)->frags[i].size = len;
			skb_shinfo(skb)->nr_frags++;
			skb->data_len += len;
			skb->len += len;
		}
	} else {
		len -= sizeof(struct virtio_net_hdr);

		if (len <= MAX_PACKET_LEN)
			trim_pages(vi, skb);

		err = pskb_trim(skb, len);
		if (err) {
			pr_debug("%s: pskb_trim failed %i %d\n", dev->name,
				 len, err);
			dev->stats.rx_dropped++;
			goto drop;
		}
	}

	skb->truesize += skb->data_len;
	dev->stats.rx_bytes += skb->len;
	dev->stats.rx_packets++;

	if (hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM) {
		pr_debug("Needs csum!\n");
		if (!skb_partial_csum_set(skb, hdr->csum_start, hdr->csum_offset))
			goto frame_err;
	}

	skb->protocol = eth_type_trans(skb, dev);
	pr_debug("Receiving skb proto 0x%04x len %i type %i\n",
		 ntohs(skb->protocol), skb->len, skb->pkt_type);

	if (hdr->gso_type != VIRTIO_NET_HDR_GSO_NONE) {
		pr_debug("GSO!\n");
		switch (hdr->gso_type & ~VIRTIO_NET_HDR_GSO_ECN) {
		case VIRTIO_NET_HDR_GSO_TCPV4:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV4;
			break;
		case VIRTIO_NET_HDR_GSO_UDP:
			skb_shinfo(skb)->gso_type = SKB_GSO_UDP;
			break;
		case VIRTIO_NET_HDR_GSO_TCPV6:
			skb_shinfo(skb)->gso_type = SKB_GSO_TCPV6;
			break;
		default:
			if (net_ratelimit())
				printk(KERN_WARNING "%s: bad gso type %u.\n",
				       dev->name, hdr->gso_type);
			goto frame_err;
		}

		if (hdr->gso_type & VIRTIO_NET_HDR_GSO_ECN)
			skb_shinfo(skb)->gso_type |= SKB_GSO_TCP_ECN;

		skb_shinfo(skb)->gso_size = hdr->gso_size;
		if (skb_shinfo(skb)->gso_size == 0) {
			if (net_ratelimit())
				printk(KERN_WARNING "%s: zero gso size.\n",
				       dev->name);
			goto frame_err;
		}

		/* Header must be checked, and gso_segs computed. */
		skb_shinfo(skb)->gso_type |= SKB_GSO_DODGY;
		skb_shinfo(skb)->gso_segs = 0;
	}

	netif_receive_skb(skb);
	return;

frame_err:
	dev->stats.rx_frame_errors++;
drop:
	dev_kfree_skb(skb);
}

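/* Fill the rx ring with worst-case-sized buffers (header plus up to
 * MAX_SKB_FRAGS pages); used when the host can't merge receive buffers.
 * Returns false on OOM. */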
static bool try_fill_recv_maxbufs(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	int num, err, i;
	bool oom = false;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);
	for (;;) {
		struct virtio_net_hdr *hdr;

		skb = netdev_alloc_skb(vi->dev, MAX_PACKET_LEN + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			oom = true;
			break;
		}

		skb_reserve(skb, NET_IP_ALIGN);
		skb_put(skb, MAX_PACKET_LEN);

		hdr = skb_vnet_hdr(skb);
		sg_set_buf(sg, hdr, sizeof(*hdr));

		if (vi->big_packets) {
			for (i = 0; i < MAX_SKB_FRAGS; i++) {
				skb_frag_t *f = &skb_shinfo(skb)->frags[i];
				f->page = get_a_page(vi, gfp);
				if (!f->page)
					break;

				f->page_offset = 0;
				f->size = PAGE_SIZE;

				skb->data_len += PAGE_SIZE;
				skb->len += PAGE_SIZE;

				skb_shinfo(skb)->nr_frags++;
			}
		}

		num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, num, skb);
		if (err < 0) {
			skb_unlink(skb, &vi->recv);
			trim_pages(vi, skb);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

/* Returns false if we couldn't fill entirely (OOM). */
static bool try_fill_recv(struct virtnet_info *vi, gfp_t gfp)
{
	struct sk_buff *skb;
	struct scatterlist sg[1];
	int err;
	bool oom = false;

	if (!vi->mergeable_rx_bufs)
		return try_fill_recv_maxbufs(vi, gfp);

	for (;;) {
		skb_frag_t *f;

		skb = netdev_alloc_skb(vi->dev, GOOD_COPY_LEN + NET_IP_ALIGN);
		if (unlikely(!skb)) {
			oom = true;
			break;
		}

		skb_reserve(skb, NET_IP_ALIGN);

		f = &skb_shinfo(skb)->frags[0];
		f->page = get_a_page(vi, gfp);
		if (!f->page) {
			oom = true;
			kfree_skb(skb);
			break;
		}

		f->page_offset = 0;
		f->size = PAGE_SIZE;

		skb_shinfo(skb)->nr_frags++;

		sg_init_one(sg, page_address(f->page), PAGE_SIZE);
		skb_queue_head(&vi->recv, skb);

		err = vi->rvq->vq_ops->add_buf(vi->rvq, sg, 0, 1, skb);
		if (err < 0) {
			skb_unlink(skb, &vi->recv);
			kfree_skb(skb);
			break;
		}
		vi->num++;
	}
	if (unlikely(vi->num > vi->max))
		vi->max = vi->num;
	vi->rvq->vq_ops->kick(vi->rvq);
	return !oom;
}

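/* Callback from the receive virtqueue: input buffers are waiting. */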
static void skb_recv_done(struct virtqueue *rvq)
{
	struct virtnet_info *vi = rvq->vdev->priv;
	/* Schedule NAPI; suppress further interrupts if successful. */
	if (napi_schedule_prep(&vi->napi)) {
		rvq->vq_ops->disable_cb(rvq);
		__napi_schedule(&vi->napi);
	}
}

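/* Process-context worker to refill the rx ring after an atomic allocation
 * failure in the poll loop. */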
static void refill_work(struct work_struct *work)
{
	struct virtnet_info *vi;
	bool still_empty;

	vi = container_of(work, struct virtnet_info, refill.work);
	napi_disable(&vi->napi);
	try_fill_recv(vi, GFP_KERNEL);
	still_empty = (vi->num == 0);
	napi_enable(&vi->napi);

	/* In theory, this can happen: if we don't get any buffers in
	 * we will *never* try to fill again. */
	if (still_empty)
		schedule_delayed_work(&vi->refill, HZ/2);
}

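/* NAPI poll routine: drain up to @budget received buffers, topping up the
 * ring once it falls below half full. */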
static int virtnet_poll(struct napi_struct *napi, int budget)
{
	struct virtnet_info *vi = container_of(napi, struct virtnet_info, napi);
	struct sk_buff *skb = NULL;
	unsigned int len, received = 0;

again:
	while (received < budget &&
	       (skb = vi->rvq->vq_ops->get_buf(vi->rvq, &len)) != NULL) {
		__skb_unlink(skb, &vi->recv);
		receive_skb(vi->dev, skb, len);
		vi->num--;
		received++;
	}

	if (vi->num < vi->max / 2) {
		if (!try_fill_recv(vi, GFP_ATOMIC))
			schedule_delayed_work(&vi->refill, 0);
	}

	/* Out of packets? */
	if (received < budget) {
		napi_complete(napi);
		if (unlikely(!vi->rvq->vq_ops->enable_cb(vi->rvq))
		    && napi_schedule_prep(napi)) {
			vi->rvq->vq_ops->disable_cb(vi->rvq);
			__napi_schedule(napi);
			goto again;
		}
	}

	return received;
}

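/* Reclaim skbs the host has finished with and account them as transmitted. */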
static void free_old_xmit_skbs(struct virtnet_info *vi)
{
	struct sk_buff *skb;
	unsigned int len;

	while ((skb = vi->svq->vq_ops->get_buf(vi->svq, &len)) != NULL) {
		pr_debug("Sent skb %p\n", skb);
		__skb_unlink(skb, &vi->send);
		vi->dev->stats.tx_bytes += skb->len;
		vi->dev->stats.tx_packets++;
		kfree_skb(skb);
	}
}

/* If the virtio transport doesn't always notify us when all in-flight packets
 * are consumed, we fall back to using this function on a timer to free them. */
static void xmit_free(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock(vi->dev);

	free_old_xmit_skbs(vi);

	if (!skb_queue_empty(&vi->send))
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));

	netif_tx_unlock(vi->dev);
}

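/* Build the virtio header for an outgoing skb and post it to the send
 * virtqueue.  Returns a negative value if the ring has no room. */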
static int xmit_skb(struct virtnet_info *vi, struct sk_buff *skb)
{
	int num, err;
	struct scatterlist sg[2+MAX_SKB_FRAGS];
	struct virtio_net_hdr_mrg_rxbuf *mhdr = skb_vnet_hdr(skb);
	struct virtio_net_hdr *hdr = skb_vnet_hdr(skb);
	const unsigned char *dest = ((struct ethhdr *)skb->data)->h_dest;

	sg_init_table(sg, 2+MAX_SKB_FRAGS);

	pr_debug("%s: xmit %p %pM\n", vi->dev->name, skb, dest);

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		hdr->flags = VIRTIO_NET_HDR_F_NEEDS_CSUM;
		hdr->csum_start = skb->csum_start - skb_headroom(skb);
		hdr->csum_offset = skb->csum_offset;
	} else {
		hdr->flags = 0;
		hdr->csum_offset = hdr->csum_start = 0;
	}

	if (skb_is_gso(skb)) {
		hdr->hdr_len = skb_headlen(skb);
		hdr->gso_size = skb_shinfo(skb)->gso_size;
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV4;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_TCPV6;
		else if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP)
			hdr->gso_type = VIRTIO_NET_HDR_GSO_UDP;
		else
			BUG();
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCP_ECN)
			hdr->gso_type |= VIRTIO_NET_HDR_GSO_ECN;
	} else {
		hdr->gso_type = VIRTIO_NET_HDR_GSO_NONE;
		hdr->gso_size = hdr->hdr_len = 0;
	}

	mhdr->num_buffers = 0;

	/* Encode metadata header at front. */
	if (vi->mergeable_rx_bufs)
		sg_set_buf(sg, mhdr, sizeof(*mhdr));
	else
		sg_set_buf(sg, hdr, sizeof(*hdr));

	num = skb_to_sgvec(skb, sg+1, 0, skb->len) + 1;

	err = vi->svq->vq_ops->add_buf(vi->svq, sg, num, 0, skb);
	if (err >= 0 && !vi->free_in_tasklet) {
		/* Don't wait up for transmitted skbs to be freed. */
		skb_orphan(skb);
		nf_reset(skb);
		mod_timer(&vi->xmit_free_timer, jiffies + (HZ/10));
	}

	return err;
}

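/* Bottom half that reclaims transmitted skbs when the host notifies on an
 * empty ring. */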
static void xmit_tasklet(unsigned long data)
{
	struct virtnet_info *vi = (void *)data;

	netif_tx_lock_bh(vi->dev);
	free_old_xmit_skbs(vi);
	netif_tx_unlock_bh(vi->dev);
}

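/* Network stack entry point for transmitting one skb. */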
static netdev_tx_t start_xmit(struct sk_buff *skb, struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

again:
	/* Free up any pending old buffers before queueing new ones. */
	free_old_xmit_skbs(vi);

	/* Put new one in send queue and do transmit */
	__skb_queue_head(&vi->send, skb);
	if (likely(xmit_skb(vi, skb) >= 0)) {
		vi->svq->vq_ops->kick(vi->svq);
		return NETDEV_TX_OK;
	}

	/* Ring too full for this packet, remove it from queue again. */
	pr_debug("%s: virtio not prepared to send\n", dev->name);
	__skb_unlink(skb, &vi->send);
	netif_stop_queue(dev);

	/* Activate callback for using skbs: if this returns false it
	 * means some were used in the meantime. */
	if (unlikely(!vi->svq->vq_ops->enable_cb(vi->svq))) {
		vi->svq->vq_ops->disable_cb(vi->svq);
		netif_start_queue(dev);
		goto again;
	}
	return NETDEV_TX_BUSY;
}

static int virtnet_set_mac_address(struct net_device *dev, void *p)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;
	int ret;

	ret = eth_mac_addr(dev, p);
	if (ret)
		return ret;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC))
		vdev->config->set(vdev, offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);

	return 0;
}

#ifdef CONFIG_NET_POLL_CONTROLLER
static void virtnet_netpoll(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_schedule(&vi->napi);
}
#endif

static int virtnet_open(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_enable(&vi->napi);

	/* If all buffers were filled by other side before we napi_enabled, we
	 * won't get another interrupt, so process any outstanding packets
	 * now.  virtnet_poll wants to re-enable the queue, so we disable here.
	 * We synchronize against interrupts via NAPI_STATE_SCHED */
	if (napi_schedule_prep(&vi->napi)) {
		vi->rvq->vq_ops->disable_cb(vi->rvq);
		__napi_schedule(&vi->napi);
	}
	return 0;
}

/*
 * Send command via the control virtqueue and check status.  Commands
 * supported by the hypervisor, as indicated by feature bits, should
 * never fail unless improperly formatted.
 */
static bool virtnet_send_command(struct virtnet_info *vi, u8 class, u8 cmd,
				 struct scatterlist *data, int out, int in)
{
	struct scatterlist *s, sg[VIRTNET_SEND_COMMAND_SG_MAX + 2];
	struct virtio_net_ctrl_hdr ctrl;
	virtio_net_ctrl_ack status = ~0;
	unsigned int tmp;
	int i;

	/* Caller should know better */
	BUG_ON(!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ||
	       (out + in > VIRTNET_SEND_COMMAND_SG_MAX));

	out++; /* Add header */
	in++; /* Add return status */

	ctrl.class = class;
	ctrl.cmd = cmd;

	sg_init_table(sg, out + in);

	sg_set_buf(&sg[0], &ctrl, sizeof(ctrl));
	for_each_sg(data, s, out + in - 2, i)
		sg_set_buf(&sg[i + 1], sg_virt(s), s->length);
	sg_set_buf(&sg[out + in - 1], &status, sizeof(status));

	BUG_ON(vi->cvq->vq_ops->add_buf(vi->cvq, sg, out, in, vi) < 0);

	vi->cvq->vq_ops->kick(vi->cvq);

	/*
	 * Spin for a response, the kick causes an ioport write, trapping
	 * into the hypervisor, so the request should be handled immediately.
	 */
	while (!vi->cvq->vq_ops->get_buf(vi->cvq, &tmp))
		cpu_relax();

	return status == VIRTIO_NET_OK;
}

static int virtnet_close(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);

	napi_disable(&vi->napi);

	return 0;
}

static int virtnet_set_tx_csum(struct net_device *dev, u32 data)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct virtio_device *vdev = vi->vdev;

	if (data && !virtio_has_feature(vdev, VIRTIO_NET_F_CSUM))
		return -ENOSYS;

	return ethtool_op_set_tx_hw_csum(dev, data);
}

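/* Push the promiscuous/allmulti flags and the unicast/multicast MAC filter
 * lists to the host over the control virtqueue. */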
static void virtnet_set_rx_mode(struct net_device *dev)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg[2];
	u8 promisc, allmulti;
	struct virtio_net_ctrl_mac *mac_data;
	struct dev_addr_list *addr;
	struct netdev_hw_addr *ha;
	void *buf;
	int i;

	/* We can't dynamically set ndo_set_rx_mode, so return gracefully */
	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_RX))
		return;

	promisc = ((dev->flags & IFF_PROMISC) != 0);
	allmulti = ((dev->flags & IFF_ALLMULTI) != 0);

	sg_init_one(sg, &promisc, sizeof(promisc));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_PROMISC,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable promisc mode.\n",
			 promisc ? "en" : "dis");

	sg_init_one(sg, &allmulti, sizeof(allmulti));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_RX,
				  VIRTIO_NET_CTRL_RX_ALLMULTI,
				  sg, 1, 0))
		dev_warn(&dev->dev, "Failed to %sable allmulti mode.\n",
			 allmulti ? "en" : "dis");

	/* MAC filter - use one buffer for both lists */
	mac_data = buf = kzalloc(((dev->uc.count + dev->mc_count) * ETH_ALEN) +
				 (2 * sizeof(mac_data->entries)), GFP_ATOMIC);
	if (!buf) {
		dev_warn(&dev->dev, "No memory for MAC address buffer\n");
		return;
	}

	sg_init_table(sg, 2);

	/* Store the unicast list and count in the front of the buffer */
	mac_data->entries = dev->uc.count;
	i = 0;
	list_for_each_entry(ha, &dev->uc.list, list)
		memcpy(&mac_data->macs[i++][0], ha->addr, ETH_ALEN);

	sg_set_buf(&sg[0], mac_data,
		   sizeof(mac_data->entries) + (dev->uc.count * ETH_ALEN));

	/* multicast list and count fill the end */
	mac_data = (void *)&mac_data->macs[dev->uc.count][0];

	mac_data->entries = dev->mc_count;
	addr = dev->mc_list;
	for (i = 0; i < dev->mc_count; i++, addr = addr->next)
		memcpy(&mac_data->macs[i][0], addr->da_addr, ETH_ALEN);

	sg_set_buf(&sg[1], mac_data,
		   sizeof(mac_data->entries) + (dev->mc_count * ETH_ALEN));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_MAC,
				  VIRTIO_NET_CTRL_MAC_TABLE_SET,
				  sg, 2, 0))
		dev_warn(&dev->dev, "Failed to set MAC filter table.\n");

	kfree(buf);
}

static void virtnet_vlan_rx_add_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_ADD, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to add VLAN ID %d.\n", vid);
}

static void virtnet_vlan_rx_kill_vid(struct net_device *dev, u16 vid)
{
	struct virtnet_info *vi = netdev_priv(dev);
	struct scatterlist sg;

	sg_init_one(&sg, &vid, sizeof(vid));

	if (!virtnet_send_command(vi, VIRTIO_NET_CTRL_VLAN,
				  VIRTIO_NET_CTRL_VLAN_DEL, &sg, 1, 0))
		dev_warn(&dev->dev, "Failed to kill VLAN ID %d.\n", vid);
}

static const struct ethtool_ops virtnet_ethtool_ops = {
	.set_tx_csum = virtnet_set_tx_csum,
	.set_sg = ethtool_op_set_sg,
	.set_tso = ethtool_op_set_tso,
	.set_ufo = ethtool_op_set_ufo,
	.get_link = ethtool_op_get_link,
};

#define MIN_MTU 68
#define MAX_MTU 65535

static int virtnet_change_mtu(struct net_device *dev, int new_mtu)
{
	if (new_mtu < MIN_MTU || new_mtu > MAX_MTU)
		return -EINVAL;
	dev->mtu = new_mtu;
	return 0;
}

static const struct net_device_ops virtnet_netdev = {
	.ndo_open            = virtnet_open,
	.ndo_stop            = virtnet_close,
	.ndo_start_xmit      = start_xmit,
	.ndo_validate_addr   = eth_validate_addr,
	.ndo_set_mac_address = virtnet_set_mac_address,
	.ndo_set_rx_mode     = virtnet_set_rx_mode,
	.ndo_change_mtu      = virtnet_change_mtu,
	.ndo_vlan_rx_add_vid = virtnet_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid = virtnet_vlan_rx_kill_vid,
#ifdef CONFIG_NET_POLL_CONTROLLER
	.ndo_poll_controller = virtnet_netpoll,
#endif
};

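/* Re-read the link state from config space and sync the carrier state. */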
static void virtnet_update_status(struct virtnet_info *vi)
{
	u16 v;

	if (!virtio_has_feature(vi->vdev, VIRTIO_NET_F_STATUS))
		return;

	vi->vdev->config->get(vi->vdev,
			      offsetof(struct virtio_net_config, status),
			      &v, sizeof(v));

	/* Ignore unknown (future) status bits */
	v &= VIRTIO_NET_S_LINK_UP;

	if (vi->status == v)
		return;

	vi->status = v;

	if (vi->status & VIRTIO_NET_S_LINK_UP) {
		netif_carrier_on(vi->dev);
		netif_wake_queue(vi->dev);
	} else {
		netif_carrier_off(vi->dev);
		netif_stop_queue(vi->dev);
	}
}

static void virtnet_config_changed(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;

	virtnet_update_status(vi);
}

static int virtnet_probe(struct virtio_device *vdev)
{
	int err;
	struct net_device *dev;
	struct virtnet_info *vi;
	struct virtqueue *vqs[3];
	vq_callback_t *callbacks[] = { skb_recv_done, skb_xmit_done, NULL};
	const char *names[] = { "input", "output", "control" };
	int nvqs;

	/* Allocate ourselves a network device with room for our info */
	dev = alloc_etherdev(sizeof(struct virtnet_info));
	if (!dev)
		return -ENOMEM;

	/* Set up network device as normal. */
	dev->netdev_ops = &virtnet_netdev;
	dev->features = NETIF_F_HIGHDMA;
	SET_ETHTOOL_OPS(dev, &virtnet_ethtool_ops);
	SET_NETDEV_DEV(dev, &vdev->dev);

	/* Do we support "hardware" checksums? */
	if (csum && virtio_has_feature(vdev, VIRTIO_NET_F_CSUM)) {
		/* This opens up the world of extra features. */
		dev->features |= NETIF_F_HW_CSUM|NETIF_F_SG|NETIF_F_FRAGLIST;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_GSO)) {
			dev->features |= NETIF_F_TSO | NETIF_F_UFO
				| NETIF_F_TSO_ECN | NETIF_F_TSO6;
		}
		/* Individual feature bits: what can host handle? */
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO4))
			dev->features |= NETIF_F_TSO;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_TSO6))
			dev->features |= NETIF_F_TSO6;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_ECN))
			dev->features |= NETIF_F_TSO_ECN;
		if (gso && virtio_has_feature(vdev, VIRTIO_NET_F_HOST_UFO))
			dev->features |= NETIF_F_UFO;
	}

	/* Configuration may specify what MAC to use.  Otherwise random. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_MAC)) {
		vdev->config->get(vdev,
				  offsetof(struct virtio_net_config, mac),
				  dev->dev_addr, dev->addr_len);
	} else
		random_ether_addr(dev->dev_addr);

	/* Set up our device-specific information */
	vi = netdev_priv(dev);
	netif_napi_add(dev, &vi->napi, virtnet_poll, napi_weight);
	vi->dev = dev;
	vi->vdev = vdev;
	vdev->priv = vi;
	vi->pages = NULL;
	INIT_DELAYED_WORK(&vi->refill, refill_work);

	/* If they give us a callback when all buffers are done, we don't need
	 * the timer. */
	vi->free_in_tasklet = virtio_has_feature(vdev, VIRTIO_F_NOTIFY_ON_EMPTY);

	/* If we can receive ANY GSO packets, we must allocate large ones. */
	if (virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO4)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_TSO6)
	    || virtio_has_feature(vdev, VIRTIO_NET_F_GUEST_ECN))
		vi->big_packets = true;

	if (virtio_has_feature(vdev, VIRTIO_NET_F_MRG_RXBUF))
		vi->mergeable_rx_bufs = true;

	/* We expect two virtqueues, receive then send,
	 * and optionally control. */
	nvqs = virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ) ? 3 : 2;

	err = vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names);
	if (err)
		goto free;

	vi->rvq = vqs[0];
	vi->svq = vqs[1];

	if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VQ)) {
		vi->cvq = vqs[2];

		if (virtio_has_feature(vi->vdev, VIRTIO_NET_F_CTRL_VLAN))
			dev->features |= NETIF_F_HW_VLAN_FILTER;
	}

	/* Initialize our empty receive and send queues. */
	skb_queue_head_init(&vi->recv);
	skb_queue_head_init(&vi->send);

	tasklet_init(&vi->tasklet, xmit_tasklet, (unsigned long)vi);

	if (!vi->free_in_tasklet)
		setup_timer(&vi->xmit_free_timer, xmit_free, (unsigned long)vi);

	err = register_netdev(dev);
	if (err) {
		pr_debug("virtio_net: registering device failed\n");
		goto free_vqs;
	}

	/* Last of all, set up some receive buffers. */
	try_fill_recv(vi, GFP_KERNEL);

	/* If we didn't even get one input buffer, we're useless. */
	if (vi->num == 0) {
		err = -ENOMEM;
		goto unregister;
	}

	vi->status = VIRTIO_NET_S_LINK_UP;
	virtnet_update_status(vi);
	netif_carrier_on(dev);

	pr_debug("virtnet: registered device %s\n", dev->name);
	return 0;

unregister:
	unregister_netdev(dev);
	cancel_delayed_work_sync(&vi->refill);
free_vqs:
	vdev->config->del_vqs(vdev);
free:
	free_netdev(dev);
	return err;
}

static void virtnet_remove(struct virtio_device *vdev)
{
	struct virtnet_info *vi = vdev->priv;
	struct sk_buff *skb;

	/* Stop all the virtqueues. */
	vdev->config->reset(vdev);

	if (!vi->free_in_tasklet)
		del_timer_sync(&vi->xmit_free_timer);

	/* Free our skbs in send and recv queues, if any. */
	while ((skb = __skb_dequeue(&vi->recv)) != NULL) {
		kfree_skb(skb);
		vi->num--;
	}
	__skb_queue_purge(&vi->send);

	BUG_ON(vi->num != 0);

	unregister_netdev(vi->dev);
	cancel_delayed_work_sync(&vi->refill);

	vdev->config->del_vqs(vi->vdev);

	while (vi->pages)
		__free_pages(get_a_page(vi, GFP_KERNEL), 0);

	free_netdev(vi->dev);
}

static struct virtio_device_id id_table[] = {
	{ VIRTIO_ID_NET, VIRTIO_DEV_ANY_ID },
	{ 0 },
};

static unsigned int features[] = {
	VIRTIO_NET_F_CSUM, VIRTIO_NET_F_GUEST_CSUM,
	VIRTIO_NET_F_GSO, VIRTIO_NET_F_MAC,
	VIRTIO_NET_F_HOST_TSO4, VIRTIO_NET_F_HOST_UFO, VIRTIO_NET_F_HOST_TSO6,
	VIRTIO_NET_F_HOST_ECN, VIRTIO_NET_F_GUEST_TSO4, VIRTIO_NET_F_GUEST_TSO6,
	VIRTIO_NET_F_GUEST_ECN, VIRTIO_NET_F_GUEST_UFO,
	VIRTIO_NET_F_MRG_RXBUF, VIRTIO_NET_F_STATUS, VIRTIO_NET_F_CTRL_VQ,
	VIRTIO_NET_F_CTRL_RX, VIRTIO_NET_F_CTRL_VLAN,
	VIRTIO_F_NOTIFY_ON_EMPTY,
};

static struct virtio_driver virtio_net = {
	.feature_table = features,
	.feature_table_size = ARRAY_SIZE(features),
	.driver.name = KBUILD_MODNAME,
	.driver.owner = THIS_MODULE,
	.id_table = id_table,
	.probe = virtnet_probe,
	.remove = __devexit_p(virtnet_remove),
	.config_changed = virtnet_config_changed,
};

static int __init init(void)
{
	return register_virtio_driver(&virtio_net);
}

static void __exit fini(void)
{
	unregister_virtio_driver(&virtio_net);
}
module_init(init);
module_exit(fini);

MODULE_DEVICE_TABLE(virtio, id_table);
MODULE_DESCRIPTION("Virtio network driver");
MODULE_LICENSE("GPL");