netvsc: fix NAPI performance regression
When using NAPI, single stream performance declined significantly
because the poll routine was updating the host after every burst
of packets. This excess signalling caused host throttling.

This fix restores the old behavior: the host is only signalled
after the receive ring has been emptied.
Signed-off-by: Stephen Hemminger <sthemmin@microsoft.com>
Signed-off-by: David S. Miller <davem@davemloft.net>
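To illustrate the idea behind the fix, here is a minimal user-space sketch of
the polling pattern, not driver code: fake_ring, poll_ring and signal_host are
made-up names, and the "host signal" is just a counter. The read cursor
survives across polls, and the host is signalled only once the ring has been
drained within budget, rather than once per poll.

#include <stdio.h>

struct fake_ring {
	int head;	/* next packet to consume */
	int tail;	/* one past the last produced packet */
	int signals;	/* how many times the "host" was signalled */
};

/* Stand-in for the costly notification that tells the host we are done */
static void signal_host(struct fake_ring *r)
{
	r->signals++;
}

/*
 * Process up to @budget packets.  The read cursor (r->head) persists
 * across calls, like nvchan->desc in the patched driver, so a poll that
 * runs out of budget simply resumes where it left off next time.
 */
static int poll_ring(struct fake_ring *r, int budget)
{
	int work_done = 0;

	while (r->head != r->tail && work_done < budget) {
		r->head++;		/* "consume" one packet */
		work_done++;
	}

	/*
	 * Old behaviour: signal the host here unconditionally, once per
	 * poll.  New behaviour: only signal when the ring was drained
	 * within budget; otherwise we will be polled again anyway.
	 */
	if (work_done < budget)
		signal_host(r);

	return work_done;
}

int main(void)
{
	struct fake_ring r = { .head = 0, .tail = 40, .signals = 0 };
	int total = 0;

	while (r.head != r.tail)
		total += poll_ring(&r, 16);	/* NAPI-style budget */

	/* 40 packets take three polls but cause only one host signal */
	printf("processed %d packets, %d host signal(s)\n", total, r.signals);
	return 0;
}

In the hunks below this corresponds to keeping the iterator in nvchan->desc
across polls and dropping the per-poll hv_pkt_iter_close() call that was
signalling the host after every burst.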
diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c
index 989b7cd..727762d0 100644
--- a/drivers/net/hyperv/netvsc.c
+++ b/drivers/net/hyperv/netvsc.c
@@ -1173,7 +1173,6 @@
struct vmbus_channel *channel,
struct netvsc_device *net_device,
struct net_device *ndev,
- u64 request_id,
const struct vmpacket_descriptor *desc)
{
struct net_device_context *net_device_ctx = netdev_priv(ndev);
@@ -1195,7 +1194,7 @@
default:
netdev_err(ndev, "unhandled packet type %d, tid %llx\n",
- desc->type, request_id);
+ desc->type, desc->trans_id);
break;
}
@@ -1222,28 +1221,20 @@
u16 q_idx = channel->offermsg.offer.sub_channel_index;
struct net_device *ndev = hv_get_drvdata(device);
struct netvsc_device *net_device = net_device_to_netvsc_device(ndev);
- const struct vmpacket_descriptor *desc;
int work_done = 0;
- desc = hv_pkt_iter_first(channel);
- while (desc) {
- int count;
+ /* If starting a new interval */
+ if (!nvchan->desc)
+ nvchan->desc = hv_pkt_iter_first(channel);
- count = netvsc_process_raw_pkt(device, channel, net_device,
- ndev, desc->trans_id, desc);
- work_done += count;
- desc = __hv_pkt_iter_next(channel, desc);
-
- /* If receive packet budget is exhausted, reschedule */
- if (work_done >= budget) {
- work_done = budget;
- break;
- }
+ while (nvchan->desc && work_done < budget) {
+ work_done += netvsc_process_raw_pkt(device, channel, net_device,
+ ndev, nvchan->desc);
+ nvchan->desc = hv_pkt_iter_next(channel, nvchan->desc);
}
- hv_pkt_iter_close(channel);
- /* If budget was not exhausted and
- * not doing busy poll
+ /* If receive ring was exhausted
+ * and not doing busy poll
* then re-enable host interrupts
* and reschedule if ring is not empty.
*/
@@ -1253,7 +1244,9 @@
napi_reschedule(napi);
netvsc_chk_recv_comp(net_device, channel, q_idx);
- return work_done;
+
+ /* Driver may overshoot since multiple packets per descriptor */
+ return min(work_done, budget);
}
/* Call back when data is available in host ring buffer.
@@ -1263,10 +1256,12 @@
{
struct netvsc_channel *nvchan = context;
- /* disable interupts from host */
- hv_begin_read(&nvchan->channel->inbound);
+ if (napi_schedule_prep(&nvchan->napi)) {
+ /* disable interupts from host */
+ hv_begin_read(&nvchan->channel->inbound);
- napi_schedule(&nvchan->napi);
+ __napi_schedule(&nvchan->napi);
+ }
}
/*