/*
 * Low Latency Sockets
 * Copyright(c) 2013 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 *
 * Author: Eliezer Tamir
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 */
23
Eliezer Tamir06021292013-06-10 11:39:50 +030024#ifndef _LINUX_NET_LL_POLL_H
25#define _LINUX_NET_LL_POLL_H
26
27#include <linux/netdevice.h>
28#include <net/ip.h>
29
30#ifdef CONFIG_NET_LL_RX_POLL
31
32struct napi_struct;
Eliezer Tamireb6db622013-06-14 16:33:25 +030033extern unsigned int sysctl_net_ll_poll __read_mostly;
Eliezer Tamir06021292013-06-10 11:39:50 +030034
35/* return values from ndo_ll_poll */
36#define LL_FLUSH_FAILED -1
37#define LL_FLUSH_BUSY -2
38
Eliezer Tamir9a3c71a2013-06-14 16:33:35 +030039/* we can use sched_clock() because we don't care much about precision
40 * we only care that the average is bounded
41 */
Eliezer Tamirdafcc432013-06-14 16:33:57 +030042static inline u64 ll_end_time(struct sock *sk)
Eliezer Tamir06021292013-06-10 11:39:50 +030043{
Eliezer Tamirdafcc432013-06-14 16:33:57 +030044 u64 end_time = ACCESS_ONCE(sk->sk_ll_usec);
Eliezer Tamir9a3c71a2013-06-14 16:33:35 +030045
46 /* we don't mind a ~2.5% imprecision
Eliezer Tamirdafcc432013-06-14 16:33:57 +030047 * sk->sk_ll_usec is a u_int so this can't overflow
Eliezer Tamir9a3c71a2013-06-14 16:33:35 +030048 */
49 end_time = (end_time << 10) + sched_clock();
50
51 return end_time;
Eliezer Tamir06021292013-06-10 11:39:50 +030052}
53
54static inline bool sk_valid_ll(struct sock *sk)
55{
Eliezer Tamirdafcc432013-06-14 16:33:57 +030056 return sk->sk_ll_usec && sk->sk_napi_id &&
Eliezer Tamir06021292013-06-10 11:39:50 +030057 !need_resched() && !signal_pending(current);
58}
59
Eliezer Tamir9a3c71a2013-06-14 16:33:35 +030060static inline bool can_poll_ll(u64 end_time)
Eliezer Tamir06021292013-06-10 11:39:50 +030061{
Eliezer Tamir9a3c71a2013-06-14 16:33:35 +030062 return !time_after64(sched_clock(), end_time);
Eliezer Tamir06021292013-06-10 11:39:50 +030063}
64
/* Busy-poll the device queue this socket last received on, until data
 * shows up in the socket's receive queue, the ll_end_time() deadline
 * passes, or the driver reports a permanent failure.
 * @nonblock: when set, poll the driver at most once instead of looping.
 * Returns true if the receive queue is non-empty on exit.
 */
static inline bool sk_poll_ll(struct sock *sk, int nonblock)
{
	const struct net_device_ops *ops;
	u64 end_time = ll_end_time(sk);
	struct napi_struct *napi;
	int rc = false;

	/*
	 * rcu read lock for napi hash
	 * bh so we don't race with net_rx_action
	 */
	rcu_read_lock_bh();

	/* sk_napi_id was stamped by sk_mark_ll(); it may no longer resolve */
	napi = napi_by_id(sk->sk_napi_id);
	if (!napi)
		goto out;

	/* the device must support low-latency polling */
	ops = napi->dev->netdev_ops;
	if (!ops->ndo_ll_poll)
		goto out;

	do {

		/* rc > 0: packets flushed; LL_FLUSH_FAILED/LL_FLUSH_BUSY otherwise */
		rc = ops->ndo_ll_poll(napi);

		if (rc == LL_FLUSH_FAILED)
			break; /* permanent failure */

		if (rc > 0)
			/* local bh are disabled so it is ok to use _BH */
			NET_ADD_STATS_BH(sock_net(sk),
					 LINUX_MIB_LOWLATENCYRXPACKETS, rc);

	} while (skb_queue_empty(&sk->sk_receive_queue)
		 && can_poll_ll(end_time) && !nonblock);

	/* report whether polling actually produced data for this socket */
	rc = !skb_queue_empty(&sk->sk_receive_queue);
out:
	rcu_read_unlock_bh();
	return rc;
}
106
/* used in the NIC receive handler to mark the skb */
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
{
	/* record which NAPI context delivered this skb so the socket
	 * layer can later busy-poll the same queue (see sk_mark_ll)
	 */
	skb->napi_id = napi->napi_id;
}
112
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
{
	/* sk_poll_ll() looks this id up via napi_by_id() */
	sk->sk_napi_id = skb->napi_id;
}
118
119#else /* CONFIG_NET_LL_RX_POLL */
120
/* CONFIG_NET_LL_RX_POLL disabled: no busy-poll deadline */
static inline u64 ll_end_time(struct sock *sk)
{
	return 0;
}
125
/* CONFIG_NET_LL_RX_POLL disabled: busy polling is never usable */
static inline bool sk_valid_ll(struct sock *sk)
{
	return false;
}
130
/* CONFIG_NET_LL_RX_POLL disabled: polling never finds data */
static inline bool sk_poll_ll(struct sock *sk, int nonblock)
{
	return false;
}
135
/* CONFIG_NET_LL_RX_POLL disabled: no NAPI id to record on the skb */
static inline void skb_mark_ll(struct sk_buff *skb, struct napi_struct *napi)
{
}
139
/* CONFIG_NET_LL_RX_POLL disabled: nothing to propagate to the socket */
static inline void sk_mark_ll(struct sock *sk, struct sk_buff *skb)
{
}
143
/* CONFIG_NET_LL_RX_POLL disabled: the deadline is always considered passed */
static inline bool can_poll_ll(u64 end_time)
{
	return false;
}
148
149#endif /* CONFIG_NET_LL_RX_POLL */
150#endif /* _LINUX_NET_LL_POLL_H */