/*
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#ifndef __XEN_NETBACK__COMMON_H__
#define __XEN_NETBACK__COMMON_H__

#define pr_fmt(fmt) KBUILD_MODNAME ":%s: " fmt, __func__

#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/slab.h>
#include <linux/ip.h>
#include <linux/in.h>
#include <linux/io.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/wait.h>
#include <linux/sched.h>

#include <xen/interface/io/netif.h>
#include <xen/interface/grant_table.h>
#include <xen/grant_table.h>
#include <xen/xenbus.h>
#include <xen/page.h>
#include <linux/debugfs.h>

typedef unsigned int pending_ring_idx_t;
#define INVALID_PENDING_RING_IDX (~0U)

struct pending_tx_info {
	struct xen_netif_tx_request req; /* tx request */
	unsigned int extra_count;
	/* Callback data for released SKBs. The callback is always
	 * xenvif_zerocopy_callback; desc contains the pending_idx, which is
	 * also an index into the pending_tx_info array. It is initialized in
	 * xenvif_alloc and never changes.
	 * skb_shinfo(skb)->destructor_arg points to the first mapped slot's
	 * callback_struct in this array of struct pending_tx_info's; ctx then
	 * points to the next one, or is NULL if there are no more slots for
	 * this skb.
	 * ubuf_to_vif is a helper which finds the struct xenvif from a pointer
	 * to this field.
	 */
	struct ubuf_info callback_struct;
};
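/* Illustrative sketch, not part of the original header: given the layout
 * described above, the pending_tx_info can be recovered from the ubuf_info
 * passed to the zerocopy callback with a single container_of():
 *
 *	struct pending_tx_info *info =
 *		container_of(ubuf, struct pending_tx_info, callback_struct);
 */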

#define XEN_NETIF_TX_RING_SIZE __CONST_RING_SIZE(xen_netif_tx, XEN_PAGE_SIZE)
#define XEN_NETIF_RX_RING_SIZE __CONST_RING_SIZE(xen_netif_rx, XEN_PAGE_SIZE)

struct xenvif_rx_meta {
	int id;
	int size;
	int gso_type;
	int gso_size;
};

#define GSO_BIT(type) \
	(1 << XEN_NETIF_GSO_TYPE_ ## type)
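/* e.g. (vif->gso_mask & GSO_BIT(TCPV4)) tests whether TCPv4 GSO was
 * negotiated with the frontend.
 */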

/* Discriminate from any valid pending_idx value. */
#define INVALID_PENDING_IDX 0xFFFF

#define MAX_BUFFER_OFFSET XEN_PAGE_SIZE

#define MAX_PENDING_REQS XEN_NETIF_TX_RING_SIZE

/* The maximum number of frags is derived from the size of a grant (same
 * as a Xen page size for now).
 */
#define MAX_XEN_SKB_FRAGS (65536 / XEN_PAGE_SIZE + 1)
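/* With 4 KiB Xen pages this works out to 65536 / 4096 + 1 = 17 frags. */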

/* It's possible for an skb to have a maximal number of frags
 * but still be less than MAX_BUFFER_OFFSET in size. Thus the
 * worst-case number of copy operations is MAX_XEN_SKB_FRAGS per
 * ring slot.
 */
#define MAX_GRANT_COPY_OPS (MAX_XEN_SKB_FRAGS * XEN_NETIF_RX_RING_SIZE)
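/* For reference, assuming 4 KiB pages: a 256-entry RX ring gives
 * 17 * 256 = 4352 copy operations in the worst case.
 */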

#define NETBACK_INVALID_HANDLE -1

/* To avoid confusion, we define XEN_NETBK_LEGACY_SLOTS_MAX indicating
 * the maximum slots a valid packet can use. Currently this value is
 * defined as XEN_NETIF_NR_SLOTS_MIN, which is supposed to be supported
 * by all backends.
 */
#define XEN_NETBK_LEGACY_SLOTS_MAX XEN_NETIF_NR_SLOTS_MIN
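/* XEN_NETIF_NR_SLOTS_MIN is 18 in the canonical netif.h at the time of
 * writing, matching the historical Linux frontend limit.
 */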

/* Queue name is interface name with "-qNNN" appended */
#define QUEUE_NAME_SIZE (IFNAMSIZ + 5)

/* IRQ name is queue name with "-tx" or "-rx" appended */
#define IRQ_NAME_SIZE (QUEUE_NAME_SIZE + 3)
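/* e.g. queue "vif1.0-q2" yields IRQ names "vif1.0-q2-tx" and "vif1.0-q2-rx". */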

struct xenvif;

struct xenvif_stats {
	/* Stats fields to be updated per-queue.
	 * A subset of struct net_device_stats that contains only the
	 * fields that are updated in netback.c for each queue.
	 */
	unsigned int rx_bytes;
	unsigned int rx_packets;
	unsigned int tx_bytes;
	unsigned int tx_packets;

	/* Additional stats used by xenvif */
	unsigned long rx_gso_checksum_fixup;
	unsigned long tx_zerocopy_sent;
	unsigned long tx_zerocopy_success;
	unsigned long tx_zerocopy_fail;
	unsigned long tx_frag_overflow;
};

struct xenvif_queue { /* Per-queue data for xenvif */
	unsigned int id; /* Queue ID, 0-based */
	char name[QUEUE_NAME_SIZE]; /* DEVNAME-qN */
	struct xenvif *vif; /* Parent VIF */

	/* Use NAPI for guest TX */
	struct napi_struct napi;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int tx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char tx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-tx */
	struct xen_netif_tx_back_ring tx;
	struct sk_buff_head tx_queue;
	struct page *mmap_pages[MAX_PENDING_REQS];
	pending_ring_idx_t pending_prod;
	pending_ring_idx_t pending_cons;
	u16 pending_ring[MAX_PENDING_REQS];
	struct pending_tx_info pending_tx_info[MAX_PENDING_REQS];
	grant_handle_t grant_tx_handle[MAX_PENDING_REQS];

	struct gnttab_copy tx_copy_ops[MAX_PENDING_REQS];
	struct gnttab_map_grant_ref tx_map_ops[MAX_PENDING_REQS];
	struct gnttab_unmap_grant_ref tx_unmap_ops[MAX_PENDING_REQS];
	/* passed to gnttab_[un]map_refs with pages under (un)mapping */
	struct page *pages_to_map[MAX_PENDING_REQS];
	struct page *pages_to_unmap[MAX_PENDING_REQS];

	/* This prevents zerocopy callbacks from racing over dealloc_ring */
	spinlock_t callback_lock;
	/* This prevents the dealloc thread and NAPI instance from racing over
	 * response creation and pending_ring in xenvif_idx_release. In
	 * xenvif_tx_err it only protects response creation.
	 */
	spinlock_t response_lock;
	pending_ring_idx_t dealloc_prod;
	pending_ring_idx_t dealloc_cons;
	u16 dealloc_ring[MAX_PENDING_REQS];
	struct task_struct *dealloc_task;
	wait_queue_head_t dealloc_wq;
	atomic_t inflight_packets;

	/* Use kthread for guest RX */
	struct task_struct *task;
	wait_queue_head_t wq;
	/* When feature-split-event-channels = 0, tx_irq = rx_irq. */
	unsigned int rx_irq;
	/* Only used when feature-split-event-channels = 1 */
	char rx_irq_name[IRQ_NAME_SIZE]; /* DEVNAME-qN-rx */
	struct xen_netif_rx_back_ring rx;
	struct sk_buff_head rx_queue;

	unsigned int rx_queue_max;
	unsigned int rx_queue_len;
	unsigned long last_rx_time;
	bool stalled;

	struct gnttab_copy grant_copy_op[MAX_GRANT_COPY_OPS];

	/* We create one meta structure per ring request we consume, so
	 * the maximum number is the same as the ring size.
	 */
	struct xenvif_rx_meta meta[XEN_NETIF_RX_RING_SIZE];

	/* Transmit shaping: allow 'credit_bytes' every 'credit_usec'. */
	unsigned long credit_bytes;
	unsigned long credit_usec;
	unsigned long remaining_credit;
	struct timer_list credit_timeout;
	u64 credit_window_start;

	/* Statistics */
	struct xenvif_stats stats;
};
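/* Illustrative sketch, not part of the original header: the shaper above
 * conceptually refills remaining_credit once per credit_usec window,
 * roughly along these lines:
 *
 *	u64 now = ktime_to_ns(ktime_get());
 *	u64 next_credit = queue->credit_window_start +
 *			  queue->credit_usec * NSEC_PER_USEC;
 *
 *	if (now >= next_credit) {
 *		queue->credit_window_start = now;
 *		queue->remaining_credit = queue->credit_bytes;
 *	}
 */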

enum state_bit_shift {
	/* This bit marks that the vif is connected */
	VIF_STATUS_CONNECTED,
};

struct xenvif_mcast_addr {
	struct list_head entry;
	struct rcu_head rcu;
	u8 addr[6];
};

#define XEN_NETBK_MCAST_MAX 64

#define XEN_NETBK_MAX_HASH_KEY_SIZE 40
#define XEN_NETBK_MAX_HASH_MAPPING_SIZE 128
#define XEN_NETBK_HASH_TAG_SIZE 40

struct xenvif_hash_cache_entry {
	struct list_head link;
	struct rcu_head rcu;
	u8 tag[XEN_NETBK_HASH_TAG_SIZE];
	unsigned int len;
	u32 val;
	int seq;
};

struct xenvif_hash_cache {
	spinlock_t lock;
	struct list_head list;
	unsigned int count;
	atomic_t seq;
};

struct xenvif_hash {
	unsigned int alg;
	u32 flags;
	u8 key[XEN_NETBK_MAX_HASH_KEY_SIZE];
	u32 mapping[XEN_NETBK_MAX_HASH_MAPPING_SIZE];
	unsigned int size;
	struct xenvif_hash_cache cache;
};

struct xenvif {
	/* Unique identifier for this interface. */
	domid_t domid;
	unsigned int handle;

	u8 fe_dev_addr[6];
	struct list_head fe_mcast_addr;
	unsigned int fe_mcast_count;

	/* Frontend feature information. */
	int gso_mask;
	int gso_prefix_mask;

	u8 can_sg:1;
	u8 ip_csum:1;
	u8 ipv6_csum:1;
	u8 multicast_control:1;

	/* Is this interface disabled? True when the backend discovers
	 * the frontend is rogue.
	 */
	bool disabled;
	unsigned long status;
	unsigned long drain_timeout;
	unsigned long stall_timeout;

	/* Queues */
	struct xenvif_queue *queues;
	unsigned int num_queues; /* active queues, resource allocated */
	unsigned int stalled_queues;

	struct xenvif_hash hash;

	struct xenbus_watch credit_watch;
	struct xenbus_watch mcast_ctrl_watch;

	spinlock_t lock;

#ifdef CONFIG_DEBUG_FS
	struct dentry *xenvif_dbg_root;
#endif

	struct xen_netif_ctrl_back_ring ctrl;
	unsigned int ctrl_irq;

	/* Miscellaneous private stuff. */
	struct net_device *dev;
};

struct xenvif_rx_cb {
	unsigned long expires;
	int meta_slots_used;
};

#define XENVIF_RX_CB(skb) ((struct xenvif_rx_cb *)(skb)->cb)
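/* Example usage (a sketch, not taken from this file): stamping an expiry
 * time on an skb as it is queued for guest RX:
 *
 *	XENVIF_RX_CB(skb)->expires = jiffies + vif->drain_timeout;
 */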

static inline struct xenbus_device *xenvif_to_xenbus_device(struct xenvif *vif)
{
	return to_xenbus_device(vif->dev->dev.parent);
}
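/* A hedged usage sketch: reporting a fatal error against the backend's
 * xenbus node, assuming a valid vif:
 *
 *	xenbus_dev_fatal(xenvif_to_xenbus_device(vif), err,
 *			 "mapping shared rings");
 */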

void xenvif_tx_credit_callback(unsigned long data);

struct xenvif *xenvif_alloc(struct device *parent,
			    domid_t domid,
			    unsigned int handle);

int xenvif_init_queue(struct xenvif_queue *queue);
void xenvif_deinit_queue(struct xenvif_queue *queue);

int xenvif_connect_data(struct xenvif_queue *queue,
			unsigned long tx_ring_ref,
			unsigned long rx_ring_ref,
			unsigned int tx_evtchn,
			unsigned int rx_evtchn);
void xenvif_disconnect_data(struct xenvif *vif);
int xenvif_connect_ctrl(struct xenvif *vif, grant_ref_t ring_ref,
			unsigned int evtchn);
void xenvif_disconnect_ctrl(struct xenvif *vif);
void xenvif_free(struct xenvif *vif);

int xenvif_xenbus_init(void);
void xenvif_xenbus_fini(void);

int xenvif_schedulable(struct xenvif *vif);

int xenvif_queue_stopped(struct xenvif_queue *queue);
void xenvif_wake_queue(struct xenvif_queue *queue);

/* (Un)Map communication rings. */
void xenvif_unmap_frontend_data_rings(struct xenvif_queue *queue);
int xenvif_map_frontend_data_rings(struct xenvif_queue *queue,
				   grant_ref_t tx_ring_ref,
				   grant_ref_t rx_ring_ref);

/* Check for SKBs from frontend and schedule backend processing */
void xenvif_napi_schedule_or_enable_events(struct xenvif_queue *queue);

/* Prevent the device from generating any further traffic. */
void xenvif_carrier_off(struct xenvif *vif);

int xenvif_tx_action(struct xenvif_queue *queue, int budget);

int xenvif_kthread_guest_rx(void *data);
void xenvif_kick_thread(struct xenvif_queue *queue);

int xenvif_dealloc_kthread(void *data);

irqreturn_t xenvif_ctrl_irq_fn(int irq, void *data);

void xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb);

void xenvif_carrier_on(struct xenvif *vif);

/* Callback from stack when TX packet can be released */
void xenvif_zerocopy_callback(struct ubuf_info *ubuf, bool zerocopy_success);

/* Unmap a pending page and release it back to the guest */
void xenvif_idx_unmap(struct xenvif_queue *queue, u16 pending_idx);

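/* Ring occupancy arithmetic, for reference: pending_prod - pending_cons is
 * the number of free entries in pending_ring, so subtracting it from
 * MAX_PENDING_REQS yields the number of requests currently in flight.
 */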
static inline pending_ring_idx_t nr_pending_reqs(struct xenvif_queue *queue)
{
	return MAX_PENDING_REQS -
		queue->pending_prod + queue->pending_cons;
}

irqreturn_t xenvif_interrupt(int irq, void *dev_id);

extern bool separate_tx_rx_irq;

extern unsigned int rx_drain_timeout_msecs;
extern unsigned int rx_stall_timeout_msecs;
extern unsigned int xenvif_max_queues;
extern unsigned int xenvif_hash_cache_size;

#ifdef CONFIG_DEBUG_FS
extern struct dentry *xen_netback_dbg_root;
#endif

void xenvif_skb_zerocopy_prepare(struct xenvif_queue *queue,
				 struct sk_buff *skb);
void xenvif_skb_zerocopy_complete(struct xenvif_queue *queue);

/* Multicast control */
bool xenvif_mcast_match(struct xenvif *vif, const u8 *addr);
void xenvif_mcast_addr_list_free(struct xenvif *vif);

/* Hash */
void xenvif_init_hash(struct xenvif *vif);
void xenvif_deinit_hash(struct xenvif *vif);

u32 xenvif_set_hash_alg(struct xenvif *vif, u32 alg);
u32 xenvif_get_hash_flags(struct xenvif *vif, u32 *flags);
u32 xenvif_set_hash_flags(struct xenvif *vif, u32 flags);
u32 xenvif_set_hash_key(struct xenvif *vif, u32 gref, u32 len);
u32 xenvif_set_hash_mapping_size(struct xenvif *vif, u32 size);
u32 xenvif_set_hash_mapping(struct xenvif *vif, u32 gref, u32 len,
			    u32 off);

void xenvif_set_skb_hash(struct xenvif *vif, struct sk_buff *skb);

#ifdef CONFIG_DEBUG_FS
void xenvif_dump_hash_info(struct xenvif *vif, struct seq_file *m);
#endif

#endif /* __XEN_NETBACK__COMMON_H__ */