/*******************************************************************************
 *
 * Intel Ethernet Controller XL710 Family Linux Virtual Function Driver
 * Copyright(c) 2013 - 2016 Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * The full GNU General Public License is included in this distribution in
 * the file called "COPYING".
 *
 * Contact Information:
 * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
 * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
 *
 ******************************************************************************/

#ifndef _I40E_TXRX_H_
#define _I40E_TXRX_H_

/* Interrupt Throttling and Rate Limiting Goodies */

#define I40E_MAX_ITR               0x0FF0  /* reg uses 2 usec resolution */
#define I40E_MIN_ITR               0x0001  /* reg uses 2 usec resolution */
#define I40E_ITR_100K              0x0005
#define I40E_ITR_50K               0x000A
#define I40E_ITR_20K               0x0019
#define I40E_ITR_18K               0x001B
#define I40E_ITR_8K                0x003E
#define I40E_ITR_4K                0x007A
#define I40E_MAX_INTRL             0x3B    /* reg uses 4 usec resolution */
#define I40E_ITR_RX_DEF            I40E_ITR_20K
#define I40E_ITR_TX_DEF            I40E_ITR_20K
#define I40E_ITR_DYNAMIC           0x8000  /* use top bit as a flag */
#define I40E_MIN_INT_RATE          250     /* ~= 1000000 / (I40E_MAX_ITR * 2) */
#define I40E_MAX_INT_RATE          500000  /* == 1000000 / (I40E_MIN_ITR * 2) */
#define I40E_DEFAULT_IRQ_WORK      256
#define ITR_TO_REG(setting) ((setting & ~I40E_ITR_DYNAMIC) >> 1)
#define ITR_IS_DYNAMIC(setting) (!!(setting & I40E_ITR_DYNAMIC))
#define ITR_REG_TO_USEC(itr_reg) (itr_reg << 1)
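/* Worked example (illustrative, not part of the original header): the ITR
 * registers count in 2 usec units, so a stored USER setting of 40 usecs
 * with the dynamic flag set behaves as:
 *
 *	u16 setting = 40 | I40E_ITR_DYNAMIC;
 *
 *	ITR_IS_DYNAMIC(setting)	-> true
 *	ITR_TO_REG(setting)	-> 20 (40 usecs at 2 usec resolution)
 *	ITR_REG_TO_USEC(20)	-> 40 usecs again
 */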
Jesse Brandeburgac26fc12015-09-28 14:12:37 -040050/* 0x40 is the enable bit for interrupt rate limiting, and must be set if
51 * the value of the rate limit is non-zero
52 */
53#define INTRL_ENA BIT(6)
54#define INTRL_REG_TO_USEC(intrl) ((intrl & ~INTRL_ENA) << 2)
55#define INTRL_USEC_TO_REG(set) ((set) ? ((set) >> 2) | INTRL_ENA : 0)
56#define I40E_INTRL_8K 125 /* 8000 ints/sec */
57#define I40E_INTRL_62K 16 /* 62500 ints/sec */
58#define I40E_INTRL_83K 12 /* 83333 ints/sec */
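/* Worked example (illustrative): a limit of 8000 ints/sec corresponds to
 * I40E_INTRL_8K, i.e. 125 usecs between interrupts.  Round-tripping that
 * value through the helpers above gives:
 *
 *	INTRL_USEC_TO_REG(125)	-> (125 >> 2) | INTRL_ENA = 31 | 0x40 = 0x5F
 *	INTRL_REG_TO_USEC(0x5F)	-> (0x5F & ~0x40) << 2 = 124 usecs
 *
 * Note the low two bits are lost (125 -> 124) because the register only
 * has 4 usec resolution.
 */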

#define I40E_QUEUE_END_OF_LIST 0x7FF

/* this enum matches hardware bits and is meant to be used by DYN_CTLN
 * registers and QINT registers, or more generally anywhere in the manual
 * that mentions ITR_INDX.  ITR_NONE cannot be used as an index 'n' into
 * any register; instead it is a special value meaning "don't update"
 * ITR0/1/2.
 */
enum i40e_dyn_idx_t {
	I40E_IDX_ITR0 = 0,
	I40E_IDX_ITR1 = 1,
	I40E_IDX_ITR2 = 2,
	I40E_ITR_NONE = 3	/* ITR_NONE must not be used as an index */
};

/* these are indexes into ITRN registers */
#define I40E_RX_ITR I40E_IDX_ITR0
#define I40E_TX_ITR I40E_IDX_ITR1
#define I40E_PE_ITR I40E_IDX_ITR2

/* Supported RSS offloads */
#define I40E_DEFAULT_RSS_HENA ( \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV4) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_SCTP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_OTHER) | \
	BIT_ULL(I40E_FILTER_PCTYPE_FRAG_IPV6) | \
	BIT_ULL(I40E_FILTER_PCTYPE_L2_PAYLOAD))

#define I40E_DEFAULT_RSS_HENA_EXPANDED (I40E_DEFAULT_RSS_HENA | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV4_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV4_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_IPV6_TCP_SYN_NO_ACK) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_UNICAST_IPV6_UDP) | \
	BIT_ULL(I40E_FILTER_PCTYPE_NONF_MULTICAST_IPV6_UDP))

#define i40e_pf_get_default_rss_hena(pf) \
	(((pf)->flags & I40E_FLAG_MULTIPLE_TCP_UDP_RSS_PCTYPE) ? \
	  I40E_DEFAULT_RSS_HENA_EXPANDED : I40E_DEFAULT_RSS_HENA)

/* Supported Rx Buffer Sizes (a multiple of 128) */
#define I40E_RXBUFFER_256   256
#define I40E_RXBUFFER_1536  1536  /* 128B aligned standard Ethernet frame */
#define I40E_RXBUFFER_2048  2048
#define I40E_RXBUFFER_3072  3072  /* Used for large frames w/ padding */
#define I40E_MAX_RXBUFFER   9728  /* largest size for single descriptor */

/* NOTE: netdev_alloc_skb reserves up to 64 bytes, NET_IP_ALIGN means we
 * reserve 2 more, and skb_shared_info adds an additional 384 bytes more;
 * this rounds up to 512 bytes of extra data, meaning the smallest
 * allocation we could have is 1K.
 * i.e. RXBUFFER_256 --> 960 byte skb (size-1024 slab)
 * i.e. RXBUFFER_512 --> 1216 byte skb (size-2048 slab)
 */
#define I40E_RX_HDR_SIZE I40E_RXBUFFER_256
#define i40e_rx_desc i40e_32byte_rx_desc

#define I40E_RX_DMA_ATTR \
	(DMA_ATTR_SKIP_CPU_SYNC | DMA_ATTR_WEAK_ORDERING)

/**
 * i40e_test_staterr - tests bits in Rx descriptor status and error fields
 * @rx_desc: pointer to receive descriptor (in le64 format)
 * @stat_err_bits: value to mask
 *
 * This function does some fast chicanery in order to return the
 * value of the mask which is really only used for boolean tests.
 * The status_error_len doesn't need to be shifted because it begins
 * at offset zero.
 */
static inline bool i40e_test_staterr(union i40e_rx_desc *rx_desc,
				     const u64 stat_err_bits)
{
	return !!(rx_desc->wb.qword1.status_error_len &
		  cpu_to_le64(stat_err_bits));
}
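/* Example usage (a sketch, assuming the DD status bit definition from
 * i40e_type.h): the Rx clean-up path typically uses this helper to check
 * whether hardware has finished writing a descriptor back:
 *
 *	if (!i40e_test_staterr(rx_desc,
 *			       BIT(I40E_RX_DESC_STATUS_DD_SHIFT)))
 *		break;	(descriptor not written back yet)
 */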

/* How many Rx Buffers do we bundle into one write to the hardware ? */
#define I40E_RX_BUFFER_WRITE	16	/* Must be power of 2 */
#define I40E_RX_INCREMENT(r, i) \
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		r->next_to_clean = i;		\
	} while (0)

#define I40E_RX_NEXT_DESC(r, i, n)		\
	do {					\
		(i)++;				\
		if ((i) == (r)->count)		\
			i = 0;			\
		(n) = I40E_RX_DESC((r), (i));	\
	} while (0)

#define I40E_RX_NEXT_DESC_PREFETCH(r, i, n)		\
	do {						\
		I40E_RX_NEXT_DESC((r), (i), (n));	\
		prefetch((n));				\
	} while (0)

#define I40E_MAX_BUFFER_TXD	8
#define I40E_MIN_TX_LEN		17

/* The size limit for a transmit buffer in a descriptor is (16K - 1).
 * In order to align with the read requests we will align the value to
 * the nearest 4K which represents our maximum read request size.
 */
#define I40E_MAX_READ_REQ_SIZE		4096
#define I40E_MAX_DATA_PER_TXD		(16 * 1024 - 1)
#define I40E_MAX_DATA_PER_TXD_ALIGNED \
	(I40E_MAX_DATA_PER_TXD & ~(I40E_MAX_READ_REQ_SIZE - 1))

/**
 * i40e_txd_use_count - estimate the number of descriptors needed for Tx
 * @size: transmit request size in bytes
 *
 * Due to hardware alignment restrictions (4K alignment), we need to
 * assume that we can have no more than 12K of data per descriptor, even
 * though each descriptor can take up to 16K - 1 bytes of aligned memory.
 * Thus, we need to divide by 12K. But division is slow! Instead,
 * we decompose the operation into shifts and one relatively cheap
 * multiply operation.
 *
 * To divide by 12K, we first divide by 4K, then divide by 3:
 *     To divide by 4K, shift right by 12 bits
 *     To divide by 3, multiply by 85, then divide by 256
 *     (Divide by 256 is done by shifting right by 8 bits)
 * Finally, we add one to round up. Because 256 isn't an exact multiple of
 * 3, we'll underestimate near each multiple of 12K. This is actually more
 * accurate as we have 4K - 1 of wiggle room that we can fit into the last
 * segment. For our purposes this is accurate out to 1M which is orders of
 * magnitude greater than our largest possible GSO size.
 *
 * This would then be implemented as:
 *     return (((size >> 12) * 85) >> 8) + 1;
 *
 * Since multiplication and division are commutative, we can reorder
 * operations into:
 *     return ((size * 85) >> 20) + 1;
 */
static inline unsigned int i40e_txd_use_count(unsigned int size)
{
	return ((size * 85) >> 20) + 1;
}
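/* Worked numbers (illustrative): with the 12K-per-descriptor assumption,
 *	i40e_txd_use_count(256)   = ((256 * 85) >> 20) + 1   = 0 + 1 = 1
 *	i40e_txd_use_count(9000)  = ((9000 * 85) >> 20) + 1  = 0 + 1 = 1
 *	i40e_txd_use_count(16383) = ((16383 * 85) >> 20) + 1 = 1 + 1 = 2
 * i.e. anything up to 12K fits in one descriptor, while a maximum sized
 * I40E_MAX_DATA_PER_TXD buffer (16K - 1 bytes) costs two.
 */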

/* Tx Descriptors needed, worst case */
#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
#define I40E_MIN_DESC_PENDING	4

#define I40E_TX_FLAGS_HW_VLAN		BIT(1)
#define I40E_TX_FLAGS_SW_VLAN		BIT(2)
#define I40E_TX_FLAGS_TSO		BIT(3)
#define I40E_TX_FLAGS_IPV4		BIT(4)
#define I40E_TX_FLAGS_IPV6		BIT(5)
#define I40E_TX_FLAGS_FCCRC		BIT(6)
#define I40E_TX_FLAGS_FSO		BIT(7)
#define I40E_TX_FLAGS_FD_SB		BIT(9)
#define I40E_TX_FLAGS_VXLAN_TUNNEL	BIT(10)
#define I40E_TX_FLAGS_VLAN_MASK		0xffff0000
#define I40E_TX_FLAGS_VLAN_PRIO_MASK	0xe0000000
#define I40E_TX_FLAGS_VLAN_PRIO_SHIFT	29
#define I40E_TX_FLAGS_VLAN_SHIFT	16

struct i40e_tx_buffer {
	struct i40e_tx_desc *next_to_watch;
	union {
		struct sk_buff *skb;
		void *raw_buf;
	};
	unsigned int bytecount;
	unsigned short gso_segs;

	DEFINE_DMA_UNMAP_ADDR(dma);
	DEFINE_DMA_UNMAP_LEN(len);
	u32 tx_flags;
};

struct i40e_rx_buffer {
	dma_addr_t dma;
	struct page *page;
#if (BITS_PER_LONG > 32) || (PAGE_SIZE >= 65536)
	__u32 page_offset;
#else
	__u16 page_offset;
#endif
	__u16 pagecnt_bias;
};

struct i40e_queue_stats {
	u64 packets;
	u64 bytes;
};

struct i40e_tx_queue_stats {
	u64 restart_queue;
	u64 tx_busy;
	u64 tx_done_old;
	u64 tx_linearize;
	u64 tx_force_wb;
	u64 tx_lost_interrupt;
};

struct i40e_rx_queue_stats {
	u64 non_eop_descs;
	u64 alloc_page_failed;
	u64 alloc_buff_failed;
	u64 page_reuse_count;
	u64 realloc_count;
};

enum i40e_ring_state_t {
	__I40E_TX_FDIR_INIT_DONE,
	__I40E_TX_XPS_INIT_DONE,
};

/* some useful defines for virtchannel interface, which
 * is the only remaining user of header split
 */
#define I40E_RX_DTYPE_NO_SPLIT      0
#define I40E_RX_DTYPE_HEADER_SPLIT  1
#define I40E_RX_DTYPE_SPLIT_ALWAYS  2
#define I40E_RX_SPLIT_L2      0x1
#define I40E_RX_SPLIT_IP      0x2
#define I40E_RX_SPLIT_TCP_UDP 0x4
#define I40E_RX_SPLIT_SCTP    0x8

/* struct that defines a descriptor ring, associated with a VSI */
struct i40e_ring {
	struct i40e_ring *next;		/* pointer to next ring in q_vector */
	void *desc;			/* Descriptor ring memory */
	struct device *dev;		/* Used for DMA mapping */
	struct net_device *netdev;	/* netdev ring maps to */
	union {
		struct i40e_tx_buffer *tx_bi;
		struct i40e_rx_buffer *rx_bi;
	};
	unsigned long state;
	u16 queue_index;		/* Queue number of ring */
	u8 dcb_tc;			/* Traffic class of ring */
	u8 __iomem *tail;

	/* high bit set means dynamic, use accessor routines to read/write.
	 * hardware only supports 2us resolution for the ITR registers.
	 * these values always store the USER setting, and must be converted
	 * before programming to a register.
	 */
	u16 rx_itr_setting;
	u16 tx_itr_setting;

	u16 count;			/* Number of descriptors */
	u16 reg_idx;			/* HW register index of the ring */
	u16 rx_buf_len;

	/* used in interrupt processing */
	u16 next_to_use;
	u16 next_to_clean;

	u8 atr_sample_rate;
	u8 atr_count;

	bool ring_active;		/* is ring online or not */
	bool arm_wb;			/* do something to arm write back */
	u8 packet_stride;

	u16 flags;
#define I40E_TXR_FLAGS_WB_ON_ITR	BIT(0)

	/* stats structs */
	struct i40e_queue_stats	stats;
	struct u64_stats_sync syncp;
	union {
		struct i40e_tx_queue_stats tx_stats;
		struct i40e_rx_queue_stats rx_stats;
	};

	unsigned int size;		/* length of descriptor ring in bytes */
	dma_addr_t dma;			/* physical address of ring */

	struct i40e_vsi *vsi;		/* Backreference to associated VSI */
	struct i40e_q_vector *q_vector;	/* Backreference to associated vector */

	struct rcu_head rcu;		/* to avoid race on free */
	u16 next_to_alloc;
	struct sk_buff *skb;		/* When i40evf_clean_rx_ring_irq() must
					 * return before it sees the EOP for
					 * the current packet, we save that skb
					 * here and resume receiving this
					 * packet the next time
					 * i40evf_clean_rx_ring_irq() is called
					 * for this ring.
					 */
} ____cacheline_internodealigned_in_smp;

enum i40e_latency_range {
	I40E_LOWEST_LATENCY = 0,
	I40E_LOW_LATENCY = 1,
	I40E_BULK_LATENCY = 2,
	I40E_ULTRA_LATENCY = 3,
};

struct i40e_ring_container {
	/* array of pointers to rings */
	struct i40e_ring *ring;
	unsigned int total_bytes;	/* total bytes processed this int */
	unsigned int total_packets;	/* total packets processed this int */
	u16 count;
	enum i40e_latency_range latency_range;
	u16 itr;
};

/* iterator for handling rings in ring container */
#define i40e_for_each_ring(pos, head) \
	for (pos = (head).ring; pos != NULL; pos = pos->next)

static inline unsigned int i40e_rx_pg_order(struct i40e_ring *ring)
{
#if (PAGE_SIZE < 8192)
	if (ring->rx_buf_len > (PAGE_SIZE / 2))
		return 1;
#endif
	return 0;
}

#define i40e_rx_pg_size(_ring) (PAGE_SIZE << i40e_rx_pg_order(_ring))

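/* Example (illustrative): on a system with 4K pages, a 3072-byte buffer
 * (I40E_RXBUFFER_3072) no longer fits in half a page, so i40e_rx_pg_order()
 * returns 1 and i40e_rx_pg_size() is 8192; with I40E_RXBUFFER_2048 the
 * order stays 0 and two 2048-byte buffers can share a single 4K page.
 */
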
bool i40evf_alloc_rx_buffers(struct i40e_ring *rxr, u16 cleaned_count);
netdev_tx_t i40evf_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
void i40evf_clean_tx_ring(struct i40e_ring *tx_ring);
void i40evf_clean_rx_ring(struct i40e_ring *rx_ring);
int i40evf_setup_tx_descriptors(struct i40e_ring *tx_ring);
int i40evf_setup_rx_descriptors(struct i40e_ring *rx_ring);
void i40evf_free_tx_resources(struct i40e_ring *tx_ring);
void i40evf_free_rx_resources(struct i40e_ring *rx_ring);
int i40evf_napi_poll(struct napi_struct *napi, int budget);
void i40evf_force_wb(struct i40e_vsi *vsi, struct i40e_q_vector *q_vector);
u32 i40evf_get_tx_pending(struct i40e_ring *ring, bool in_sw);
int __i40evf_maybe_stop_tx(struct i40e_ring *tx_ring, int size);
bool __i40evf_chk_linearize(struct sk_buff *skb);

/**
 * i40e_xmit_descriptor_count - calculate number of Tx descriptors needed
 * @skb: send buffer
 *
 * Returns the number of data descriptors needed for this skb.  Returns 0
 * to indicate there are not enough descriptors available in this ring
 * since we need at least one descriptor.
 **/
static inline int i40e_xmit_descriptor_count(struct sk_buff *skb)
{
	const struct skb_frag_struct *frag = &skb_shinfo(skb)->frags[0];
	unsigned int nr_frags = skb_shinfo(skb)->nr_frags;
	int count = 0, size = skb_headlen(skb);

	for (;;) {
		count += i40e_txd_use_count(size);

		if (!nr_frags--)
			break;

		size = skb_frag_size(frag++);
	}

	return count;
}
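/* Worked example (illustrative): a TSO skb with an 800 byte linear area
 * and two 16000 byte fragments would be counted as
 *	i40e_txd_use_count(800) + 2 * i40e_txd_use_count(16000)
 *	= 1 + 2 * 2 = 5 descriptors.
 */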

/**
 * i40e_maybe_stop_tx - 1st level check for Tx stop conditions
 * @tx_ring: the ring to be checked
 * @size: the size of the buffer we want to assure is available
 *
 * Returns 0 if stop is not needed
 **/
static inline int i40e_maybe_stop_tx(struct i40e_ring *tx_ring, int size)
{
	if (likely(I40E_DESC_UNUSED(tx_ring) >= size))
		return 0;
	return __i40evf_maybe_stop_tx(tx_ring, size);
}

/**
 * i40e_chk_linearize - Check if there are more than 8 fragments per packet
 * @skb: send buffer
 * @count: number of buffers used
 *
 * Note: Our HW can't scatter-gather more than 8 fragments to build
 * a packet on the wire and so we need to figure out the cases where we
 * need to linearize the skb.
 **/
static inline bool i40e_chk_linearize(struct sk_buff *skb, int count)
{
	/* Both TSO and single send will work if count is less than 8 */
	if (likely(count < I40E_MAX_BUFFER_TXD))
		return false;

	if (skb_is_gso(skb))
		return __i40evf_chk_linearize(skb);

	/* we can support up to 8 data buffers for a single send */
	return count != I40E_MAX_BUFFER_TXD;
}
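/* Example (illustrative): a non-GSO skb spread over a linear area plus
 * eight page fragments uses 9 buffers, so count != I40E_MAX_BUFFER_TXD and
 * the send must be linearized; a GSO skb with the same layout may still be
 * acceptable per segment, so it is handed to __i40evf_chk_linearize() for
 * the slower per-segment check instead.
 */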

/**
 * txring_txq - Find the netdev equivalent of the Tx ring
 * @ring: Tx ring to find the netdev equivalent of
 **/
static inline struct netdev_queue *txring_txq(const struct i40e_ring *ring)
{
	return netdev_get_tx_queue(ring->netdev, ring->queue_index);
}
#endif /* _I40E_TXRX_H_ */