blob: 99911f45f334e9c0c1f2485ba645ddf2f56cc657 [file] [log] [blame]
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -05001/*
2 * AMD 10Gb Ethernet driver
3 *
4 * This file is available to you under your choice of the following two
5 * licenses:
6 *
7 * License 1: GPLv2
8 *
9 * Copyright (c) 2014 Advanced Micro Devices, Inc.
10 *
11 * This file is free software; you may copy, redistribute and/or modify
12 * it under the terms of the GNU General Public License as published by
13 * the Free Software Foundation, either version 2 of the License, or (at
14 * your option) any later version.
15 *
16 * This file is distributed in the hope that it will be useful, but
17 * WITHOUT ANY WARRANTY; without even the implied warranty of
18 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
19 * General Public License for more details.
20 *
21 * You should have received a copy of the GNU General Public License
22 * along with this program. If not, see <http://www.gnu.org/licenses/>.
23 *
24 * This file incorporates work covered by the following copyright and
25 * permission notice:
26 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
27 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
28 * Inc. unless otherwise expressly agreed to in writing between Synopsys
29 * and you.
30 *
31 * The Software IS NOT an item of Licensed Software or Licensed Product
32 * under any End User Software License Agreement or Agreement for Licensed
33 * Product with Synopsys or any supplement thereto. Permission is hereby
34 * granted, free of charge, to any person obtaining a copy of this software
35 * annotated with this license and the Software, to deal in the Software
36 * without restriction, including without limitation the rights to use,
37 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
38 * of the Software, and to permit persons to whom the Software is furnished
39 * to do so, subject to the following conditions:
40 *
41 * The above copyright notice and this permission notice shall be included
42 * in all copies or substantial portions of the Software.
43 *
44 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
45 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
46 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
47 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
48 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
49 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
50 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
51 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
52 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
53 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
54 * THE POSSIBILITY OF SUCH DAMAGE.
55 *
56 *
57 * License 2: Modified BSD
58 *
59 * Copyright (c) 2014 Advanced Micro Devices, Inc.
60 * All rights reserved.
61 *
62 * Redistribution and use in source and binary forms, with or without
63 * modification, are permitted provided that the following conditions are met:
64 * * Redistributions of source code must retain the above copyright
65 * notice, this list of conditions and the following disclaimer.
66 * * Redistributions in binary form must reproduce the above copyright
67 * notice, this list of conditions and the following disclaimer in the
68 * documentation and/or other materials provided with the distribution.
69 * * Neither the name of Advanced Micro Devices, Inc. nor the
70 * names of its contributors may be used to endorse or promote products
71 * derived from this software without specific prior written permission.
72 *
73 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
74 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
75 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
76 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
77 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
78 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
79 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
80 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
81 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
82 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
83 *
84 * This file incorporates work covered by the following copyright and
85 * permission notice:
86 * The Synopsys DWC ETHER XGMAC Software Driver and documentation
87 * (hereinafter "Software") is an unsupported proprietary work of Synopsys,
88 * Inc. unless otherwise expressly agreed to in writing between Synopsys
89 * and you.
90 *
91 * The Software IS NOT an item of Licensed Software or Licensed Product
92 * under any End User Software License Agreement or Agreement for Licensed
93 * Product with Synopsys or any supplement thereto. Permission is hereby
94 * granted, free of charge, to any person obtaining a copy of this software
95 * annotated with this license and the Software, to deal in the Software
96 * without restriction, including without limitation the rights to use,
97 * copy, modify, merge, publish, distribute, sublicense, and/or sell copies
98 * of the Software, and to permit persons to whom the Software is furnished
99 * to do so, subject to the following conditions:
100 *
101 * The above copyright notice and this permission notice shall be included
102 * in all copies or substantial portions of the Software.
103 *
104 * THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
105 * BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
106 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
107 * PARTICULAR PURPOSE ARE HEREBY DISCLAIMED. IN NO EVENT SHALL SYNOPSYS
108 * BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
109 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
110 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
111 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
112 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
113 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
114 * THE POSSIBILITY OF SUCH DAMAGE.
115 */
116
117#include "xgbe.h"
118#include "xgbe-common.h"
119
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600120static void xgbe_unmap_rdata(struct xgbe_prv_data *, struct xgbe_ring_data *);
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500121
122static void xgbe_free_ring(struct xgbe_prv_data *pdata,
123 struct xgbe_ring *ring)
124{
125 struct xgbe_ring_data *rdata;
126 unsigned int i;
127
128 if (!ring)
129 return;
130
131 if (ring->rdata) {
132 for (i = 0; i < ring->rdesc_count; i++) {
Lendacky, Thomasd0a8ba62014-06-24 16:19:06 -0500133 rdata = XGBE_GET_DESC_DATA(ring, i);
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600134 xgbe_unmap_rdata(pdata, rdata);
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500135 }
136
137 kfree(ring->rdata);
138 ring->rdata = NULL;
139 }
140
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600141 if (ring->rx_pa.pages) {
142 dma_unmap_page(pdata->dev, ring->rx_pa.pages_dma,
143 ring->rx_pa.pages_len, DMA_FROM_DEVICE);
144 put_page(ring->rx_pa.pages);
145
146 ring->rx_pa.pages = NULL;
147 ring->rx_pa.pages_len = 0;
148 ring->rx_pa.pages_offset = 0;
149 ring->rx_pa.pages_dma = 0;
150 }
151
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500152 if (ring->rdesc) {
153 dma_free_coherent(pdata->dev,
154 (sizeof(struct xgbe_ring_desc) *
155 ring->rdesc_count),
156 ring->rdesc, ring->rdesc_dma);
157 ring->rdesc = NULL;
158 }
159}
160
161static void xgbe_free_ring_resources(struct xgbe_prv_data *pdata)
162{
163 struct xgbe_channel *channel;
164 unsigned int i;
165
166 DBGPR("-->xgbe_free_ring_resources\n");
167
168 channel = pdata->channel;
169 for (i = 0; i < pdata->channel_count; i++, channel++) {
170 xgbe_free_ring(pdata, channel->tx_ring);
171 xgbe_free_ring(pdata, channel->rx_ring);
172 }
173
174 DBGPR("<--xgbe_free_ring_resources\n");
175}
176
177static int xgbe_init_ring(struct xgbe_prv_data *pdata,
178 struct xgbe_ring *ring, unsigned int rdesc_count)
179{
180 DBGPR("-->xgbe_init_ring\n");
181
182 if (!ring)
183 return 0;
184
185 /* Descriptors */
186 ring->rdesc_count = rdesc_count;
187 ring->rdesc = dma_alloc_coherent(pdata->dev,
188 (sizeof(struct xgbe_ring_desc) *
189 rdesc_count), &ring->rdesc_dma,
190 GFP_KERNEL);
191 if (!ring->rdesc)
192 return -ENOMEM;
193
194 /* Descriptor information */
195 ring->rdata = kcalloc(rdesc_count, sizeof(struct xgbe_ring_data),
196 GFP_KERNEL);
197 if (!ring->rdata)
198 return -ENOMEM;
199
200 DBGPR(" rdesc=0x%p, rdesc_dma=0x%llx, rdata=0x%p\n",
201 ring->rdesc, ring->rdesc_dma, ring->rdata);
202
203 DBGPR("<--xgbe_init_ring\n");
204
205 return 0;
206}
207
208static int xgbe_alloc_ring_resources(struct xgbe_prv_data *pdata)
209{
210 struct xgbe_channel *channel;
211 unsigned int i;
212 int ret;
213
214 DBGPR("-->xgbe_alloc_ring_resources\n");
215
216 channel = pdata->channel;
217 for (i = 0; i < pdata->channel_count; i++, channel++) {
218 DBGPR(" %s - tx_ring:\n", channel->name);
219 ret = xgbe_init_ring(pdata, channel->tx_ring,
220 pdata->tx_desc_count);
221 if (ret) {
222 netdev_alert(pdata->netdev,
223 "error initializing Tx ring\n");
224 goto err_ring;
225 }
226
227 DBGPR(" %s - rx_ring:\n", channel->name);
228 ret = xgbe_init_ring(pdata, channel->rx_ring,
229 pdata->rx_desc_count);
230 if (ret) {
231 netdev_alert(pdata->netdev,
232 "error initializing Tx ring\n");
233 goto err_ring;
234 }
235 }
236
237 DBGPR("<--xgbe_alloc_ring_resources\n");
238
239 return 0;
240
241err_ring:
242 xgbe_free_ring_resources(pdata);
243
244 return ret;
245}
246
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600247static int xgbe_map_rx_buffer(struct xgbe_prv_data *pdata,
248 struct xgbe_ring *ring,
249 struct xgbe_ring_data *rdata)
250{
251 if (!ring->rx_pa.pages) {
252 struct page *pages = NULL;
253 dma_addr_t pages_dma;
254 gfp_t gfp;
255 int order, ret;
256
257 /* Try to obtain pages, decreasing order if necessary */
258 gfp = GFP_ATOMIC | __GFP_COLD | __GFP_COMP;
259 order = max_t(int, PAGE_ALLOC_COSTLY_ORDER, 1);
260 while (--order >= 0) {
261 pages = alloc_pages(gfp, order);
262 if (pages)
263 break;
264 }
265 if (!pages)
266 return -ENOMEM;
267
268 /* Map the pages */
269 pages_dma = dma_map_page(pdata->dev, pages, 0,
270 PAGE_SIZE << order, DMA_FROM_DEVICE);
271 ret = dma_mapping_error(pdata->dev, pages_dma);
272 if (ret) {
273 put_page(pages);
274 return ret;
275 }
276
277 /* Set the values for this ring */
278 ring->rx_pa.pages = pages;
279 ring->rx_pa.pages_len = PAGE_SIZE << order;
280 ring->rx_pa.pages_offset = 0;
281 ring->rx_pa.pages_dma = pages_dma;
282 }
283
284 get_page(ring->rx_pa.pages);
285 rdata->rx_pa = ring->rx_pa;
286
287 rdata->rx_dma = ring->rx_pa.pages_dma + ring->rx_pa.pages_offset;
288 rdata->rx_dma_len = pdata->rx_buf_size;
289
290 ring->rx_pa.pages_offset += pdata->rx_buf_size;
291 if ((ring->rx_pa.pages_offset + pdata->rx_buf_size) >
292 ring->rx_pa.pages_len) {
293 /* This data descriptor is responsible for unmapping page(s) */
294 rdata->rx_unmap = ring->rx_pa;
295
296 /* Get a new allocation next time */
297 ring->rx_pa.pages = NULL;
298 ring->rx_pa.pages_len = 0;
299 ring->rx_pa.pages_offset = 0;
300 ring->rx_pa.pages_dma = 0;
301 }
302
303 return 0;
304}
305
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500306static void xgbe_wrapper_tx_descriptor_init(struct xgbe_prv_data *pdata)
307{
308 struct xgbe_hw_if *hw_if = &pdata->hw_if;
309 struct xgbe_channel *channel;
310 struct xgbe_ring *ring;
311 struct xgbe_ring_data *rdata;
312 struct xgbe_ring_desc *rdesc;
313 dma_addr_t rdesc_dma;
314 unsigned int i, j;
315
316 DBGPR("-->xgbe_wrapper_tx_descriptor_init\n");
317
318 channel = pdata->channel;
319 for (i = 0; i < pdata->channel_count; i++, channel++) {
320 ring = channel->tx_ring;
321 if (!ring)
322 break;
323
324 rdesc = ring->rdesc;
325 rdesc_dma = ring->rdesc_dma;
326
327 for (j = 0; j < ring->rdesc_count; j++) {
Lendacky, Thomasd0a8ba62014-06-24 16:19:06 -0500328 rdata = XGBE_GET_DESC_DATA(ring, j);
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500329
330 rdata->rdesc = rdesc;
331 rdata->rdesc_dma = rdesc_dma;
332
333 rdesc++;
334 rdesc_dma += sizeof(struct xgbe_ring_desc);
335 }
336
337 ring->cur = 0;
338 ring->dirty = 0;
339 ring->tx.queue_stopped = 0;
340
341 hw_if->tx_desc_init(channel);
342 }
343
344 DBGPR("<--xgbe_wrapper_tx_descriptor_init\n");
345}
346
347static void xgbe_wrapper_rx_descriptor_init(struct xgbe_prv_data *pdata)
348{
349 struct xgbe_hw_if *hw_if = &pdata->hw_if;
350 struct xgbe_channel *channel;
351 struct xgbe_ring *ring;
352 struct xgbe_ring_desc *rdesc;
353 struct xgbe_ring_data *rdata;
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600354 dma_addr_t rdesc_dma;
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500355 unsigned int i, j;
356
357 DBGPR("-->xgbe_wrapper_rx_descriptor_init\n");
358
359 channel = pdata->channel;
360 for (i = 0; i < pdata->channel_count; i++, channel++) {
361 ring = channel->rx_ring;
362 if (!ring)
363 break;
364
365 rdesc = ring->rdesc;
366 rdesc_dma = ring->rdesc_dma;
367
368 for (j = 0; j < ring->rdesc_count; j++) {
Lendacky, Thomasd0a8ba62014-06-24 16:19:06 -0500369 rdata = XGBE_GET_DESC_DATA(ring, j);
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500370
371 rdata->rdesc = rdesc;
372 rdata->rdesc_dma = rdesc_dma;
373
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600374 if (xgbe_map_rx_buffer(pdata, ring, rdata))
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500375 break;
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500376
377 rdesc++;
378 rdesc_dma += sizeof(struct xgbe_ring_desc);
379 }
380
381 ring->cur = 0;
382 ring->dirty = 0;
383 ring->rx.realloc_index = 0;
384 ring->rx.realloc_threshold = 0;
385
386 hw_if->rx_desc_init(channel);
387 }
388
389 DBGPR("<--xgbe_wrapper_rx_descriptor_init\n");
390}
391
/* Reset a descriptor's software state, releasing every resource it
 * holds: the Tx DMA mapping, the skb, this entry's Rx page reference,
 * and the Rx page DMA mapping when this entry owns it (rx_unmap set by
 * xgbe_map_rx_buffer() for the last user of an allocation).
 */
static void xgbe_unmap_rdata(struct xgbe_prv_data *pdata,
			     struct xgbe_ring_data *rdata)
{
	/* Release the Tx buffer mapping; fragments were mapped as pages,
	 * linear/header data via dma_map_single()
	 */
	if (rdata->skb_dma) {
		if (rdata->mapped_as_page) {
			dma_unmap_page(pdata->dev, rdata->skb_dma,
				       rdata->skb_dma_len, DMA_TO_DEVICE);
		} else {
			dma_unmap_single(pdata->dev, rdata->skb_dma,
					 rdata->skb_dma_len, DMA_TO_DEVICE);
		}
		rdata->skb_dma = 0;
		rdata->skb_dma_len = 0;
	}

	if (rdata->skb) {
		dev_kfree_skb_any(rdata->skb);
		rdata->skb = NULL;
	}

	/* Drop this descriptor's reference on the shared Rx pages */
	if (rdata->rx_pa.pages)
		put_page(rdata->rx_pa.pages);

	/* If this descriptor owns the Rx allocation, unmap it and drop
	 * the allocation's reference as well (unmap before put_page)
	 */
	if (rdata->rx_unmap.pages) {
		dma_unmap_page(pdata->dev, rdata->rx_unmap.pages_dma,
			       rdata->rx_unmap.pages_len, DMA_FROM_DEVICE);
		put_page(rdata->rx_unmap.pages);
	}

	memset(&rdata->rx_pa, 0, sizeof(rdata->rx_pa));
	memset(&rdata->rx_unmap, 0, sizeof(rdata->rx_unmap));

	rdata->rx_dma = 0;
	rdata->rx_dma_len = 0;

	rdata->tso_header = 0;
	rdata->len = 0;
	rdata->interrupt = 0;
	rdata->mapped_as_page = 0;

	/* Clear any saved partial Rx packet state */
	if (rdata->state_saved) {
		rdata->state_saved = 0;
		rdata->state.incomplete = 0;
		rdata->state.context_next = 0;
		rdata->state.skb = NULL;
		rdata->state.len = 0;
		rdata->state.error = 0;
	}
}
441
/* Map an skb for Tx DMA, consuming one ring entry per mapped segment.
 * The linear data (and, for TSO, the header) is mapped with
 * dma_map_single() and each page fragment with skb_frag_dma_map();
 * segments larger than XGBE_TX_MAX_BUF_SIZE are split across multiple
 * descriptors.  One extra entry is reserved up front when a context
 * descriptor will be needed (MSS or VLAN tag change).
 *
 * Returns the number of descriptor entries used (also saved in
 * packet->rdesc_count), or 0 on DMA mapping failure after undoing any
 * mappings already made.
 */
static int xgbe_map_tx_skb(struct xgbe_channel *channel, struct sk_buff *skb)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_packet_data *packet;
	struct skb_frag_struct *frag;
	dma_addr_t skb_dma;
	unsigned int start_index, cur_index;
	unsigned int offset, tso, vlan, datalen, len;
	unsigned int i;

	DBGPR("-->xgbe_map_tx_skb: cur = %d\n", ring->cur);

	offset = 0;
	start_index = ring->cur;
	cur_index = ring->cur;

	packet = &ring->packet_data;
	packet->rdesc_count = 0;
	packet->length = 0;

	tso = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			     TSO_ENABLE);
	vlan = XGMAC_GET_BITS(packet->attributes, TX_PACKET_ATTRIBUTES,
			      VLAN_CTAG);

	/* Save space for a context descriptor if needed */
	if ((tso && (packet->mss != ring->tx.cur_mss)) ||
	    (vlan && (packet->vlan_ctag != ring->tx.cur_vlan_ctag)))
		cur_index++;
	rdata = XGBE_GET_DESC_DATA(ring, cur_index);

	if (tso) {
		DBGPR(" TSO packet\n");

		/* Map the TSO header */
		skb_dma = dma_map_single(pdata->dev, skb->data,
					 packet->header_len, DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = packet->header_len;
		rdata->tso_header = 1;	/* marks this entry as the TSO header */

		/* Skip past the header when mapping the remaining linear
		 * data below
		 */
		offset = packet->header_len;

		packet->length += packet->header_len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map the (remainder of the) packet */
	for (datalen = skb_headlen(skb) - offset; datalen; ) {
		/* Each descriptor covers at most XGBE_TX_MAX_BUF_SIZE */
		len = min_t(unsigned int, datalen, XGBE_TX_MAX_BUF_SIZE);

		skb_dma = dma_map_single(pdata->dev, skb->data + offset, len,
					 DMA_TO_DEVICE);
		if (dma_mapping_error(pdata->dev, skb_dma)) {
			netdev_alert(pdata->netdev, "dma_map_single failed\n");
			goto err_out;
		}
		rdata->skb_dma = skb_dma;
		rdata->skb_dma_len = len;
		DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
		      cur_index, skb_dma, len);

		datalen -= len;
		offset += len;

		packet->length += len;

		cur_index++;
		rdata = XGBE_GET_DESC_DATA(ring, cur_index);
	}

	/* Map each page fragment, again split to the max buffer size */
	for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
		DBGPR(" mapping frag %u\n", i);

		frag = &skb_shinfo(skb)->frags[i];
		offset = 0;

		for (datalen = skb_frag_size(frag); datalen; ) {
			len = min_t(unsigned int, datalen,
				    XGBE_TX_MAX_BUF_SIZE);

			skb_dma = skb_frag_dma_map(pdata->dev, frag, offset,
						   len, DMA_TO_DEVICE);
			if (dma_mapping_error(pdata->dev, skb_dma)) {
				netdev_alert(pdata->netdev,
					     "skb_frag_dma_map failed\n");
				goto err_out;
			}
			rdata->skb_dma = skb_dma;
			rdata->skb_dma_len = len;
			rdata->mapped_as_page = 1;	/* unmap via dma_unmap_page */
			DBGPR(" skb data: index=%u, dma=0x%llx, len=%u\n",
			      cur_index, skb_dma, len);

			datalen -= len;
			offset += len;

			packet->length += len;

			cur_index++;
			rdata = XGBE_GET_DESC_DATA(ring, cur_index);
		}
	}

	/* Save the skb address in the last entry */
	rdata->skb = skb;

	/* Save the number of descriptor entries used */
	packet->rdesc_count = cur_index - start_index;

	DBGPR("<--xgbe_map_tx_skb: count=%u\n", packet->rdesc_count);

	return packet->rdesc_count;

err_out:
	/* Roll back: unmap every entry mapped so far (the entry that
	 * failed to map was never populated, so it is excluded)
	 */
	while (start_index < cur_index) {
		rdata = XGBE_GET_DESC_DATA(ring, start_index++);
		xgbe_unmap_rdata(pdata, rdata);
	}

	DBGPR("<--xgbe_map_tx_skb: count=0\n");

	return 0;
}
574
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600575static void xgbe_realloc_rx_buffer(struct xgbe_channel *channel)
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500576{
577 struct xgbe_prv_data *pdata = channel->pdata;
578 struct xgbe_hw_if *hw_if = &pdata->hw_if;
579 struct xgbe_ring *ring = channel->rx_ring;
580 struct xgbe_ring_data *rdata;
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500581 int i;
582
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600583 DBGPR("-->xgbe_realloc_rx_buffer: rx_ring->rx.realloc_index = %u\n",
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500584 ring->rx.realloc_index);
585
586 for (i = 0; i < ring->dirty; i++) {
Lendacky, Thomasd0a8ba62014-06-24 16:19:06 -0500587 rdata = XGBE_GET_DESC_DATA(ring, ring->rx.realloc_index);
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500588
589 /* Reset rdata values */
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600590 xgbe_unmap_rdata(pdata, rdata);
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500591
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600592 if (xgbe_map_rx_buffer(pdata, ring, rdata))
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500593 break;
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500594
595 hw_if->rx_desc_reset(rdata);
596
597 ring->rx.realloc_index++;
598 }
599 ring->dirty = 0;
600
Lendacky, Thomas08dcc472014-11-04 16:06:44 -0600601 DBGPR("<--xgbe_realloc_rx_buffer\n");
Lendacky, Thomasc5aa9e32014-06-05 09:15:06 -0500602}
603
/* Publish the descriptor-management entry points used by the rest of
 * the driver through the xgbe_desc_if function table.
 */
void xgbe_init_function_ptrs_desc(struct xgbe_desc_if *desc_if)
{
	DBGPR("-->xgbe_init_function_ptrs_desc\n");

	desc_if->alloc_ring_resources = xgbe_alloc_ring_resources;
	desc_if->free_ring_resources = xgbe_free_ring_resources;
	desc_if->map_tx_skb = xgbe_map_tx_skb;
	desc_if->realloc_rx_buffer = xgbe_realloc_rx_buffer;
	desc_if->unmap_rdata = xgbe_unmap_rdata;
	desc_if->wrapper_tx_desc_init = xgbe_wrapper_tx_descriptor_init;
	desc_if->wrapper_rx_desc_init = xgbe_wrapper_rx_descriptor_init;

	DBGPR("<--xgbe_init_function_ptrs_desc\n");
}