/*
 * Copyright (c) 2014-2015 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/clk.h>
#include <linux/cpumask.h>
#include <linux/etherdevice.h>
#include <linux/if_vlan.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/phy.h>
#include <linux/platform_device.h>
#include <linux/skbuff.h>

#include "hnae.h"
#include "hns_enet.h"
#include "hns_dsaf_mac.h"

#define NIC_MAX_Q_PER_VF 16
#define HNS_NIC_TX_TIMEOUT (5 * HZ)

#define SERVICE_TIMER_HZ (1 * HZ)

#define NIC_TX_CLEAN_MAX_NUM 256
#define NIC_RX_CLEAN_MAX_NUM 64

#define RCB_IRQ_NOT_INITED 0
#define RCB_IRQ_INITED 1
#define HNS_BUFFER_SIZE_2048 2048

#define BD_MAX_SEND_SIZE 8191
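/* Total L2+L3+L4 header length of a TSO skb: the gap between the MAC
 * header and the transport header plus the TCP header itself. Used
 * below to derive the TSO payload length as
 * paylen = skb->len - SKB_TMP_LEN(skb).
 */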
#define SKB_TMP_LEN(SKB) \
	(((SKB)->transport_header - (SKB)->mac_header) + tcp_hdrlen(SKB))

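/* Fill one V2 hardware TX buffer descriptor (BD). For CHECKSUM_PARTIAL
 * skbs this also programs L3/L4 checksum offload and, for TCP GSO skbs,
 * the TSO fields (MSS, L4 header length and payload length).
 */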
static void fill_v2_desc(struct hnae_ring *ring, void *priv,
			 int size, dma_addr_t dma, int frag_end,
			 int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct iphdr *iphdr;
	struct ipv6hdr *ipv6hdr;
	struct sk_buff *skb;
	__be16 protocol;
	u8 bn_pid = 0;
	u8 rrcfv = 0;
	u8 ip_offset = 0;
	u8 tvsvsn = 0;
	u16 mss = 0;
	u8 l4_len = 0;
	u16 paylen = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	hnae_set_bit(rrcfv, HNSV2_TXD_VLD_B, 1);
	hnae_set_field(bn_pid, HNSV2_TXD_BUFNUM_M, 0, buf_num - 1);

	/* fill port_id in the tx bd for sending management pkts */
	hnae_set_field(bn_pid, HNSV2_TXD_PORTID_M,
		       HNSV2_TXD_PORTID_S, ring->q->handle->dport_id);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				iphdr = ip_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L3CS_B, 1);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (iphdr->protocol == IPPROTO_TCP &&
				    skb_is_gso(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				hnae_set_bit(tvsvsn, HNSV2_TXD_IPV6_B, 1);
				ipv6hdr = ipv6_hdr(skb);
				hnae_set_bit(rrcfv, HNSV2_TXD_L4CS_B, 1);

				/* check for tcp/udp header */
				if (ipv6hdr->nexthdr == IPPROTO_TCP &&
				    skb_is_gso(skb) && skb_is_gso_v6(skb)) {
					hnae_set_bit(tvsvsn,
						     HNSV2_TXD_TSE_B, 1);
					l4_len = tcp_hdrlen(skb);
					mss = skb_shinfo(skb)->gso_size;
					paylen = skb->len - SKB_TMP_LEN(skb);
				}
			}
			desc->tx.ip_offset = ip_offset;
			desc->tx.tse_vlan_snap_v6_sctp_nth = tvsvsn;
			desc->tx.mss = cpu_to_le16(mss);
			desc->tx.l4_len = l4_len;
			desc->tx.paylen = cpu_to_le16(paylen);
		}
	}

	hnae_set_bit(rrcfv, HNSV2_TXD_FE_B, frag_end);

	desc->tx.bn_pid = bn_pid;
	desc->tx.ra_ri_cs_fe_vld = rrcfv;

	ring_ptr_move_fw(ring, next_to_use);
}

static const struct acpi_device_id hns_enet_acpi_match[] = {
	{ "HISI00C1", 0 },
	{ "HISI00C2", 0 },
	{ },
};
MODULE_DEVICE_TABLE(acpi, hns_enet_acpi_match);

static void fill_desc(struct hnae_ring *ring, void *priv,
		      int size, dma_addr_t dma, int frag_end,
		      int buf_num, enum hns_desc_type type, int mtu)
{
	struct hnae_desc *desc = &ring->desc[ring->next_to_use];
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct sk_buff *skb;
	__be16 protocol;
	u32 ip_offset;
	u32 asid_bufnum_pid = 0;
	u32 flag_ipoffset = 0;

	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);

	/* config bd buffer end */
	flag_ipoffset |= 1 << HNS_TXD_VLD_B;

	asid_bufnum_pid |= buf_num << HNS_TXD_BUFNUM_S;

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			protocol = skb->protocol;
			ip_offset = ETH_HLEN;

			/* if it is a SW VLAN, check the next protocol */
			if (protocol == htons(ETH_P_8021Q)) {
				ip_offset += VLAN_HLEN;
				protocol = vlan_get_protocol(skb);
				skb->protocol = protocol;
			}

			if (skb->protocol == htons(ETH_P_IP)) {
				flag_ipoffset |= 1 << HNS_TXD_L3CS_B;
				/* check for tcp/udp header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;

			} else if (skb->protocol == htons(ETH_P_IPV6)) {
				/* ipv6 has no l3 cs, check for L4 header */
				flag_ipoffset |= 1 << HNS_TXD_L4CS_B;
			}

			flag_ipoffset |= ip_offset << HNS_TXD_IPOFFSET_S;
		}
	}

	flag_ipoffset |= frag_end << HNS_TXD_FE_B;

	desc->tx.asid_bufnum_pid = cpu_to_le16(asid_bufnum_pid);
	desc->tx.flag_ipoffset = cpu_to_le32(flag_ipoffset);

	ring_ptr_move_fw(ring, next_to_use);
}

static void unfill_desc(struct hnae_ring *ring)
{
	ring_ptr_move_bw(ring, next_to_use);
}

static int hns_nic_maybe_stop_tx(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	int buf_num;

	/* no. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		if (ring_space(ring) < 1)
			return -EBUSY;

		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;

		dev_kfree_skb_any(skb);
		*out_skb = new_skb;
		buf_num = 1;
	} else if (buf_num > ring_space(ring)) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

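/* TSO variant of the stop check: the linear area and every fragment may
 * each have to be split into BD_MAX_SEND_SIZE pieces, so the worst-case
 * BD count is summed per chunk before deciding whether the ring has
 * enough free descriptors.
 */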
static int hns_nic_maybe_stop_tso(
	struct sk_buff **out_skb, int *bnum, struct hnae_ring *ring)
{
	int i;
	int size;
	int buf_num;
	int frag_num;
	struct sk_buff *skb = *out_skb;
	struct sk_buff *new_skb = NULL;
	struct skb_frag_struct *frag;

	size = skb_headlen(skb);
	buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		buf_num += (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	}

	if (unlikely(buf_num > ring->max_desc_num_per_pkt)) {
		buf_num = (skb->len + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
		if (ring_space(ring) < buf_num)
			return -EBUSY;
		/* manually split the send packet */
		new_skb = skb_copy(skb, GFP_ATOMIC);
		if (!new_skb)
			return -ENOMEM;
		dev_kfree_skb_any(skb);
		*out_skb = new_skb;

	} else if (ring_space(ring) < buf_num) {
		return -EBUSY;
	}

	*bnum = buf_num;
	return 0;
}

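/* Split one chunk into as many BDs as BD_MAX_SEND_SIZE requires.
 * Illustrative example: a 20000-byte fragment with BD_MAX_SEND_SIZE of
 * 8191 becomes three BDs of 8191 + 8191 + 3618 bytes. Only the first BD
 * of an skb keeps DESC_TYPE_SKB (the rest are DESC_TYPE_PAGE), and only
 * the last BD of the last fragment carries the frag_end flag.
 */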
static void fill_tso_desc(struct hnae_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  int buf_num, enum hns_desc_type type, int mtu)
{
	int frag_buf_num;
	int sizeoflast;
	int k;

	frag_buf_num = (size + BD_MAX_SEND_SIZE - 1) / BD_MAX_SEND_SIZE;
	sizeoflast = size % BD_MAX_SEND_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : BD_MAX_SEND_SIZE;

	/* when the frag size is bigger than the hardware limit, split it */
	for (k = 0; k < frag_buf_num; k++)
		fill_v2_desc(ring, priv,
			     (k == frag_buf_num - 1) ?
					sizeoflast : BD_MAX_SEND_SIZE,
			     dma + BD_MAX_SEND_SIZE * k,
			     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
			     buf_num,
			     (type == DESC_TYPE_SKB && !k) ?
					DESC_TYPE_SKB : DESC_TYPE_PAGE,
			     mtu);
}

int hns_nic_net_xmit_hw(struct net_device *ndev,
			struct sk_buff *skb,
			struct hns_nic_ring_data *ring_data)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_ring *ring = ring_data->ring;
	struct device *dev = ring_to_dev(ring);
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int buf_num;
	int seg_num;
	dma_addr_t dma;
	int size, next_to_use;
	int i;

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		ring->stats.tx_busy++;
		goto out_net_tx_busy;
	case -ENOMEM:
		ring->stats.sw_err_cnt++;
		netdev_err(ndev, "no memory to xmit!\n");
		goto out_err_tx_ok;
	default:
		break;
	}

	/* no. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	next_to_use = ring->next_to_use;

	/* fill the first part */
	size = skb_headlen(skb);
	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(ndev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}
	priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
			    buf_num, DESC_TYPE_SKB, ndev->mtu);

	/* fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(ndev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto out_map_frag_fail;
		}
		priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
				    seg_num - 1 == i ? 1 : 0, buf_num,
				    DESC_TYPE_PAGE, ndev->mtu);
	}

	/* all descriptors are filled; notify the stack and the hardware */
	dev_queue = netdev_get_tx_queue(ndev, skb->queue_mapping);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* commit all data before submit */
	assert(skb->queue_mapping < priv->ae_handle->q_num);
	hnae_queue_xmit(priv->ae_handle->qs[skb->queue_mapping], buf_num);
	ring->stats.tx_pkts++;
	ring->stats.tx_bytes += skb->len;

	return NETDEV_TX_OK;

out_map_frag_fail:

	while (ring->next_to_use != next_to_use) {
		unfill_desc(ring);
		if (ring->next_to_use != next_to_use)
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);
		else
			dma_unmap_single(dev,
					 ring->desc_cb[next_to_use].dma,
					 ring->desc_cb[next_to_use].length,
					 DMA_TO_DEVICE);
	}

out_err_tx_ok:

	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:

	netif_stop_subqueue(ndev, skb->queue_mapping);

	/* Herbert's original patch had:
	 * smp_mb__after_netif_stop_queue();
	 * but since that doesn't exist yet, just open code it.
	 */
	smp_mb();
	return NETDEV_TX_BUSY;
}

/**
 * hns_nic_get_headlen - determine size of header for RSC/LRO/GRO/FCOE
 * @data: pointer to the start of the headers
 * @flag: RX descriptor flags carrying the VLAN/L3/L4 protocol fields
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation of doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 **/
static unsigned int hns_nic_get_headlen(unsigned char *data, u32 flag,
					unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* this should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* initialize network frame pointer */
	network = data;

	/* set first protocol and move network header forward */
	network += ETH_HLEN;

	/* handle any vlan tag if present */
	if (hnae_get_field(flag, HNS_RXD_VLAN_M, HNS_RXD_VLAN_S)
		== HNS_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* handle L3 protocols */
	if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S)
		== HNS_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* relocate pointer to start of L4 header */
	network += hlen;

	/* finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S)
		== HNS_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly network should be the
	 * data section of the packet and will be the end of the header.
	 * If not then it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

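/* Attach one RX buffer to the skb as a page fragment and decide whether
 * the page can be recycled into the ring. With 2048-byte buffers on a
 * system whose pages are smaller than 8192 bytes, each page holds two
 * buffers, so reuse just flips page_offset between the two halves;
 * otherwise the offset walks forward through the page until it runs out
 * of room.
 */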
static void hns_nic_reuse_page(struct sk_buff *skb, int i,
			       struct hnae_ring *ring, int pull_len,
			       struct hnae_desc_cb *desc_cb)
{
	struct hnae_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	if (twobufs) {
		truesize = hnae_buf_size(ring);
	} else {
		truesize = ALIGN(size, L1_CACHE_BYTES);
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
	}

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize - pull_len);

	/* avoid re-using remote pages, flag default unreuse */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* if we are only owner of page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* flip page offset to other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* move offset up to the next cache line */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}

static void get_v2rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S) + 1;
}

static void get_rx_desc_bnum(u32 bnum_flag, int *out_bnum)
{
	*out_bnum = hnae_get_field(bnum_flag,
				   HNS_RXD_BUFNUM_M, HNS_RXD_BUFNUM_S);
}

static void hns_nic_rx_checksum(struct hns_nic_ring_data *ring_data,
				struct sk_buff *skb, u32 flag)
{
	struct net_device *netdev = ring_data->napi.dev;
	u32 l3id;
	u32 l4id;

	/* check if RX checksum offload is enabled */
	if (unlikely(!(netdev->features & NETIF_F_RXCSUM)))
		return;

	/* In hardware, we only support checksum for the following protocols:
	 * 1) IPv4,
	 * 2) TCP(over IPv4 or IPv6),
	 * 3) UDP(over IPv4 or IPv6),
	 * 4) SCTP(over IPv4 or IPv6)
	 * but we support many L3(IPv4, IPv6, MPLS, PPPoE etc) and L4(TCP,
	 * UDP, GRE, SCTP, IGMP, ICMP etc.) protocols.
	 *
	 * Hardware limitation:
	 * Our present hardware RX Descriptor lacks L3/L4 checksum "Status &
	 * Error" bit (which usually can be used to indicate whether checksum
	 * was calculated by the hardware and if there was any error
	 * encountered during checksum calculation).
	 *
	 * Software workaround:
	 * We do get info within the RX descriptor about the kind of L3/L4
	 * protocol coming in the packet and the error status. These errors
	 * might not just be checksum errors but could be related to version,
	 * length of IPv4, UDP, TCP etc.
	 * Because there is no way of knowing if it is a L3/L4 error due to bad
	 * checksum or any other L3/L4 error, we will not (cannot) convey
	 * checksum status for such cases to upper stack and will not maintain
	 * the RX L3/L4 checksum counters as well.
	 */

	l3id = hnae_get_field(flag, HNS_RXD_L3ID_M, HNS_RXD_L3ID_S);
	l4id = hnae_get_field(flag, HNS_RXD_L4ID_M, HNS_RXD_L4ID_S);

	/* check L3 protocol for which checksum is supported */
	if ((l3id != HNS_RX_FLAG_L3ID_IPV4) && (l3id != HNS_RX_FLAG_L3ID_IPV6))
		return;

	/* check for any (not just checksum) flagged L3 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L3E_B)))
		return;

	/* we do not support checksum of fragmented packets */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_FRAG_B)))
		return;

	/* check L4 protocol for which checksum is supported */
	if ((l4id != HNS_RX_FLAG_L4ID_TCP) &&
	    (l4id != HNS_RX_FLAG_L4ID_UDP) &&
	    (l4id != HNS_RX_FLAG_L4ID_SCTP))
		return;

	/* check for any (not just checksum) flagged L4 protocol errors */
	if (unlikely(hnae_get_bit(flag, HNS_RXD_L4E_B)))
		return;

	/* now, this has to be a packet with valid RX checksum */
	skb->ip_summed = CHECKSUM_UNNECESSARY;
}

static int hns_nic_poll_rx_skb(struct hns_nic_ring_data *ring_data,
			       struct sk_buff **out_skb, int *out_bnum)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct sk_buff *skb;
	struct hnae_desc *desc;
	struct hnae_desc_cb *desc_cb;
	unsigned char *va;
	int bnum, length, i;
	int pull_len;
	u32 bnum_flag;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* prefetch first cache line of first page */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring_data->napi,
					HNS_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(ndev, "alloc rx skb fail\n");
		ring->stats.sw_err_cnt++;
		return -ENOMEM;
	}

	prefetchw(skb->data);
	length = le16_to_cpu(desc->rx.pkt_len);
	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);
	priv->ops.get_rxd_bnum(bnum_flag, &bnum);
	*out_bnum = bnum;

	if (length <= HNS_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* we can reuse buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* this page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum != 1)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
	} else {
		ring->stats.seg_pkt_cnt++;

		pull_len = hns_nic_get_headlen(va, bnum_flag, HNS_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		if (unlikely(bnum >= (int)MAX_SKB_FRAGS)) { /* check err */
			*out_bnum = 1;
			goto out_bnum_err;
		}
		for (i = 1; i < bnum; i++) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];

			hns_nic_reuse_page(skb, i, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
		}
	}

	/* exception handling: free the skb and skip over the bad descs */
	if (unlikely((!bnum) || (bnum > ring->max_desc_num_per_pkt))) {
out_bnum_err:
		*out_bnum = *out_bnum ? *out_bnum : 1; /* ntc moved, cannot be 0 */
		netdev_err(ndev, "invalid bnum(%d,%d,%d,%d),%016llx,%016llx\n",
			   bnum, ring->max_desc_num_per_pkt,
			   length, (int)MAX_SKB_FRAGS,
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.err_bd_num++;
		dev_kfree_skb_any(skb);
		return -EDOM;
	}

	bnum_flag = le32_to_cpu(desc->rx.ipoff_bnum_pid_flag);

	if (unlikely(!hnae_get_bit(bnum_flag, HNS_RXD_VLD_B))) {
		netdev_err(ndev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		ring->stats.non_vld_descs++;
		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(bnum_flag, HNS_RXD_DROP_B))) {
		ring->stats.err_pkt_len++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(bnum_flag, HNS_RXD_L2E_B))) {
		ring->stats.l2_err++;
		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;

	/* indicate to upper stack if our hardware has already calculated
	 * the RX checksum
	 */
	hns_nic_rx_checksum(ring_data, skb, bnum_flag);

	return 0;
}

static void
hns_nic_alloc_rx_buffers(struct hns_nic_ring_data *ring_data, int cleand_count)
{
	int i, ret;
	struct hnae_desc_cb res_cbs;
	struct hnae_desc_cb *desc_cb;
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;

	for (i = 0; i < cleand_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			ring->stats.reuse_pg_cnt++;
			hnae_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hnae_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				ring->stats.sw_err_cnt++;
				netdev_err(ndev, "hnae reserve buffer map failed.\n");
				break;
			}
			hnae_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* make sure all data has been written before submit */
	writel_relaxed(i, ring->io_base + RCB_REG_HEAD);
}

/* pass the received skb up to the network stack */
static void hns_nic_rx_up_pro(struct hns_nic_ring_data *ring_data,
			      struct sk_buff *skb)
{
	struct net_device *ndev = ring_data->napi.dev;

	skb->protocol = eth_type_trans(skb, ndev);
	(void)napi_gro_receive(&ring_data->napi, skb);
}

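/* number of descriptors the driver currently owns: the distance from
 * next_to_use forward to next_to_clean, accounting for ring wrap-around;
 * these are the slots available for refilling
 */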
static int hns_desc_unused(struct hnae_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}

static int hns_nic_rx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct sk_buff *skb;
	int num, bnum;
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns_desc_unused(ring);

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);
	rmb(); /* make sure num taken effect before the other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns_nic_alloc_rx_buffers(ring_data,
						 clean_count + unused_count);
			clean_count = 0;
			unused_count = hns_desc_unused(ring);
		}

		/* poll one pkt */
		err = hns_nic_poll_rx_skb(ring_data, &skb, &bnum);
		if (unlikely(!skb)) /* this fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* skip over the errored packet */
			recv_pkts++;
			continue;
		}

		/* hand the packet up to the ip stack */
		((void (*)(struct hns_nic_ring_data *, struct sk_buff *))v)(
			ring_data, skb);
		recv_pkts++;
	}

out:
	/* make sure all data has been written before submit */
	if (clean_count + unused_count > 0)
		hns_nic_alloc_rx_buffers(ring_data,
					 clean_count + unused_count);

	return recv_pkts;
}

static bool hns_nic_rx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num = 0;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	/* for hardware bug fixed */
	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (num > 0) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_rx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int num;

	num = readl_relaxed(ring->io_base + RCB_REG_FBDNUM);

	if (!num)
		return true;
	else
		return false;
}

static inline void hns_nic_reclaim_one_desc(struct hnae_ring *ring,
					    int *bytes, int *pkts)
{
	struct hnae_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned, after hnae_free_buffer_detach */
	hnae_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

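/* sanity-check the head index reported by hardware before reclaiming up
 * to it: it must lie in the half-open interval (next_to_clean,
 * next_to_use], taking ring wrap-around into account
 */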
static int is_valid_clean_head(struct hnae_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	assert(u > 0 && u < ring->desc_num);
	assert(c > 0 && c < ring->desc_num);
	assert(u != c && h != c); /* must be checked before call this func */

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}

/* netif_tx_lock degrades performance, so set it only when necessary */
#ifdef CONFIG_NET_POLL_CONTROLLER
#define NETIF_TX_LOCK(ndev) netif_tx_lock(ndev)
#define NETIF_TX_UNLOCK(ndev) netif_tx_unlock(ndev)
#else
#define NETIF_TX_LOCK(ndev)
#define NETIF_TX_UNLOCK(ndev)
#endif
/* reclaim all descs in one budget
 * return 0 on success, error code on failure
 */
static int hns_nic_tx_poll_one(struct hns_nic_ring_data *ring_data,
			       int budget, void *v)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
	rmb(); /* make sure head is ready before touch any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean) {
		NETIF_TX_UNLOCK(ndev);
		return 0; /* no data to poll */
	}

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(ndev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);
		ring->stats.io_err_cnt++;
		NETIF_TX_UNLOCK(ndev);
		return -EIO;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean) {
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
	}

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(priv->link && !netif_carrier_ok(ndev)))
		netif_carrier_on(ndev);

	if (unlikely(pkts && netif_carrier_ok(ndev) &&
		     (ring_space(ring) >= ring->max_desc_num_per_pkt * 2))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue) &&
		    !test_bit(NIC_STATE_DOWN, &priv->state)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}
	return 0;
}

static bool hns_nic_tx_fini_pro(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);

	head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head != ring->next_to_clean) {
		ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
			ring_data->ring, 1);

		return false;
	} else {
		return true;
	}
}

static bool hns_nic_tx_fini_pro_v2(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	int head = readl_relaxed(ring->io_base + RCB_REG_HEAD);

	if (head == ring->next_to_clean)
		return true;
	else
		return false;
}

static void hns_nic_tx_clr_all_bufs(struct hns_nic_ring_data *ring_data)
{
	struct hnae_ring *ring = ring_data->ring;
	struct net_device *ndev = ring_data->napi.dev;
	struct netdev_queue *dev_queue;
	int head;
	int bytes, pkts;

	NETIF_TX_LOCK(ndev);

	head = ring->next_to_use; /* ntu: software-set ring position */
	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean)
		hns_nic_reclaim_one_desc(ring, &bytes, &pkts);

	NETIF_TX_UNLOCK(ndev);

	dev_queue = netdev_get_tx_queue(ndev, ring_data->queue_index);
	netdev_tx_reset_queue(dev_queue);
}

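/* shared NAPI poll handler: keep calling poll_one() until either the
 * budget is exhausted or fini_process() confirms no work is pending,
 * then complete NAPI and re-arm the ring interrupt (toggle_ring_irq
 * with 0, the same call the ring-open path uses to enable the irq)
 */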
static int hns_nic_common_poll(struct napi_struct *napi, int budget)
{
	int clean_complete = 0;
	struct hns_nic_ring_data *ring_data =
		container_of(napi, struct hns_nic_ring_data, napi);
	struct hnae_ring *ring = ring_data->ring;

try_again:
	clean_complete += ring_data->poll_one(
		ring_data, budget - clean_complete,
		ring_data->ex_process);

	if (clean_complete < budget) {
		if (ring_data->fini_process(ring_data)) {
			napi_complete(napi);
			ring->q->handle->dev->ops->toggle_ring_irq(ring, 0);
		} else {
			goto try_again;
		}
	}

	return clean_complete;
}

static irqreturn_t hns_irq_handle(int irq, void *dev)
{
	struct hns_nic_ring_data *ring_data = (struct hns_nic_ring_data *)dev;

	ring_data->ring->q->handle->dev->ops->toggle_ring_irq(
		ring_data->ring, 1);
	napi_schedule(&ring_data->napi);

	return IRQ_HANDLED;
}

/**
 *hns_nic_adjust_link - adjust the network mode by the phy state or new param
 *@ndev: net device
 */
static void hns_nic_adjust_link(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int state = 1;

	if (ndev->phydev) {
		h->dev->ops->adjust_link(h, ndev->phydev->speed,
					 ndev->phydev->duplex);
		state = ndev->phydev->link;
	}
	state = state && h->dev->ops->get_status(h);

	if (state != priv->link) {
		if (state) {
			netif_carrier_on(ndev);
			netif_tx_wake_all_queues(ndev);
			netdev_info(ndev, "link up\n");
		} else {
			netif_carrier_off(ndev);
			netdev_info(ndev, "link down\n");
		}
		priv->link = state;
	}
}

/**
 *hns_nic_init_phy - init phy
 *@ndev: net device
 *@h: ae handle
 * Return 0 on success, negative on failure
 */
int hns_nic_init_phy(struct net_device *ndev, struct hnae_handle *h)
{
	struct phy_device *phy_dev = h->phy_dev;
	int ret;

	if (!h->phy_dev)
		return 0;

	if (h->phy_if != PHY_INTERFACE_MODE_XGMII) {
		phy_dev->dev_flags = 0;

		ret = phy_connect_direct(ndev, phy_dev, hns_nic_adjust_link,
					 h->phy_if);
	} else {
		ret = phy_attach_direct(ndev, phy_dev, 0, h->phy_if);
	}
	if (unlikely(ret))
		return -ENODEV;

	phy_dev->supported &= h->if_support;
	phy_dev->advertising = phy_dev->supported;

	if (h->phy_if == PHY_INTERFACE_MODE_XGMII)
		phy_dev->autoneg = false;

	return 0;
}

static int hns_nic_ring_open(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	napi_enable(&priv->ring_data[idx].napi);

	enable_irq(priv->ring_data[idx].ring->irq);
	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 0);

	return 0;
}

static int hns_nic_net_set_mac_address(struct net_device *ndev, void *p)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->dev->ops->set_mac_addr(h, mac_addr->sa_data);
	if (ret) {
		netdev_err(ndev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	memcpy(ndev->dev_addr, mac_addr->sa_data, ndev->addr_len);

	return 0;
}

void hns_nic_update_stats(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->update_stats(h, &netdev->stats);
}

/* set mac addr if it is configured, or leave it to the AE driver */
static void hns_init_mac_addr(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (!device_get_mac_address(priv->dev, ndev->dev_addr, ETH_ALEN)) {
		eth_hw_addr_random(ndev);
		dev_warn(priv->dev, "No valid mac, use random mac %pM",
			 ndev->dev_addr);
	}
}

static void hns_nic_ring_close(struct net_device *netdev, int idx)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);
	struct hnae_handle *h = priv->ae_handle;

	h->dev->ops->toggle_ring_irq(priv->ring_data[idx].ring, 1);
	disable_irq(priv->ring_data[idx].ring->irq);

	napi_disable(&priv->ring_data[idx].napi);
}

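/* Illustrative example: with q_num == 16 on a 32-core system, tx ring j
 * is pinned to cpu 2*j and its rx partner to cpu 2*j + 1, so each tx/rx
 * queue pair gets its own even/odd cpu pair; when q_num equals the cpu
 * count, rings instead map 1:1 onto cpus.
 */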
static int hns_nic_init_affinity_mask(int q_num, int ring_idx,
				      struct hnae_ring *ring, cpumask_t *mask)
{
	int cpu;

	/* Different irq balance between 16-core and 32-core systems.
	 * The cpu mask is set by ring index according to the ring flag,
	 * which indicates whether the ring is tx or rx.
	 */
	if (q_num == num_possible_cpus()) {
		if (is_tx_ring(ring))
			cpu = ring_idx;
		else
			cpu = ring_idx - q_num;
	} else {
		if (is_tx_ring(ring))
			cpu = ring_idx * 2;
		else
			cpu = (ring_idx - q_num) * 2 + 1;
	}

	cpumask_clear(mask);
	cpumask_set_cpu(cpu, mask);

	return cpu;
}

static int hns_nic_init_irq(struct hns_nic_priv *priv)
{
	struct hnae_handle *h = priv->ae_handle;
	struct hns_nic_ring_data *rd;
	int i;
	int ret;
	int cpu;

	for (i = 0; i < h->q_num * 2; i++) {
		rd = &priv->ring_data[i];

		if (rd->ring->irq_init_flag == RCB_IRQ_INITED)
			break;

		snprintf(rd->ring->ring_name, RCB_RING_NAME_LEN,
			 "%s-%s%d", priv->netdev->name,
			 (is_tx_ring(rd->ring) ? "tx" : "rx"), rd->queue_index);

		rd->ring->ring_name[RCB_RING_NAME_LEN - 1] = '\0';

		ret = request_irq(rd->ring->irq,
				  hns_irq_handle, 0, rd->ring->ring_name, rd);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   rd->ring->irq);
			return ret;
		}
		disable_irq(rd->ring->irq);

		cpu = hns_nic_init_affinity_mask(h->q_num, i,
						 rd->ring, &rd->mask);

		if (cpu_online(cpu))
			irq_set_affinity_hint(rd->ring->irq,
					      &rd->mask);

		rd->ring->irq_init_flag = RCB_IRQ_INITED;
	}

	return 0;
}

static int hns_nic_net_up(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	ret = hns_nic_init_irq(priv);
	if (ret != 0) {
		netdev_err(ndev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	for (i = 0; i < h->q_num * 2; i++) {
		ret = hns_nic_ring_open(ndev, i);
		if (ret)
			goto out_has_some_queues;
	}

	ret = h->dev->ops->set_mac_addr(h, ndev->dev_addr);
	if (ret)
		goto out_set_mac_addr_err;

	ret = h->dev->ops->start ? h->dev->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	if (ndev->phydev)
		phy_start(ndev->phydev);

	clear_bit(NIC_STATE_DOWN, &priv->state);
	(void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

	return 0;

out_start_err:
	netif_stop_queue(ndev);
out_set_mac_addr_err:
out_has_some_queues:
	for (j = i - 1; j >= 0; j--)
		hns_nic_ring_close(ndev, j);

	set_bit(NIC_STATE_DOWN, &priv->state);

	return ret;
}

static void hns_nic_net_down(struct net_device *ndev)
{
	int i;
	struct hnae_ae_ops *ops;
	struct hns_nic_priv *priv = netdev_priv(ndev);

	if (test_and_set_bit(NIC_STATE_DOWN, &priv->state))
		return;

	(void)del_timer_sync(&priv->service_timer);
	netif_tx_stop_all_queues(ndev);
	netif_carrier_off(ndev);
	netif_tx_disable(ndev);
	priv->link = 0;

	if (ndev->phydev)
		phy_stop(ndev->phydev);

	ops = priv->ae_handle->dev->ops;

	if (ops->stop)
		ops->stop(priv->ae_handle);

	netif_tx_stop_all_queues(ndev);

	for (i = priv->ae_handle->q_num - 1; i >= 0; i--) {
		hns_nic_ring_close(ndev, i);
		hns_nic_ring_close(ndev, i + priv->ae_handle->q_num);

		/* clean tx buffers */
		hns_nic_tx_clr_all_bufs(priv->ring_data + i);
	}
}

void hns_nic_net_reset(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *handle = priv->ae_handle;

	while (test_and_set_bit(NIC_STATE_RESETTING, &priv->state))
		usleep_range(1000, 2000);

	(void)hnae_reinit_handle(handle);

	clear_bit(NIC_STATE_RESETTING, &priv->state);
}

void hns_nic_net_reinit(struct net_device *netdev)
{
	struct hns_nic_priv *priv = netdev_priv(netdev);

	netif_trans_update(priv->netdev);
	while (test_and_set_bit(NIC_STATE_REINITING, &priv->state))
		usleep_range(1000, 2000);

	hns_nic_net_down(netdev);
	hns_nic_net_reset(netdev);
	(void)hns_nic_net_up(netdev);
	clear_bit(NIC_STATE_REINITING, &priv->state);
}

static int hns_nic_net_open(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	int ret;

	if (test_bit(NIC_STATE_TESTING, &priv->state))
		return -EBUSY;

	priv->link = 0;
	netif_carrier_off(ndev);

	ret = netif_set_real_num_tx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev, "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(ndev, h->q_num);
	if (ret < 0) {
		netdev_err(ndev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	ret = hns_nic_net_up(ndev);
	if (ret) {
		netdev_err(ndev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static int hns_nic_net_stop(struct net_device *ndev)
{
	hns_nic_net_down(ndev);

	return 0;
}

static void hns_tx_timeout_reset(struct hns_nic_priv *priv);
static void hns_nic_net_timeout(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);

	hns_tx_timeout_reset(priv);
}

static int hns_nic_do_ioctl(struct net_device *netdev, struct ifreq *ifr,
			    int cmd)
{
	struct phy_device *phy_dev = netdev->phydev;

	if (!netif_running(netdev))
		return -EINVAL;

	if (!phy_dev)
		return -ENOTSUPP;

	return phy_mii_ioctl(phy_dev, ifr, cmd);
}

/* use only for netconsole to poll with the device without interrupt */
#ifdef CONFIG_NET_POLL_CONTROLLER
void hns_nic_poll_controller(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	unsigned long flags;
	int i;

	local_irq_save(flags);
	for (i = 0; i < priv->ae_handle->q_num * 2; i++)
		napi_schedule(&priv->ring_data[i].napi);
	local_irq_restore(flags);
}
#endif

static netdev_tx_t hns_nic_net_xmit(struct sk_buff *skb,
				    struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	int ret;

	assert(skb->queue_mapping < ndev->ae_handle->q_num);
	ret = hns_nic_net_xmit_hw(ndev, skb,
				  &tx_ring_data(priv, skb->queue_mapping));
	if (ret == NETDEV_TX_OK) {
		netif_trans_update(ndev);
		ndev->stats.tx_bytes += skb->len;
		ndev->stats.tx_packets++;
	}
	return (netdev_tx_t)ret;
}

static void hns_nic_drop_rx_fetch(struct hns_nic_ring_data *ring_data,
				  struct sk_buff *skb)
{
	dev_kfree_skb_any(skb);
}

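/* Build a minimal 64-byte self-test frame for the serdes loopback path:
 * an all-0xFF ethernet header marked as ETH_P_IP (so it looks like a
 * broadcast IP packet) with a 0xAA test pattern filling the second half
 * of the frame, always queued on tx ring 0.
 */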
#define HNS_LB_TX_RING 0
static struct sk_buff *hns_assemble_skb(struct net_device *ndev)
{
	struct sk_buff *skb;
	struct ethhdr *ethhdr;
	int frame_len;

	/* allocate test skb */
	skb = alloc_skb(64, GFP_KERNEL);
	if (!skb)
		return NULL;

	skb_put(skb, 64);
	skb->dev = ndev;
	memset(skb->data, 0xFF, skb->len);

	/* must be a tcp/ip packet */
	ethhdr = (struct ethhdr *)skb->data;
	ethhdr->h_proto = htons(ETH_P_IP);

	frame_len = skb->len & (~1ul);
	memset(&skb->data[frame_len / 2], 0xAA,
	       frame_len / 2 - 1);

	skb->queue_mapping = HNS_LB_TX_RING;

	return skb;
}

static int hns_enable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	int speed, duplex;
	int ret;

	ret = ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 1);
	if (ret)
		return ret;

	ret = ops->start ? ops->start(h) : 0;
	if (ret)
		return ret;

	/* adjust link: speed and duplex */
	if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
		speed = 1000;
	else
		speed = 10000;
	duplex = 1;

	ops->adjust_link(h, speed, duplex);

	/* wait for h/w ready */
	mdelay(300);

	return 0;
}

static void hns_disable_serdes_lb(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;

	ops->stop(h);
	ops->set_loopback(h, MAC_INTERNALLOOP_SERDES, 0);
}

/**
 *hns_nic_clear_all_rx_fetch - clear the chip-fetched descriptors. The
 *function works as follows:
 * 1. if one rx ring has found that page_offset is not equal to 0 between
 *    head and tail, it means the chip fetched the wrong descs for the
 *    ring whose buffer size is 4096.
 * 2. we set the chip serdes loopback and set rss indirection to the ring.
 * 3. construct 64-byte ip broadcast packets and wait for the associated
 *    rx ring to receive all of them, until it fetches new descriptors.
 * 4. recover to the original state.
 *
 *@ndev: net device
 */
static int hns_nic_clear_all_rx_fetch(struct net_device *ndev)
{
	struct hns_nic_priv *priv = netdev_priv(ndev);
	struct hnae_handle *h = priv->ae_handle;
	struct hnae_ae_ops *ops = h->dev->ops;
	struct hns_nic_ring_data *rd;
	struct hnae_ring *ring;
	struct sk_buff *skb;
	u32 *org_indir;
	u32 *cur_indir;
	int indir_size;
	int head, tail;
	int fetch_num;
	int i, j;
	bool found;
	int retry_times;
	int ret = 0;

	/* alloc indir memory */
	indir_size = ops->get_rss_indir_size(h) * sizeof(*org_indir);
	org_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!org_indir)
		return -ENOMEM;

	/* store the original indirection */
	ops->get_rss(h, org_indir, NULL, NULL);

	cur_indir = kzalloc(indir_size, GFP_KERNEL);
	if (!cur_indir) {
		ret = -ENOMEM;
		goto cur_indir_alloc_err;
	}

	/* set loopback */
	if (hns_enable_serdes_lb(ndev)) {
		ret = -EINVAL;
		goto enable_serdes_lb_err;
	}

	/* for each rx ring, clear the fetched descs */
	for (i = 0; i < h->q_num; i++) {
		ring = &h->qs[i]->rx_ring;
		head = readl_relaxed(ring->io_base + RCB_REG_HEAD);
		tail = readl_relaxed(ring->io_base + RCB_REG_TAIL);
		found = false;
		fetch_num = ring_dist(ring, head, tail);

		while (head != tail) {
			if (ring->desc_cb[head].page_offset != 0) {
				found = true;
				break;
			}

			head++;
			if (head == ring->desc_num)
				head = 0;
		}

		if (found) {
			for (j = 0; j < indir_size / sizeof(*org_indir); j++)
				cur_indir[j] = i;
			ops->set_rss(h, cur_indir, NULL, 0);

			for (j = 0; j < fetch_num; j++) {
				/* alloc one skb and init */
				skb = hns_assemble_skb(ndev);
				if (!skb)
					goto out;
				rd = &tx_ring_data(priv, skb->queue_mapping);
				hns_nic_net_xmit_hw(ndev, skb, rd);

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean rx */
					rd = &rx_ring_data(priv, i);
					if (rd->poll_one(rd, fetch_num,
							 hns_nic_drop_rx_fetch))
						break;
				}

				retry_times = 0;
				while (retry_times++ < 10) {
					mdelay(10);
					/* clean the packet sent on tx ring 0 */
					rd = &tx_ring_data(priv,
							   HNS_LB_TX_RING);
					if (rd->poll_one(rd, fetch_num, NULL))
						break;
				}
			}
		}
	}

out:
	/* restore everything */
	ops->set_rss(h, org_indir, NULL, 0);
	hns_disable_serdes_lb(ndev);
enable_serdes_lb_err:
	kfree(cur_indir);
cur_indir_alloc_err:
	kfree(org_indir);

	return ret;
}

static int hns_nic_change_mtu(struct net_device *ndev, int new_mtu)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        bool if_running = netif_running(ndev);
        int ret;

        /* MTU < 68 is an error and causes problems on some kernels */
        if (new_mtu < 68)
                return -EINVAL;

        /* MTU unchanged, nothing to do */
        if (new_mtu == ndev->mtu)
                return 0;

        if (!h->dev->ops->set_mtu)
                return -ENOTSUPP;

        if (if_running) {
                (void)hns_nic_net_stop(ndev);
                msleep(100);
        }

        if (priv->enet_ver != AE_VERSION_1 &&
            ndev->mtu <= BD_SIZE_2048_MAX_MTU &&
            new_mtu > BD_SIZE_2048_MAX_MTU) {
                /* update desc */
                hnae_reinit_all_ring_desc(h);

                /* drop the packets which the chip has already fetched */
                ret = hns_nic_clear_all_rx_fetch(ndev);

                /* the page offset must be consistent with the desc */
                hnae_reinit_all_ring_page_off(h);

                if (ret) {
                        netdev_err(ndev, "failed to clear the fetched desc\n");
                        goto out;
                }
        }

        ret = h->dev->ops->set_mtu(h, new_mtu);
        if (ret) {
                netdev_err(ndev, "set mtu fail, return value %d\n",
                           ret);
                goto out;
        }

        /* finally, set new mtu to netdevice */
        ndev->mtu = new_mtu;

out:
        if (if_running) {
                if (hns_nic_net_open(ndev)) {
                        netdev_err(ndev, "hns net open fail\n");
                        ret = -EINVAL;
                }
        }

        return ret;
}

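/*
 * Apply netdev feature changes. On enet v2 hardware, toggling TSO swaps
 * the tx descriptor fill and queue-stop callbacks between the TSO-aware
 * and plain variants; enet v1 hardware cannot do TSO at all.
 */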
static int hns_nic_set_features(struct net_device *netdev,
                                netdev_features_t features)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        switch (priv->enet_ver) {
        case AE_VERSION_1:
                if (features & (NETIF_F_TSO | NETIF_F_TSO6))
                        netdev_info(netdev, "enet v1 does not support tso!\n");
                break;
        default:
                if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
                        priv->ops.fill_desc = fill_tso_desc;
                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
                        /* The chip only supports 7*4096 */
                        netif_set_gso_max_size(netdev, 7 * 4096);
                } else {
                        priv->ops.fill_desc = fill_v2_desc;
                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
                }
                break;
        }
        netdev->features = features;
        return 0;
}

static netdev_features_t hns_nic_fix_features(
                struct net_device *netdev, netdev_features_t features)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);

        switch (priv->enet_ver) {
        case AE_VERSION_1:
                features &= ~(NETIF_F_TSO | NETIF_F_TSO6 |
                              NETIF_F_HW_VLAN_CTAG_FILTER);
                break;
        default:
                break;
        }
        return features;
}

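/*
 * __dev_uc_sync() callbacks: push a single unicast address to (or remove
 * it from) the hardware MAC table, when the ae ops provide the hooks.
 */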
static int hns_nic_uc_sync(struct net_device *netdev, const unsigned char *addr)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        if (h->dev->ops->add_uc_addr)
                return h->dev->ops->add_uc_addr(h, addr);

        return 0;
}

static int hns_nic_uc_unsync(struct net_device *netdev,
                             const unsigned char *addr)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        if (h->dev->ops->rm_uc_addr)
                return h->dev->ops->rm_uc_addr(h, addr);

        return 0;
}

/**
 * hns_set_multicast_list - set multicast mac addresses
 * @ndev: net device
 *
 * The hardware multicast table is cleared first, then repopulated from
 * the netdev's current multicast list.
 *
 * return void
 */
void hns_set_multicast_list(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;
        struct netdev_hw_addr *ha = NULL;

        if (!h) {
                netdev_err(ndev, "hnae handle is null\n");
                return;
        }

        if (h->dev->ops->clr_mc_addr)
                if (h->dev->ops->clr_mc_addr(h))
                        netdev_err(ndev, "clear multicast address fail\n");

        if (h->dev->ops->set_mc_addr) {
                netdev_for_each_mc_addr(ha, ndev)
                        if (h->dev->ops->set_mc_addr(h, ha->addr))
                                netdev_err(ndev, "set multicast fail\n");
        }
}

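/*
 * ndo_set_rx_mode hook: propagate promiscuous mode, the multicast list
 * and the unicast address list to the hardware.
 */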
void hns_nic_set_rx_mode(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        if (h->dev->ops->set_promisc_mode) {
                if (ndev->flags & IFF_PROMISC)
                        h->dev->ops->set_promisc_mode(h, 1);
                else
                        h->dev->ops->set_promisc_mode(h, 0);
        }

        hns_set_multicast_list(ndev);

        if (__dev_uc_sync(ndev, hns_nic_uc_sync, hns_nic_uc_unsync))
                netdev_err(ndev, "failed to sync uc addresses\n");
}

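/*
 * ndo_get_stats64 hook: sum the per-ring byte/packet counters across all
 * queues and merge them with the error counters kept in ndev->stats.
 */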
static void hns_nic_get_stats64(struct net_device *ndev,
                                struct rtnl_link_stats64 *stats)
{
        int idx = 0;
        u64 tx_bytes = 0;
        u64 rx_bytes = 0;
        u64 tx_pkts = 0;
        u64 rx_pkts = 0;
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h = priv->ae_handle;

        for (idx = 0; idx < h->q_num; idx++) {
                tx_bytes += h->qs[idx]->tx_ring.stats.tx_bytes;
                tx_pkts += h->qs[idx]->tx_ring.stats.tx_pkts;
                rx_bytes += h->qs[idx]->rx_ring.stats.rx_bytes;
                rx_pkts += h->qs[idx]->rx_ring.stats.rx_pkts;
        }

        stats->tx_bytes = tx_bytes;
        stats->tx_packets = tx_pkts;
        stats->rx_bytes = rx_bytes;
        stats->rx_packets = rx_pkts;

        stats->rx_errors = ndev->stats.rx_errors;
        stats->multicast = ndev->stats.multicast;
        stats->rx_length_errors = ndev->stats.rx_length_errors;
        stats->rx_crc_errors = ndev->stats.rx_crc_errors;
        stats->rx_missed_errors = ndev->stats.rx_missed_errors;

        stats->tx_errors = ndev->stats.tx_errors;
        stats->rx_dropped = ndev->stats.rx_dropped;
        stats->tx_dropped = ndev->stats.tx_dropped;
        stats->collisions = ndev->stats.collisions;
        stats->rx_over_errors = ndev->stats.rx_over_errors;
        stats->rx_frame_errors = ndev->stats.rx_frame_errors;
        stats->rx_fifo_errors = ndev->stats.rx_fifo_errors;
        stats->tx_aborted_errors = ndev->stats.tx_aborted_errors;
        stats->tx_carrier_errors = ndev->stats.tx_carrier_errors;
        stats->tx_fifo_errors = ndev->stats.tx_fifo_errors;
        stats->tx_heartbeat_errors = ndev->stats.tx_heartbeat_errors;
        stats->tx_window_errors = ndev->stats.tx_window_errors;
        stats->rx_compressed = ndev->stats.rx_compressed;
        stats->tx_compressed = ndev->stats.tx_compressed;
}

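/*
 * ndo_select_queue hook. On enet v2 hardware, broadcast and multicast
 * frames are pinned to queue 0 to work around a hardware queue loopback
 * problem; everything else uses the core's fallback queue selection.
 */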
static u16
hns_nic_select_queue(struct net_device *ndev, struct sk_buff *skb,
                     void *accel_priv, select_queue_fallback_t fallback)
{
        struct ethhdr *eth_hdr = (struct ethhdr *)skb->data;
        struct hns_nic_priv *priv = netdev_priv(ndev);

        /* pin broadcast/multicast packets to queue 0 to avoid a hardware
         * queue loopback problem
         */
        if (!AE_IS_VER1(priv->enet_ver) &&
            is_multicast_ether_addr(eth_hdr->h_dest))
                return 0;
        else
                return fallback(ndev, skb);
}

static const struct net_device_ops hns_nic_netdev_ops = {
        .ndo_open = hns_nic_net_open,
        .ndo_stop = hns_nic_net_stop,
        .ndo_start_xmit = hns_nic_net_xmit,
        .ndo_tx_timeout = hns_nic_net_timeout,
        .ndo_set_mac_address = hns_nic_net_set_mac_address,
        .ndo_change_mtu = hns_nic_change_mtu,
        .ndo_do_ioctl = hns_nic_do_ioctl,
        .ndo_set_features = hns_nic_set_features,
        .ndo_fix_features = hns_nic_fix_features,
        .ndo_get_stats64 = hns_nic_get_stats64,
#ifdef CONFIG_NET_POLL_CONTROLLER
        .ndo_poll_controller = hns_nic_poll_controller,
#endif
        .ndo_set_rx_mode = hns_nic_set_rx_mode,
        .ndo_select_queue = hns_nic_select_queue,
};

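/*
 * Refresh the link state from the service task. Non-XGMII PHYs return
 * early, presumably because the PHY state machine already drives the
 * adjust_link callback for them; XGMII PHYs are polled here first.
 */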
static void hns_nic_update_link_status(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        if (h->phy_dev) {
                if (h->phy_if != PHY_INTERFACE_MODE_XGMII)
                        return;

                (void)genphy_read_status(h->phy_dev);
        }
        hns_nic_adjust_link(netdev);
}

/* for dumping key regs */
static void hns_nic_dump(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hnae_ae_ops *ops = h->dev->ops;
        u32 *data, reg_num, i;

        if (ops->get_regs_len && ops->get_regs) {
                reg_num = ops->get_regs_len(priv->ae_handle);
                reg_num = (reg_num + 3ul) & ~3ul;
                data = kcalloc(reg_num, sizeof(u32), GFP_KERNEL);
                if (data) {
                        ops->get_regs(priv->ae_handle, data);
                        for (i = 0; i < reg_num; i += 4)
                                pr_info("0x%08x: 0x%08x 0x%08x 0x%08x 0x%08x\n",
                                        i, data[i], data[i + 1],
                                        data[i + 2], data[i + 3]);
                        kfree(data);
                }
        }

        for (i = 0; i < h->q_num; i++) {
                pr_info("tx_queue%d_next_to_clean:%d\n",
                        i, h->qs[i]->tx_ring.next_to_clean);
                pr_info("tx_queue%d_next_to_use:%d\n",
                        i, h->qs[i]->tx_ring.next_to_use);
                pr_info("rx_queue%d_next_to_clean:%d\n",
                        i, h->qs[i]->rx_ring.next_to_clean);
                pr_info("rx_queue%d_next_to_use:%d\n",
                        i, h->qs[i]->rx_ring.next_to_use);
        }
}

/* for the resetting subtask */
static void hns_nic_reset_subtask(struct hns_nic_priv *priv)
{
        enum hnae_port_type type = priv->ae_handle->port_type;

        if (!test_bit(NIC_STATE2_RESET_REQUESTED, &priv->state))
                return;
        clear_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);

        /* If we're already down, removing or resetting, just bail */
        if (test_bit(NIC_STATE_DOWN, &priv->state) ||
            test_bit(NIC_STATE_REMOVING, &priv->state) ||
            test_bit(NIC_STATE_RESETTING, &priv->state))
                return;

        hns_nic_dump(priv);
        netdev_info(priv->netdev, "try to reset %s port!\n",
                    (type == HNAE_PORT_DEBUG ? "debug" : "service"));

        rtnl_lock();
        /* put off any impending NetWatchDogTimeout */
        netif_trans_update(priv->netdev);

        if (type == HNAE_PORT_DEBUG) {
                hns_nic_net_reinit(priv->netdev);
        } else {
                netif_carrier_off(priv->netdev);
                netif_tx_disable(priv->netdev);
        }
        rtnl_unlock();
}

/* for doing service complete */
static void hns_nic_service_event_complete(struct hns_nic_priv *priv)
{
        WARN_ON(!test_bit(NIC_STATE_SERVICE_SCHED, &priv->state));

        smp_mb__before_atomic();
        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
}

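/*
 * Periodic service task: refresh link status and LEDs, update statistics,
 * run any requested reset, then clear the SERVICE_SCHED flag.
 */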
static void hns_nic_service_task(struct work_struct *work)
{
        struct hns_nic_priv *priv
                = container_of(work, struct hns_nic_priv, service_task);
        struct hnae_handle *h = priv->ae_handle;

        hns_nic_update_link_status(priv->netdev);
        h->dev->ops->update_led_status(h);
        hns_nic_update_stats(priv->netdev);

        hns_nic_reset_subtask(priv);
        hns_nic_service_event_complete(priv);
}

static void hns_nic_task_schedule(struct hns_nic_priv *priv)
{
        if (!test_bit(NIC_STATE_DOWN, &priv->state) &&
            !test_bit(NIC_STATE_REMOVING, &priv->state) &&
            !test_and_set_bit(NIC_STATE_SERVICE_SCHED, &priv->state))
                (void)schedule_work(&priv->service_task);
}

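/*
 * Timer callback: re-arm itself at SERVICE_TIMER_HZ and schedule the
 * service task.
 */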
static void hns_nic_service_timer(unsigned long data)
{
        struct hns_nic_priv *priv = (struct hns_nic_priv *)data;

        (void)mod_timer(&priv->service_timer, jiffies + SERVICE_TIMER_HZ);

        hns_nic_task_schedule(priv);
}

/**
 * hns_tx_timeout_reset - initiate reset due to Tx timeout
 * @priv: driver private struct
 **/
static void hns_tx_timeout_reset(struct hns_nic_priv *priv)
{
        /* Do the reset outside of interrupt context */
        if (!test_bit(NIC_STATE_DOWN, &priv->state)) {
                set_bit(NIC_STATE2_RESET_REQUESTED, &priv->state);
                netdev_warn(priv->netdev,
                            "initiating reset due to tx timeout(%llu,0x%lx)\n",
                            priv->tx_timeout_count, priv->state);
                priv->tx_timeout_count++;
                hns_nic_task_schedule(priv);
        }
}

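/*
 * Allocate the ring_data array (tx rings first, then rx rings) and hook
 * each ring up to its poll/fini handlers and a NAPI context.
 */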
static int hns_nic_init_ring_data(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        struct hns_nic_ring_data *rd;
        bool is_ver1 = AE_IS_VER1(priv->enet_ver);
        int i;

        if (h->q_num > NIC_MAX_Q_PER_VF) {
                netdev_err(priv->netdev, "too many queues (%d)\n", h->q_num);
                return -EINVAL;
        }

        priv->ring_data = kzalloc(h->q_num * sizeof(*priv->ring_data) * 2,
                                  GFP_KERNEL);
        if (!priv->ring_data)
                return -ENOMEM;

        for (i = 0; i < h->q_num; i++) {
                rd = &priv->ring_data[i];
                rd->queue_index = i;
                rd->ring = &h->qs[i]->tx_ring;
                rd->poll_one = hns_nic_tx_poll_one;
                rd->fini_process = is_ver1 ? hns_nic_tx_fini_pro :
                        hns_nic_tx_fini_pro_v2;

                netif_napi_add(priv->netdev, &rd->napi,
                               hns_nic_common_poll, NIC_TX_CLEAN_MAX_NUM);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        for (i = h->q_num; i < h->q_num * 2; i++) {
                rd = &priv->ring_data[i];
                rd->queue_index = i - h->q_num;
                rd->ring = &h->qs[i - h->q_num]->rx_ring;
                rd->poll_one = hns_nic_rx_poll_one;
                rd->ex_process = hns_nic_rx_up_pro;
                rd->fini_process = is_ver1 ? hns_nic_rx_fini_pro :
                        hns_nic_rx_fini_pro_v2;

                netif_napi_add(priv->netdev, &rd->napi,
                               hns_nic_common_poll, NIC_RX_CLEAN_MAX_NUM);
                rd->ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }

        return 0;
}

static void hns_nic_uninit_ring_data(struct hns_nic_priv *priv)
{
        struct hnae_handle *h = priv->ae_handle;
        int i;

        for (i = 0; i < h->q_num * 2; i++) {
                netif_napi_del(&priv->ring_data[i].napi);
                if (priv->ring_data[i].ring->irq_init_flag == RCB_IRQ_INITED) {
                        (void)irq_set_affinity_hint(
                                priv->ring_data[i].ring->irq,
                                NULL);
                        free_irq(priv->ring_data[i].ring->irq,
                                 &priv->ring_data[i]);
                }

                priv->ring_data[i].ring->irq_init_flag = RCB_IRQ_NOT_INITED;
        }
        kfree(priv->ring_data);
}

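/*
 * Pick the descriptor fill and queue-stop callbacks that match the enet
 * version, and enable the TSO statistics on v2 hardware.
 */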
static void hns_nic_set_priv_ops(struct net_device *netdev)
{
        struct hns_nic_priv *priv = netdev_priv(netdev);
        struct hnae_handle *h = priv->ae_handle;

        if (AE_IS_VER1(priv->enet_ver)) {
                priv->ops.fill_desc = fill_desc;
                priv->ops.get_rxd_bnum = get_rx_desc_bnum;
                priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
        } else {
                priv->ops.get_rxd_bnum = get_v2rx_desc_bnum;
                if ((netdev->features & NETIF_F_TSO) ||
                    (netdev->features & NETIF_F_TSO6)) {
                        priv->ops.fill_desc = fill_tso_desc;
                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tso;
                        /* This chip only supports 7*4096 */
                        netif_set_gso_max_size(netdev, 7 * 4096);
                } else {
                        priv->ops.fill_desc = fill_v2_desc;
                        priv->ops.maybe_stop_tx = hns_nic_maybe_stop_tx;
                }
                /* enable tso at init time; it is switched on and off
                 * per packet through the TSE bit in the bd
                 */
                h->dev->ops->set_tso_stats(h, 1);
        }
}

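/*
 * Try to acquire an hnae handle for this netdev and bring the rest of
 * the device up: PHY, ring data, private ops and netdev registration.
 * Fails with -ENODEV if the ae device has not been probed yet, in which
 * case the caller falls back to the hnae notifier.
 */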
static int hns_nic_try_get_ae(struct net_device *ndev)
{
        struct hns_nic_priv *priv = netdev_priv(ndev);
        struct hnae_handle *h;
        int ret;

        h = hnae_get_handle(&priv->netdev->dev,
                            priv->fwnode, priv->port_id, NULL);
        if (IS_ERR_OR_NULL(h)) {
                ret = -ENODEV;
                dev_dbg(priv->dev, "no hnae handle yet, register notifier!\n");
                goto out;
        }
        priv->ae_handle = h;

        ret = hns_nic_init_phy(ndev, h);
        if (ret) {
                dev_err(priv->dev, "probe phy device fail!\n");
                goto out_init_phy;
        }

        ret = hns_nic_init_ring_data(priv);
        if (ret) {
                ret = -ENOMEM;
                goto out_init_ring_data;
        }

        hns_nic_set_priv_ops(ndev);

        ret = register_netdev(ndev);
        if (ret) {
                dev_err(priv->dev, "probe register netdev fail!\n");
                goto out_reg_ndev_fail;
        }
        return 0;

out_reg_ndev_fail:
        hns_nic_uninit_ring_data(priv);
        priv->ring_data = NULL;
out_init_phy:
out_init_ring_data:
        hnae_put_handle(priv->ae_handle);
        priv->ae_handle = NULL;
out:
        return ret;
}

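/*
 * hnae notifier callback: fires when an ae device registers after us.
 * Retry the handle acquisition and drop the notifier once it succeeds.
 */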
static int hns_nic_notifier_action(struct notifier_block *nb,
                                   unsigned long action, void *data)
{
        struct hns_nic_priv *priv =
                container_of(nb, struct hns_nic_priv, notifier_block);

        assert(action == HNAE_AE_REGISTER);

        if (!hns_nic_try_get_ae(priv->netdev)) {
                hnae_unregister_notifier(&priv->notifier_block);
                priv->notifier_block.notifier_call = NULL;
        }
        return 0;
}

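/*
 * Platform probe: allocate the netdev, read the enet version and the
 * ae-handle/port-id configuration from DT or ACPI, set up features and
 * MTU limits, then try to bind to the ae device (directly or through
 * the hnae notifier).
 */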
static int hns_nic_dev_probe(struct platform_device *pdev)
{
        struct device *dev = &pdev->dev;
        struct net_device *ndev;
        struct hns_nic_priv *priv;
        u32 port_id;
        int ret;

        ndev = alloc_etherdev_mq(sizeof(struct hns_nic_priv), NIC_MAX_Q_PER_VF);
        if (!ndev)
                return -ENOMEM;

        platform_set_drvdata(pdev, ndev);

        priv = netdev_priv(ndev);
        priv->dev = dev;
        priv->netdev = ndev;

        if (dev_of_node(dev)) {
                struct device_node *ae_node;

                if (of_device_is_compatible(dev->of_node,
                                            "hisilicon,hns-nic-v1"))
                        priv->enet_ver = AE_VERSION_1;
                else
                        priv->enet_ver = AE_VERSION_2;

                ae_node = of_parse_phandle(dev->of_node, "ae-handle", 0);
                if (IS_ERR_OR_NULL(ae_node)) {
                        ret = PTR_ERR(ae_node);
                        dev_err(dev, "failed to find ae-handle\n");
                        goto out_read_prop_fail;
                }
                priv->fwnode = &ae_node->fwnode;
        } else if (is_acpi_node(dev->fwnode)) {
                struct acpi_reference_args args;

                if (acpi_dev_found(hns_enet_acpi_match[0].id)) {
                        priv->enet_ver = AE_VERSION_1;
                } else if (acpi_dev_found(hns_enet_acpi_match[1].id)) {
                        priv->enet_ver = AE_VERSION_2;
                } else {
                        /* do not leak the netdev on an unknown device */
                        ret = -ENXIO;
                        goto out_read_prop_fail;
                }

                /* try to find port-idx-in-ae first */
                ret = acpi_node_get_property_reference(dev->fwnode,
                                                       "ae-handle", 0, &args);
                if (ret) {
                        dev_err(dev, "failed to find ae-handle\n");
                        goto out_read_prop_fail;
                }
                priv->fwnode = acpi_fwnode_handle(args.adev);
        } else {
                dev_err(dev, "cannot read cfg data from OF or acpi\n");
                ret = -ENXIO;
                goto out_read_prop_fail;
        }

        ret = device_property_read_u32(dev, "port-idx-in-ae", &port_id);
        if (ret) {
                /* only for old code compatible */
                ret = device_property_read_u32(dev, "port-id", &port_id);
                if (ret)
                        goto out_read_prop_fail;
                /* for old dts, we need to calculate the port offset */
                port_id = port_id < HNS_SRV_OFFSET ? port_id + HNS_DEBUG_OFFSET
                        : port_id - HNS_SRV_OFFSET;
        }
        priv->port_id = port_id;

        hns_init_mac_addr(ndev);

        ndev->watchdog_timeo = HNS_NIC_TX_TIMEOUT;
        ndev->priv_flags |= IFF_UNICAST_FLT;
        ndev->netdev_ops = &hns_nic_netdev_ops;
        hns_ethtool_set_ops(ndev);

        ndev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                NETIF_F_GRO;
        ndev->vlan_features |=
                NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM;
        ndev->vlan_features |= NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO;

        /* MTU range: 68 - 9578 (v1) or 9706 (v2) */
        ndev->min_mtu = MAC_MIN_MTU;
        switch (priv->enet_ver) {
        case AE_VERSION_2:
                ndev->features |= NETIF_F_TSO | NETIF_F_TSO6;
                ndev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
                        NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
                        NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6;
                ndev->max_mtu = MAC_MAX_MTU_V2 -
                                (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
                break;
        default:
                ndev->max_mtu = MAC_MAX_MTU -
                                (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
                break;
        }

        SET_NETDEV_DEV(ndev, dev);

        if (!dma_set_mask_and_coherent(dev, DMA_BIT_MASK(64)))
                dev_dbg(dev, "set mask to 64bit\n");
        else
                dev_err(dev, "set mask to 64bit fail!\n");

        /* carrier off reporting is important to ethtool even BEFORE open */
        netif_carrier_off(ndev);

        setup_timer(&priv->service_timer, hns_nic_service_timer,
                    (unsigned long)priv);
        INIT_WORK(&priv->service_task, hns_nic_service_task);

        set_bit(NIC_STATE_SERVICE_INITED, &priv->state);
        clear_bit(NIC_STATE_SERVICE_SCHED, &priv->state);
        set_bit(NIC_STATE_DOWN, &priv->state);

        if (hns_nic_try_get_ae(priv->netdev)) {
                priv->notifier_block.notifier_call = hns_nic_notifier_action;
                ret = hnae_register_notifier(&priv->notifier_block);
                if (ret) {
                        dev_err(dev, "register notifier fail!\n");
                        goto out_notify_fail;
                }
                dev_dbg(dev, "no hnae handle yet, notifier registered!\n");
        }

        return 0;

out_notify_fail:
        (void)cancel_work_sync(&priv->service_task);
out_read_prop_fail:
        free_netdev(ndev);
        return ret;
}

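/*
 * Platform remove: unregister the netdev, tear down the rings, PHY, ae
 * handle and notifier, then free the netdev.
 */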
static int hns_nic_dev_remove(struct platform_device *pdev)
{
        struct net_device *ndev = platform_get_drvdata(pdev);
        struct hns_nic_priv *priv = netdev_priv(ndev);

        if (ndev->reg_state != NETREG_UNINITIALIZED)
                unregister_netdev(ndev);

        if (priv->ring_data)
                hns_nic_uninit_ring_data(priv);
        priv->ring_data = NULL;

        if (ndev->phydev)
                phy_disconnect(ndev->phydev);

        if (!IS_ERR_OR_NULL(priv->ae_handle))
                hnae_put_handle(priv->ae_handle);
        priv->ae_handle = NULL;
        if (priv->notifier_block.notifier_call)
                hnae_unregister_notifier(&priv->notifier_block);
        priv->notifier_block.notifier_call = NULL;

        set_bit(NIC_STATE_REMOVING, &priv->state);
        (void)cancel_work_sync(&priv->service_task);

        free_netdev(ndev);
        return 0;
}

static const struct of_device_id hns_enet_of_match[] = {
        {.compatible = "hisilicon,hns-nic-v1",},
        {.compatible = "hisilicon,hns-nic-v2",},
        {},
};

MODULE_DEVICE_TABLE(of, hns_enet_of_match);

static struct platform_driver hns_nic_dev_driver = {
        .driver = {
                .name = "hns-nic",
                .of_match_table = hns_enet_of_match,
                .acpi_match_table = ACPI_PTR(hns_enet_acpi_match),
        },
        .probe = hns_nic_dev_probe,
        .remove = hns_nic_dev_remove,
};

module_platform_driver(hns_nic_dev_driver);

MODULE_DESCRIPTION("HISILICON HNS Ethernet driver");
MODULE_AUTHOR("Hisilicon, Inc.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("platform:hns-nic");