Salil76ad4f02017-08-02 16:59:45 +01001/*
2 * Copyright (c) 2016~2017 Hisilicon Limited.
3 *
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of the GNU General Public License as published by
6 * the Free Software Foundation; either version 2 of the License, or
7 * (at your option) any later version.
8 */
9
10#include <linux/dma-mapping.h>
11#include <linux/etherdevice.h>
12#include <linux/interrupt.h>
13#include <linux/if_vlan.h>
14#include <linux/ip.h>
15#include <linux/ipv6.h>
16#include <linux/module.h>
17#include <linux/pci.h>
18#include <linux/skbuff.h>
19#include <linux/sctp.h>
20#include <linux/vermagic.h>
21#include <net/gre.h>
Yunsheng Lin30d240d2017-10-17 14:51:30 +080022#include <net/pkt_cls.h>
Salil76ad4f02017-08-02 16:59:45 +010023#include <net/vxlan.h>
24
25#include "hnae3.h"
26#include "hns3_enet.h"
27
Yunsheng Lin1db9b1b2017-10-09 15:44:01 +080028static const char hns3_driver_name[] = "hns3";
Salil76ad4f02017-08-02 16:59:45 +010029const char hns3_driver_version[] = VERMAGIC_STRING;
30static const char hns3_driver_string[] =
31 "Hisilicon Ethernet Network Driver for Hip08 Family";
32static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
33static struct hnae3_client client;
34
35/* hns3_pci_tbl - PCI Device ID Table
36 *
37 * Last entry must be all 0s
38 *
39 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
40 * Class, Class Mask, private data (not used) }
41 */
42static const struct pci_device_id hns3_pci_tbl[] = {
43 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
44 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
Yunsheng Line92a0842017-09-20 18:52:50 +080045 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
Yunsheng Lin2daf4a62017-09-20 18:52:51 +080046 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
Yunsheng Line92a0842017-09-20 18:52:50 +080047 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
Yunsheng Lin2daf4a62017-09-20 18:52:51 +080048 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
Yunsheng Line92a0842017-09-20 18:52:50 +080049 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
Yunsheng Lin2daf4a62017-09-20 18:52:51 +080050 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
Yunsheng Line92a0842017-09-20 18:52:50 +080051 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
Yunsheng Lin2daf4a62017-09-20 18:52:51 +080052 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
Yunsheng Line92a0842017-09-20 18:52:50 +080053 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
Yunsheng Lin2daf4a62017-09-20 18:52:51 +080054 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
Salil Mehta424eb832017-12-14 18:03:06 +000055 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
56 {PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
Salil76ad4f02017-08-02 16:59:45 +010057 /* required last entry */
58 {0, }
59};
60MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);
61
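/* Per-TQP-vector interrupt handler: all real work is deferred to the vector's NAPI poll */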
62static irqreturn_t hns3_irq_handle(int irq, void *dev)
63{
64 struct hns3_enet_tqp_vector *tqp_vector = dev;
65
66 napi_schedule(&tqp_vector->napi);
67
68 return IRQ_HANDLED;
69}
70
71static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
72{
73 struct hns3_enet_tqp_vector *tqp_vectors;
74 unsigned int i;
75
76 for (i = 0; i < priv->vector_num; i++) {
77 tqp_vectors = &priv->tqp_vector[i];
78
79 if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
80 continue;
81
82 /* release the irq resource */
83 free_irq(tqp_vectors->vector_irq, tqp_vectors);
84 tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
85 }
86}
87
88static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
89{
90 struct hns3_enet_tqp_vector *tqp_vectors;
91 int txrx_int_idx = 0;
92 int rx_int_idx = 0;
93 int tx_int_idx = 0;
94 unsigned int i;
95 int ret;
96
97 for (i = 0; i < priv->vector_num; i++) {
98 tqp_vectors = &priv->tqp_vector[i];
99
100 if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
101 continue;
102
103 if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
104 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
105 "%s-%s-%d", priv->netdev->name, "TxRx",
106 txrx_int_idx++);
107 txrx_int_idx++;
108 } else if (tqp_vectors->rx_group.ring) {
109 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
110 "%s-%s-%d", priv->netdev->name, "Rx",
111 rx_int_idx++);
112 } else if (tqp_vectors->tx_group.ring) {
113 snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
114 "%s-%s-%d", priv->netdev->name, "Tx",
115 tx_int_idx++);
116 } else {
117 /* Skip this unused q_vector */
118 continue;
119 }
120
121 tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';
122
123 ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
124 tqp_vectors->name,
125 tqp_vectors);
126 if (ret) {
127 netdev_err(priv->netdev, "request irq(%d) fail\n",
128 tqp_vectors->vector_irq);
129 return ret;
130 }
131
132 tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
133 }
134
135 return 0;
136}
137
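/* Write the per-vector interrupt mask register: callers pass 1 to enable
 * the vector's interrupt and 0 to mask it.
 */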
138static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
139 u32 mask_en)
140{
141 writel(mask_en, tqp_vector->mask_addr);
142}
143
144static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
145{
146 napi_enable(&tqp_vector->napi);
147
148 /* enable vector */
149 hns3_mask_vector_irq(tqp_vector, 1);
150}
151
152static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
153{
154 /* disable vector */
155 hns3_mask_vector_irq(tqp_vector, 0);
156
157 disable_irq(tqp_vector->vector_irq);
158 napi_disable(&tqp_vector->napi);
159}
160
161static void hns3_set_vector_coalesc_gl(struct hns3_enet_tqp_vector *tqp_vector,
162 u32 gl_value)
163{
164 /* this defines the configuration for GL (Interrupt Gap Limiter)
 165 * GL defines the inter-interrupt gap.
 166 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
167 */
168 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
169 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
170 writel(gl_value, tqp_vector->mask_addr + HNS3_VECTOR_GL2_OFFSET);
171}
172
173static void hns3_set_vector_coalesc_rl(struct hns3_enet_tqp_vector *tqp_vector,
174 u32 rl_value)
175{
176 /* this defines the configuration for RL (Interrupt Rate Limiter).
 177 * RL defines the rate of interrupts, i.e. the number of interrupts per second.
 178 * GL and RL (Rate Limiter) are two ways to achieve interrupt coalescing.
179 */
180 writel(rl_value, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
181}
182
183static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector)
184{
185 /* initialize the configuration for interrupt coalescing.
186 * 1. GL (Interrupt Gap Limiter)
187 * 2. RL (Interrupt Rate Limiter)
188 */
189
 190 /* Default: enable interrupt coalescing */
191 tqp_vector->rx_group.int_gl = HNS3_INT_GL_50K;
192 tqp_vector->tx_group.int_gl = HNS3_INT_GL_50K;
193 hns3_set_vector_coalesc_gl(tqp_vector, HNS3_INT_GL_50K);
194 /* for now we are disabling Interrupt RL - we
195 * will re-enable later
196 */
197 hns3_set_vector_coalesc_rl(tqp_vector, 0);
198 tqp_vector->rx_group.flow_level = HNS3_FLOW_LOW;
199 tqp_vector->tx_group.flow_level = HNS3_FLOW_LOW;
200}
201
Yunsheng Lin9df8f792017-09-27 09:45:32 +0800202static int hns3_nic_set_real_num_queue(struct net_device *netdev)
203{
Yunsheng Lin9780cb92017-10-09 15:43:56 +0800204 struct hnae3_handle *h = hns3_get_handle(netdev);
Yunsheng Lin9df8f792017-09-27 09:45:32 +0800205 struct hnae3_knic_private_info *kinfo = &h->kinfo;
206 unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
207 int ret;
208
209 ret = netif_set_real_num_tx_queues(netdev, queue_size);
210 if (ret) {
211 netdev_err(netdev,
212 "netif_set_real_num_tx_queues fail, ret=%d!\n",
213 ret);
214 return ret;
215 }
216
217 ret = netif_set_real_num_rx_queues(netdev, queue_size);
218 if (ret) {
219 netdev_err(netdev,
220 "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
221 return ret;
222 }
223
224 return 0;
225}
226
Salil76ad4f02017-08-02 16:59:45 +0100227static int hns3_nic_net_up(struct net_device *netdev)
228{
229 struct hns3_nic_priv *priv = netdev_priv(netdev);
230 struct hnae3_handle *h = priv->ae_handle;
231 int i, j;
232 int ret;
233
234 /* get irq resource for all vectors */
235 ret = hns3_nic_init_irq(priv);
236 if (ret) {
237 netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
238 return ret;
239 }
240
241 /* enable the vectors */
242 for (i = 0; i < priv->vector_num; i++)
243 hns3_vector_enable(&priv->tqp_vector[i]);
244
245 /* start the ae_dev */
246 ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
247 if (ret)
248 goto out_start_err;
249
250 return 0;
251
252out_start_err:
253 for (j = i - 1; j >= 0; j--)
254 hns3_vector_disable(&priv->tqp_vector[j]);
255
256 hns3_nic_uninit_irq(priv);
257
258 return ret;
259}
260
261static int hns3_nic_net_open(struct net_device *netdev)
262{
Lipengf8fa222c2017-11-02 20:45:20 +0800263 struct hns3_nic_priv *priv = netdev_priv(netdev);
Salil76ad4f02017-08-02 16:59:45 +0100264 int ret;
265
266 netif_carrier_off(netdev);
267
Yunsheng Lin9df8f792017-09-27 09:45:32 +0800268 ret = hns3_nic_set_real_num_queue(netdev);
269 if (ret)
Salil76ad4f02017-08-02 16:59:45 +0100270 return ret;
Salil76ad4f02017-08-02 16:59:45 +0100271
272 ret = hns3_nic_net_up(netdev);
273 if (ret) {
274 netdev_err(netdev,
275 "hns net up fail, ret=%d!\n", ret);
276 return ret;
277 }
278
Lipengf8fa222c2017-11-02 20:45:20 +0800279 priv->last_reset_time = jiffies;
Salil76ad4f02017-08-02 16:59:45 +0100280 return 0;
281}
282
283static void hns3_nic_net_down(struct net_device *netdev)
284{
285 struct hns3_nic_priv *priv = netdev_priv(netdev);
286 const struct hnae3_ae_ops *ops;
287 int i;
288
289 /* stop ae_dev */
290 ops = priv->ae_handle->ae_algo->ops;
291 if (ops->stop)
292 ops->stop(priv->ae_handle);
293
294 /* disable vectors */
295 for (i = 0; i < priv->vector_num; i++)
296 hns3_vector_disable(&priv->tqp_vector[i]);
297
298 /* free irq resources */
299 hns3_nic_uninit_irq(priv);
300}
301
302static int hns3_nic_net_stop(struct net_device *netdev)
303{
304 netif_tx_stop_all_queues(netdev);
305 netif_carrier_off(netdev);
306
307 hns3_nic_net_down(netdev);
308
309 return 0;
310}
311
Salil76ad4f02017-08-02 16:59:45 +0100312static int hns3_nic_uc_sync(struct net_device *netdev,
313 const unsigned char *addr)
314{
Yunsheng Lin9780cb92017-10-09 15:43:56 +0800315 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +0100316
317 if (h->ae_algo->ops->add_uc_addr)
318 return h->ae_algo->ops->add_uc_addr(h, addr);
319
320 return 0;
321}
322
323static int hns3_nic_uc_unsync(struct net_device *netdev,
324 const unsigned char *addr)
325{
Yunsheng Lin9780cb92017-10-09 15:43:56 +0800326 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +0100327
328 if (h->ae_algo->ops->rm_uc_addr)
329 return h->ae_algo->ops->rm_uc_addr(h, addr);
330
331 return 0;
332}
333
334static int hns3_nic_mc_sync(struct net_device *netdev,
335 const unsigned char *addr)
336{
Yunsheng Lin9780cb92017-10-09 15:43:56 +0800337 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +0100338
Dan Carpenter720a8472017-08-10 12:56:14 +0300339 if (h->ae_algo->ops->add_mc_addr)
Salil76ad4f02017-08-02 16:59:45 +0100340 return h->ae_algo->ops->add_mc_addr(h, addr);
341
342 return 0;
343}
344
345static int hns3_nic_mc_unsync(struct net_device *netdev,
346 const unsigned char *addr)
347{
Yunsheng Lin9780cb92017-10-09 15:43:56 +0800348 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +0100349
Dan Carpenter720a8472017-08-10 12:56:14 +0300350 if (h->ae_algo->ops->rm_mc_addr)
Salil76ad4f02017-08-02 16:59:45 +0100351 return h->ae_algo->ops->rm_mc_addr(h, addr);
352
353 return 0;
354}
355
Yunsheng Lin1db9b1b2017-10-09 15:44:01 +0800356static void hns3_nic_set_rx_mode(struct net_device *netdev)
Salil76ad4f02017-08-02 16:59:45 +0100357{
Yunsheng Lin9780cb92017-10-09 15:43:56 +0800358 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +0100359
360 if (h->ae_algo->ops->set_promisc_mode) {
361 if (netdev->flags & IFF_PROMISC)
362 h->ae_algo->ops->set_promisc_mode(h, 1);
363 else
364 h->ae_algo->ops->set_promisc_mode(h, 0);
365 }
366 if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
367 netdev_err(netdev, "sync uc address fail\n");
368 if (netdev->flags & IFF_MULTICAST)
369 if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
370 netdev_err(netdev, "sync mc address fail\n");
371}
372
373static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
374 u16 *mss, u32 *type_cs_vlan_tso)
375{
376 u32 l4_offset, hdr_len;
377 union l3_hdr_info l3;
378 union l4_hdr_info l4;
379 u32 l4_paylen;
380 int ret;
381
382 if (!skb_is_gso(skb))
383 return 0;
384
385 ret = skb_cow_head(skb, 0);
386 if (ret)
387 return ret;
388
389 l3.hdr = skb_network_header(skb);
390 l4.hdr = skb_transport_header(skb);
391
392 /* Software should clear the IPv4's checksum field when tso is
393 * needed.
394 */
395 if (l3.v4->version == 4)
396 l3.v4->check = 0;
397
398 /* tunnel packet.*/
399 if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
400 SKB_GSO_GRE_CSUM |
401 SKB_GSO_UDP_TUNNEL |
402 SKB_GSO_UDP_TUNNEL_CSUM)) {
403 if ((!(skb_shinfo(skb)->gso_type &
404 SKB_GSO_PARTIAL)) &&
405 (skb_shinfo(skb)->gso_type &
406 SKB_GSO_UDP_TUNNEL_CSUM)) {
407 /* Software should clear the udp's checksum
408 * field when tso is needed.
409 */
410 l4.udp->check = 0;
411 }
412 /* reset l3&l4 pointers from outer to inner headers */
413 l3.hdr = skb_inner_network_header(skb);
414 l4.hdr = skb_inner_transport_header(skb);
415
416 /* Software should clear the IPv4's checksum field when
417 * tso is needed.
418 */
419 if (l3.v4->version == 4)
420 l3.v4->check = 0;
421 }
422
423 /* normal or tunnel packet*/
424 l4_offset = l4.hdr - skb->data;
425 hdr_len = (l4.tcp->doff * 4) + l4_offset;
426
427 /* remove payload length from inner pseudo checksum when tso*/
428 l4_paylen = skb->len - l4_offset;
429 csum_replace_by_diff(&l4.tcp->check,
430 (__force __wsum)htonl(l4_paylen));
431
432 /* find the txbd field values */
433 *paylen = skb->len - hdr_len;
434 hnae_set_bit(*type_cs_vlan_tso,
435 HNS3_TXD_TSO_B, 1);
436
437 /* get MSS for TSO */
438 *mss = skb_shinfo(skb)->gso_size;
439
440 return 0;
441}
442
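/* Work out the outer and inner L4 protocol numbers from the skb headers;
 * *il4_proto is left as 0 when the packet is not encapsulated.
 */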
Salil1898d4e2017-08-18 12:31:39 +0100443static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
444 u8 *il4_proto)
Salil76ad4f02017-08-02 16:59:45 +0100445{
446 union {
447 struct iphdr *v4;
448 struct ipv6hdr *v6;
449 unsigned char *hdr;
450 } l3;
451 unsigned char *l4_hdr;
452 unsigned char *exthdr;
453 u8 l4_proto_tmp;
454 __be16 frag_off;
455
 456 /* find the outer header pointers */
457 l3.hdr = skb_network_header(skb);
458 l4_hdr = skb_inner_transport_header(skb);
459
460 if (skb->protocol == htons(ETH_P_IPV6)) {
461 exthdr = l3.hdr + sizeof(*l3.v6);
462 l4_proto_tmp = l3.v6->nexthdr;
463 if (l4_hdr != exthdr)
464 ipv6_skip_exthdr(skb, exthdr - skb->data,
465 &l4_proto_tmp, &frag_off);
466 } else if (skb->protocol == htons(ETH_P_IP)) {
467 l4_proto_tmp = l3.v4->protocol;
Salil1898d4e2017-08-18 12:31:39 +0100468 } else {
469 return -EINVAL;
Salil76ad4f02017-08-02 16:59:45 +0100470 }
471
472 *ol4_proto = l4_proto_tmp;
473
 474 /* check for a tunnel packet; a non-encapsulated skb has no inner L4 protocol */
475 if (!skb->encapsulation) {
476 *il4_proto = 0;
Salil1898d4e2017-08-18 12:31:39 +0100477 return 0;
Salil76ad4f02017-08-02 16:59:45 +0100478 }
479
 480 /* find the inner header pointers */
481 l3.hdr = skb_inner_network_header(skb);
482 l4_hdr = skb_inner_transport_header(skb);
483
484 if (l3.v6->version == 6) {
485 exthdr = l3.hdr + sizeof(*l3.v6);
486 l4_proto_tmp = l3.v6->nexthdr;
487 if (l4_hdr != exthdr)
488 ipv6_skip_exthdr(skb, exthdr - skb->data,
489 &l4_proto_tmp, &frag_off);
490 } else if (l3.v4->version == 4) {
491 l4_proto_tmp = l3.v4->protocol;
492 }
493
494 *il4_proto = l4_proto_tmp;
Salil1898d4e2017-08-18 12:31:39 +0100495
496 return 0;
Salil76ad4f02017-08-02 16:59:45 +0100497}
498
499static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
500 u8 il4_proto, u32 *type_cs_vlan_tso,
501 u32 *ol_type_vlan_len_msec)
502{
503 union {
504 struct iphdr *v4;
505 struct ipv6hdr *v6;
506 unsigned char *hdr;
507 } l3;
508 union {
509 struct tcphdr *tcp;
510 struct udphdr *udp;
511 struct gre_base_hdr *gre;
512 unsigned char *hdr;
513 } l4;
514 unsigned char *l2_hdr;
515 u8 l4_proto = ol4_proto;
516 u32 ol2_len;
517 u32 ol3_len;
518 u32 ol4_len;
519 u32 l2_len;
520 u32 l3_len;
521
522 l3.hdr = skb_network_header(skb);
523 l4.hdr = skb_transport_header(skb);
524
525 /* compute L2 header size for normal packet, defined in 2 Bytes */
526 l2_len = l3.hdr - skb->data;
527 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
528 HNS3_TXD_L2LEN_S, l2_len >> 1);
529
530 /* tunnel packet*/
531 if (skb->encapsulation) {
532 /* compute OL2 header size, defined in 2 Bytes */
533 ol2_len = l2_len;
534 hnae_set_field(*ol_type_vlan_len_msec,
535 HNS3_TXD_L2LEN_M,
536 HNS3_TXD_L2LEN_S, ol2_len >> 1);
537
538 /* compute OL3 header size, defined in 4 Bytes */
539 ol3_len = l4.hdr - l3.hdr;
540 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
541 HNS3_TXD_L3LEN_S, ol3_len >> 2);
542
543 /* MAC in UDP, MAC in GRE (0x6558)*/
544 if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
545 /* switch MAC header ptr from outer to inner header.*/
546 l2_hdr = skb_inner_mac_header(skb);
547
548 /* compute OL4 header size, defined in 4 Bytes. */
549 ol4_len = l2_hdr - l4.hdr;
550 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
551 HNS3_TXD_L4LEN_S, ol4_len >> 2);
552
553 /* switch IP header ptr from outer to inner header */
554 l3.hdr = skb_inner_network_header(skb);
555
556 /* compute inner l2 header size, defined in 2 Bytes. */
557 l2_len = l3.hdr - l2_hdr;
558 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
559 HNS3_TXD_L2LEN_S, l2_len >> 1);
560 } else {
561 /* skb packet types not supported by hardware,
562 * txbd len fild doesn't be filled.
563 */
564 return;
565 }
566
567 /* switch L4 header pointer from outer to inner */
568 l4.hdr = skb_inner_transport_header(skb);
569
570 l4_proto = il4_proto;
571 }
572
573 /* compute inner(/normal) L3 header size, defined in 4 Bytes */
574 l3_len = l4.hdr - l3.hdr;
575 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
576 HNS3_TXD_L3LEN_S, l3_len >> 2);
577
578 /* compute inner(/normal) L4 header size, defined in 4 Bytes */
579 switch (l4_proto) {
580 case IPPROTO_TCP:
581 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
582 HNS3_TXD_L4LEN_S, l4.tcp->doff);
583 break;
584 case IPPROTO_SCTP:
585 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
586 HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
587 break;
588 case IPPROTO_UDP:
589 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
590 HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
591 break;
592 default:
 593 /* for skb packet types not supported by the hardware,
 594 * the txbd len fields are left unfilled.
595 */
596 return;
597 }
598}
599
600static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
601 u8 il4_proto, u32 *type_cs_vlan_tso,
602 u32 *ol_type_vlan_len_msec)
603{
604 union {
605 struct iphdr *v4;
606 struct ipv6hdr *v6;
607 unsigned char *hdr;
608 } l3;
609 u32 l4_proto = ol4_proto;
610
611 l3.hdr = skb_network_header(skb);
612
613 /* define OL3 type and tunnel type(OL4).*/
614 if (skb->encapsulation) {
615 /* define outer network header type.*/
616 if (skb->protocol == htons(ETH_P_IP)) {
617 if (skb_is_gso(skb))
618 hnae_set_field(*ol_type_vlan_len_msec,
619 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
620 HNS3_OL3T_IPV4_CSUM);
621 else
622 hnae_set_field(*ol_type_vlan_len_msec,
623 HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
624 HNS3_OL3T_IPV4_NO_CSUM);
625
626 } else if (skb->protocol == htons(ETH_P_IPV6)) {
627 hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
628 HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
629 }
630
631 /* define tunnel type(OL4).*/
632 switch (l4_proto) {
633 case IPPROTO_UDP:
634 hnae_set_field(*ol_type_vlan_len_msec,
635 HNS3_TXD_TUNTYPE_M,
636 HNS3_TXD_TUNTYPE_S,
637 HNS3_TUN_MAC_IN_UDP);
638 break;
639 case IPPROTO_GRE:
640 hnae_set_field(*ol_type_vlan_len_msec,
641 HNS3_TXD_TUNTYPE_M,
642 HNS3_TXD_TUNTYPE_S,
643 HNS3_TUN_NVGRE);
644 break;
645 default:
 646 /* drop the tunnel packet if the hardware doesn't support it,
 647 * because the hardware can't calculate the csum when doing TSO.
648 */
649 if (skb_is_gso(skb))
650 return -EDOM;
651
 652 /* the stack has already computed the IP header checksum;
 653 * compute the L4 checksum in software when not doing TSO.
654 */
655 skb_checksum_help(skb);
656 return 0;
657 }
658
659 l3.hdr = skb_inner_network_header(skb);
660 l4_proto = il4_proto;
661 }
662
663 if (l3.v4->version == 4) {
664 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
665 HNS3_TXD_L3T_S, HNS3_L3T_IPV4);
666
667 /* the stack computes the IP header already, the only time we
668 * need the hardware to recompute it is in the case of TSO.
669 */
670 if (skb_is_gso(skb))
671 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);
672
673 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
674 } else if (l3.v6->version == 6) {
675 hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
676 HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
677 hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
678 }
679
680 switch (l4_proto) {
681 case IPPROTO_TCP:
682 hnae_set_field(*type_cs_vlan_tso,
683 HNS3_TXD_L4T_M,
684 HNS3_TXD_L4T_S,
685 HNS3_L4T_TCP);
686 break;
687 case IPPROTO_UDP:
688 hnae_set_field(*type_cs_vlan_tso,
689 HNS3_TXD_L4T_M,
690 HNS3_TXD_L4T_S,
691 HNS3_L4T_UDP);
692 break;
693 case IPPROTO_SCTP:
694 hnae_set_field(*type_cs_vlan_tso,
695 HNS3_TXD_L4T_M,
696 HNS3_TXD_L4T_S,
697 HNS3_L4T_SCTP);
698 break;
699 default:
 700 /* drop the tunnel packet if the hardware doesn't support it,
 701 * because the hardware can't calculate the csum when doing TSO.
702 */
703 if (skb_is_gso(skb))
704 return -EDOM;
705
 706 /* the stack has already computed the IP header checksum;
 707 * compute the L4 checksum in software when not doing TSO.
708 */
709 skb_checksum_help(skb);
710 return 0;
711 }
712
713 return 0;
714}
715
716static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
717{
718 /* Config bd buffer end */
719 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
720 HNS3_TXD_BDTYPE_M, 0);
721 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
722 hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
Lipeng7036d262017-10-24 21:02:09 +0800723 hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
Salil76ad4f02017-08-02 16:59:45 +0100724}
725
Peng Li9699cff2017-12-22 12:21:48 +0800726static int hns3_fill_desc_vtags(struct sk_buff *skb,
727 struct hns3_enet_ring *tx_ring,
728 u32 *inner_vlan_flag,
729 u32 *out_vlan_flag,
730 u16 *inner_vtag,
731 u16 *out_vtag)
732{
733#define HNS3_TX_VLAN_PRIO_SHIFT 13
734
735 if (skb->protocol == htons(ETH_P_8021Q) &&
736 !(tx_ring->tqp->handle->kinfo.netdev->features &
737 NETIF_F_HW_VLAN_CTAG_TX)) {
738 /* When HW VLAN acceleration is turned off, and the stack
 739 * sets the protocol to 802.1q, the driver just needs to
740 * set the protocol to the encapsulated ethertype.
741 */
742 skb->protocol = vlan_get_protocol(skb);
743 return 0;
744 }
745
746 if (skb_vlan_tag_present(skb)) {
747 u16 vlan_tag;
748
749 vlan_tag = skb_vlan_tag_get(skb);
750 vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;
751
 752 /* Based on hw strategy, use out_vtag in the double-tagged case
 753 * and inner_vtag in the single-tag case.
754 */
755 if (skb->protocol == htons(ETH_P_8021Q)) {
756 hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
757 *out_vtag = vlan_tag;
758 } else {
759 hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
760 *inner_vtag = vlan_tag;
761 }
762 } else if (skb->protocol == htons(ETH_P_8021Q)) {
763 struct vlan_ethhdr *vhdr;
764 int rc;
765
766 rc = skb_cow_head(skb, 0);
767 if (rc < 0)
768 return rc;
769 vhdr = (struct vlan_ethhdr *)skb->data;
770 vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
771 << HNS3_TX_VLAN_PRIO_SHIFT);
772 }
773
774 skb->protocol = vlan_get_protocol(skb);
775 return 0;
776}
777
Salil76ad4f02017-08-02 16:59:45 +0100778static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
779 int size, dma_addr_t dma, int frag_end,
780 enum hns_desc_type type)
781{
782 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
783 struct hns3_desc *desc = &ring->desc[ring->next_to_use];
784 u32 ol_type_vlan_len_msec = 0;
785 u16 bdtp_fe_sc_vld_ra_ri = 0;
786 u32 type_cs_vlan_tso = 0;
787 struct sk_buff *skb;
Peng Li9699cff2017-12-22 12:21:48 +0800788 u16 inner_vtag = 0;
789 u16 out_vtag = 0;
Salil76ad4f02017-08-02 16:59:45 +0100790 u32 paylen = 0;
791 u16 mss = 0;
792 __be16 protocol;
793 u8 ol4_proto;
794 u8 il4_proto;
795 int ret;
796
797 /* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
798 desc_cb->priv = priv;
799 desc_cb->length = size;
800 desc_cb->dma = dma;
801 desc_cb->type = type;
802
803 /* now, fill the descriptor */
804 desc->addr = cpu_to_le64(dma);
805 desc->tx.send_size = cpu_to_le16((u16)size);
806 hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
807 desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);
808
809 if (type == DESC_TYPE_SKB) {
810 skb = (struct sk_buff *)priv;
Yunsheng Lina90bb9a2017-10-09 15:44:00 +0800811 paylen = skb->len;
Salil76ad4f02017-08-02 16:59:45 +0100812
Peng Li9699cff2017-12-22 12:21:48 +0800813 ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
814 &ol_type_vlan_len_msec,
815 &inner_vtag, &out_vtag);
816 if (unlikely(ret))
817 return ret;
818
Salil76ad4f02017-08-02 16:59:45 +0100819 if (skb->ip_summed == CHECKSUM_PARTIAL) {
820 skb_reset_mac_len(skb);
821 protocol = skb->protocol;
822
Salil1898d4e2017-08-18 12:31:39 +0100823 ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
824 if (ret)
825 return ret;
Salil76ad4f02017-08-02 16:59:45 +0100826 hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
827 &type_cs_vlan_tso,
828 &ol_type_vlan_len_msec);
829 ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
830 &type_cs_vlan_tso,
831 &ol_type_vlan_len_msec);
832 if (ret)
833 return ret;
834
835 ret = hns3_set_tso(skb, &paylen, &mss,
836 &type_cs_vlan_tso);
837 if (ret)
838 return ret;
839 }
840
841 /* Set txbd */
842 desc->tx.ol_type_vlan_len_msec =
843 cpu_to_le32(ol_type_vlan_len_msec);
844 desc->tx.type_cs_vlan_tso_len =
845 cpu_to_le32(type_cs_vlan_tso);
Yunsheng Lina90bb9a2017-10-09 15:44:00 +0800846 desc->tx.paylen = cpu_to_le32(paylen);
Salil76ad4f02017-08-02 16:59:45 +0100847 desc->tx.mss = cpu_to_le16(mss);
Peng Li9699cff2017-12-22 12:21:48 +0800848 desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
849 desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
Salil76ad4f02017-08-02 16:59:45 +0100850 }
851
852 /* move ring pointer to next.*/
853 ring_ptr_move_fw(ring, next_to_use);
854
855 return 0;
856}
857
858static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
859 int size, dma_addr_t dma, int frag_end,
860 enum hns_desc_type type)
861{
862 unsigned int frag_buf_num;
863 unsigned int k;
864 int sizeoflast;
865 int ret;
866
867 frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
868 sizeoflast = size % HNS3_MAX_BD_SIZE;
869 sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;
870
 871 /* When the frag size is bigger than the hardware's max BD size, split this frag */
872 for (k = 0; k < frag_buf_num; k++) {
873 ret = hns3_fill_desc(ring, priv,
874 (k == frag_buf_num - 1) ?
875 sizeoflast : HNS3_MAX_BD_SIZE,
876 dma + HNS3_MAX_BD_SIZE * k,
877 frag_end && (k == frag_buf_num - 1) ? 1 : 0,
878 (type == DESC_TYPE_SKB && !k) ?
879 DESC_TYPE_SKB : DESC_TYPE_PAGE);
880 if (ret)
881 return ret;
882 }
883
884 return 0;
885}
886
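/* Count the TX BDs a TSO skb needs: the linear part and every frag are
 * split into HNS3_MAX_BD_SIZE chunks. Returns -ENOMEM if one frag needs
 * more than HNS3_MAX_BD_PER_FRAG BDs, -EBUSY if the ring has no room.
 */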
887static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
888 struct hns3_enet_ring *ring)
889{
890 struct sk_buff *skb = *out_skb;
891 struct skb_frag_struct *frag;
892 int bdnum_for_frag;
893 int frag_num;
894 int buf_num;
895 int size;
896 int i;
897
898 size = skb_headlen(skb);
899 buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
900
901 frag_num = skb_shinfo(skb)->nr_frags;
902 for (i = 0; i < frag_num; i++) {
903 frag = &skb_shinfo(skb)->frags[i];
904 size = skb_frag_size(frag);
905 bdnum_for_frag =
906 (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
907 if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
908 return -ENOMEM;
909
910 buf_num += bdnum_for_frag;
911 }
912
913 if (buf_num > ring_space(ring))
914 return -EBUSY;
915
916 *bnum = buf_num;
917 return 0;
918}
919
920static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
921 struct hns3_enet_ring *ring)
922{
923 struct sk_buff *skb = *out_skb;
924 int buf_num;
925
926 /* No. of segments (plus a header) */
927 buf_num = skb_shinfo(skb)->nr_frags + 1;
928
929 if (buf_num > ring_space(ring))
930 return -EBUSY;
931
932 *bnum = buf_num;
933
934 return 0;
935}
936
937static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
938{
939 struct device *dev = ring_to_dev(ring);
940 unsigned int i;
941
942 for (i = 0; i < ring->desc_num; i++) {
943 /* check if this is where we started */
944 if (ring->next_to_use == next_to_use_orig)
945 break;
946
947 /* unmap the descriptor dma address */
948 if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
949 dma_unmap_single(dev,
950 ring->desc_cb[ring->next_to_use].dma,
951 ring->desc_cb[ring->next_to_use].length,
952 DMA_TO_DEVICE);
953 else
954 dma_unmap_page(dev,
955 ring->desc_cb[ring->next_to_use].dma,
956 ring->desc_cb[ring->next_to_use].length,
957 DMA_TO_DEVICE);
958
959 /* rollback one */
960 ring_ptr_move_bw(ring, next_to_use);
961 }
962}
963
Yunsheng Lind43e5ac2017-10-20 10:19:21 +0800964netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
Salil76ad4f02017-08-02 16:59:45 +0100965{
966 struct hns3_nic_priv *priv = netdev_priv(netdev);
967 struct hns3_nic_ring_data *ring_data =
968 &tx_ring_data(priv, skb->queue_mapping);
969 struct hns3_enet_ring *ring = ring_data->ring;
970 struct device *dev = priv->dev;
971 struct netdev_queue *dev_queue;
972 struct skb_frag_struct *frag;
973 int next_to_use_head;
974 int next_to_use_frag;
975 dma_addr_t dma;
976 int buf_num;
977 int seg_num;
978 int size;
979 int ret;
980 int i;
981
982 /* Prefetch the data used later */
983 prefetch(skb->data);
984
985 switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
986 case -EBUSY:
987 u64_stats_update_begin(&ring->syncp);
988 ring->stats.tx_busy++;
989 u64_stats_update_end(&ring->syncp);
990
991 goto out_net_tx_busy;
992 case -ENOMEM:
993 u64_stats_update_begin(&ring->syncp);
994 ring->stats.sw_err_cnt++;
995 u64_stats_update_end(&ring->syncp);
996 netdev_err(netdev, "no memory to xmit!\n");
997
998 goto out_err_tx_ok;
999 default:
1000 break;
1001 }
1002
1003 /* No. of segments (plus a header) */
1004 seg_num = skb_shinfo(skb)->nr_frags + 1;
1005 /* Fill the first part */
1006 size = skb_headlen(skb);
1007
1008 next_to_use_head = ring->next_to_use;
1009
1010 dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
1011 if (dma_mapping_error(dev, dma)) {
1012 netdev_err(netdev, "TX head DMA map failed\n");
1013 ring->stats.sw_err_cnt++;
1014 goto out_err_tx_ok;
1015 }
1016
1017 ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
1018 DESC_TYPE_SKB);
1019 if (ret)
1020 goto head_dma_map_err;
1021
1022 next_to_use_frag = ring->next_to_use;
1023 /* Fill the fragments */
1024 for (i = 1; i < seg_num; i++) {
1025 frag = &skb_shinfo(skb)->frags[i - 1];
1026 size = skb_frag_size(frag);
1027 dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
1028 if (dma_mapping_error(dev, dma)) {
1029 netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
1030 ring->stats.sw_err_cnt++;
1031 goto frag_dma_map_err;
1032 }
1033 ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
1034 seg_num - 1 == i ? 1 : 0,
1035 DESC_TYPE_PAGE);
1036
1037 if (ret)
1038 goto frag_dma_map_err;
1039 }
1040
 1041 /* Done translating the whole packet into descriptors */
1042 dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
1043 netdev_tx_sent_queue(dev_queue, skb->len);
1044
1045 wmb(); /* Commit all data before submit */
1046
1047 hnae_queue_xmit(ring->tqp, buf_num);
1048
1049 return NETDEV_TX_OK;
1050
1051frag_dma_map_err:
1052 hns_nic_dma_unmap(ring, next_to_use_frag);
1053
1054head_dma_map_err:
1055 hns_nic_dma_unmap(ring, next_to_use_head);
1056
1057out_err_tx_ok:
1058 dev_kfree_skb_any(skb);
1059 return NETDEV_TX_OK;
1060
1061out_net_tx_busy:
1062 netif_stop_subqueue(netdev, ring_data->queue_index);
1063 smp_mb(); /* Commit all data before submit */
1064
1065 return NETDEV_TX_BUSY;
1066}
1067
1068static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
1069{
Yunsheng Lin9780cb92017-10-09 15:43:56 +08001070 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +01001071 struct sockaddr *mac_addr = p;
1072 int ret;
1073
1074 if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
1075 return -EADDRNOTAVAIL;
1076
1077 ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data);
1078 if (ret) {
1079 netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
1080 return ret;
1081 }
1082
1083 ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);
1084
1085 return 0;
1086}
1087
1088static int hns3_nic_set_features(struct net_device *netdev,
1089 netdev_features_t features)
1090{
1091 struct hns3_nic_priv *priv = netdev_priv(netdev);
Peng Li052ece62017-12-22 12:21:47 +08001092 struct hnae3_handle *h = priv->ae_handle;
1093 netdev_features_t changed;
1094 int ret;
Salil76ad4f02017-08-02 16:59:45 +01001095
1096 if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
1097 priv->ops.fill_desc = hns3_fill_desc_tso;
1098 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
1099 } else {
1100 priv->ops.fill_desc = hns3_fill_desc;
1101 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
1102 }
1103
Peng Li052ece62017-12-22 12:21:47 +08001104 changed = netdev->features ^ features;
1105 if (changed & NETIF_F_HW_VLAN_CTAG_RX) {
1106 if (features & NETIF_F_HW_VLAN_CTAG_RX)
1107 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
1108 else
1109 ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);
1110
1111 if (ret)
1112 return ret;
1113 }
1114
Salil76ad4f02017-08-02 16:59:45 +01001115 netdev->features = features;
1116 return 0;
1117}
1118
1119static void
1120hns3_nic_get_stats64(struct net_device *netdev, struct rtnl_link_stats64 *stats)
1121{
1122 struct hns3_nic_priv *priv = netdev_priv(netdev);
1123 int queue_num = priv->ae_handle->kinfo.num_tqps;
1124 struct hns3_enet_ring *ring;
1125 unsigned int start;
1126 unsigned int idx;
1127 u64 tx_bytes = 0;
1128 u64 rx_bytes = 0;
1129 u64 tx_pkts = 0;
1130 u64 rx_pkts = 0;
1131
1132 for (idx = 0; idx < queue_num; idx++) {
1133 /* fetch the tx stats */
1134 ring = priv->ring_data[idx].ring;
1135 do {
Salild36d36c2017-08-18 12:31:37 +01001136 start = u64_stats_fetch_begin_irq(&ring->syncp);
Salil76ad4f02017-08-02 16:59:45 +01001137 tx_bytes += ring->stats.tx_bytes;
1138 tx_pkts += ring->stats.tx_pkts;
1139 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1140
1141 /* fetch the rx stats */
1142 ring = priv->ring_data[idx + queue_num].ring;
1143 do {
Salild36d36c2017-08-18 12:31:37 +01001144 start = u64_stats_fetch_begin_irq(&ring->syncp);
Salil76ad4f02017-08-02 16:59:45 +01001145 rx_bytes += ring->stats.rx_bytes;
1146 rx_pkts += ring->stats.rx_pkts;
1147 } while (u64_stats_fetch_retry_irq(&ring->syncp, start));
1148 }
1149
1150 stats->tx_bytes = tx_bytes;
1151 stats->tx_packets = tx_pkts;
1152 stats->rx_bytes = rx_bytes;
1153 stats->rx_packets = rx_pkts;
1154
1155 stats->rx_errors = netdev->stats.rx_errors;
1156 stats->multicast = netdev->stats.multicast;
1157 stats->rx_length_errors = netdev->stats.rx_length_errors;
1158 stats->rx_crc_errors = netdev->stats.rx_crc_errors;
1159 stats->rx_missed_errors = netdev->stats.rx_missed_errors;
1160
1161 stats->tx_errors = netdev->stats.tx_errors;
1162 stats->rx_dropped = netdev->stats.rx_dropped;
1163 stats->tx_dropped = netdev->stats.tx_dropped;
1164 stats->collisions = netdev->stats.collisions;
1165 stats->rx_over_errors = netdev->stats.rx_over_errors;
1166 stats->rx_frame_errors = netdev->stats.rx_frame_errors;
1167 stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
1168 stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
1169 stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
1170 stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
1171 stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
1172 stats->tx_window_errors = netdev->stats.tx_window_errors;
1173 stats->rx_compressed = netdev->stats.rx_compressed;
1174 stats->tx_compressed = netdev->stats.tx_compressed;
1175}
1176
1177static void hns3_add_tunnel_port(struct net_device *netdev, u16 port,
1178 enum hns3_udp_tnl_type type)
1179{
1180 struct hns3_nic_priv *priv = netdev_priv(netdev);
1181 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1182 struct hnae3_handle *h = priv->ae_handle;
1183
1184 if (udp_tnl->used && udp_tnl->dst_port == port) {
1185 udp_tnl->used++;
1186 return;
1187 }
1188
1189 if (udp_tnl->used) {
1190 netdev_warn(netdev,
1191 "UDP tunnel [%d], port [%d] offload\n", type, port);
1192 return;
1193 }
1194
1195 udp_tnl->dst_port = port;
1196 udp_tnl->used = 1;
1197 /* TBD send command to hardware to add port */
1198 if (h->ae_algo->ops->add_tunnel_udp)
1199 h->ae_algo->ops->add_tunnel_udp(h, port);
1200}
1201
1202static void hns3_del_tunnel_port(struct net_device *netdev, u16 port,
1203 enum hns3_udp_tnl_type type)
1204{
1205 struct hns3_nic_priv *priv = netdev_priv(netdev);
1206 struct hns3_udp_tunnel *udp_tnl = &priv->udp_tnl[type];
1207 struct hnae3_handle *h = priv->ae_handle;
1208
1209 if (!udp_tnl->used || udp_tnl->dst_port != port) {
1210 netdev_warn(netdev,
1211 "Invalid UDP tunnel port %d\n", port);
1212 return;
1213 }
1214
1215 udp_tnl->used--;
1216 if (udp_tnl->used)
1217 return;
1218
1219 udp_tnl->dst_port = 0;
1220 /* TBD send command to hardware to del port */
1221 if (h->ae_algo->ops->del_tunnel_udp)
Dan Carpenter9537e7c2017-08-10 12:54:59 +03001222 h->ae_algo->ops->del_tunnel_udp(h, port);
Salil76ad4f02017-08-02 16:59:45 +01001223}
1224
1225/* hns3_nic_udp_tunnel_add - Get notification about UDP tunnel ports
1226 * @netdev: This physical port's netdev
1227 * @ti: Tunnel information
1228 */
1229static void hns3_nic_udp_tunnel_add(struct net_device *netdev,
1230 struct udp_tunnel_info *ti)
1231{
1232 u16 port_n = ntohs(ti->port);
1233
1234 switch (ti->type) {
1235 case UDP_TUNNEL_TYPE_VXLAN:
1236 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1237 break;
1238 case UDP_TUNNEL_TYPE_GENEVE:
1239 hns3_add_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1240 break;
1241 default:
1242 netdev_err(netdev, "unsupported tunnel type %d\n", ti->type);
1243 break;
1244 }
1245}
1246
1247static void hns3_nic_udp_tunnel_del(struct net_device *netdev,
1248 struct udp_tunnel_info *ti)
1249{
1250 u16 port_n = ntohs(ti->port);
1251
1252 switch (ti->type) {
1253 case UDP_TUNNEL_TYPE_VXLAN:
1254 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_VXLAN);
1255 break;
1256 case UDP_TUNNEL_TYPE_GENEVE:
1257 hns3_del_tunnel_port(netdev, port_n, HNS3_UDP_TNL_GENEVE);
1258 break;
1259 default:
1260 break;
1261 }
1262}
1263
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001264static int hns3_setup_tc(struct net_device *netdev, void *type_data)
Salil76ad4f02017-08-02 16:59:45 +01001265{
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001266 struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
Yunsheng Lin9780cb92017-10-09 15:43:56 +08001267 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +01001268 struct hnae3_knic_private_info *kinfo = &h->kinfo;
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001269 u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
1270 u8 tc = mqprio_qopt->qopt.num_tc;
1271 u16 mode = mqprio_qopt->mode;
1272 u8 hw = mqprio_qopt->qopt.hw;
1273 bool if_running;
Salil76ad4f02017-08-02 16:59:45 +01001274 unsigned int i;
1275 int ret;
1276
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001277 if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
1278 mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
1279 return -EOPNOTSUPP;
1280
Salil76ad4f02017-08-02 16:59:45 +01001281 if (tc > HNAE3_MAX_TC)
1282 return -EINVAL;
1283
Salil76ad4f02017-08-02 16:59:45 +01001284 if (!netdev)
1285 return -EINVAL;
1286
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001287 if_running = netif_running(netdev);
1288 if (if_running) {
1289 hns3_nic_net_stop(netdev);
1290 msleep(100);
Salil76ad4f02017-08-02 16:59:45 +01001291 }
1292
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001293 ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
1294 kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
Salil76ad4f02017-08-02 16:59:45 +01001295 if (ret)
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001296 goto out;
Salil76ad4f02017-08-02 16:59:45 +01001297
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001298 if (tc <= 1) {
1299 netdev_reset_tc(netdev);
1300 } else {
1301 ret = netdev_set_num_tc(netdev, tc);
1302 if (ret)
1303 goto out;
1304
1305 for (i = 0; i < HNAE3_MAX_TC; i++) {
1306 if (!kinfo->tc_info[i].enable)
1307 continue;
1308
Salil76ad4f02017-08-02 16:59:45 +01001309 netdev_set_tc_queue(netdev,
1310 kinfo->tc_info[i].tc,
1311 kinfo->tc_info[i].tqp_count,
1312 kinfo->tc_info[i].tqp_offset);
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001313 }
Salil76ad4f02017-08-02 16:59:45 +01001314 }
1315
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001316 ret = hns3_nic_set_real_num_queue(netdev);
1317
1318out:
1319 if (if_running)
1320 hns3_nic_net_open(netdev);
1321
1322 return ret;
Salil76ad4f02017-08-02 16:59:45 +01001323}
1324
Jiri Pirko2572ac52017-08-07 10:15:17 +02001325static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
Jiri Pirkode4784c2017-08-07 10:15:32 +02001326 void *type_data)
Salil76ad4f02017-08-02 16:59:45 +01001327{
Nogah Frankel575ed7d2017-11-06 07:23:42 +01001328 if (type != TC_SETUP_QDISC_MQPRIO)
Jiri Pirko38cf0422017-08-07 10:15:31 +02001329 return -EOPNOTSUPP;
Salil76ad4f02017-08-02 16:59:45 +01001330
Yunsheng Lin30d240d2017-10-17 14:51:30 +08001331 return hns3_setup_tc(dev, type_data);
Salil76ad4f02017-08-02 16:59:45 +01001332}
1333
1334static int hns3_vlan_rx_add_vid(struct net_device *netdev,
1335 __be16 proto, u16 vid)
1336{
Yunsheng Lin9780cb92017-10-09 15:43:56 +08001337 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +01001338 int ret = -EIO;
1339
1340 if (h->ae_algo->ops->set_vlan_filter)
1341 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);
1342
1343 return ret;
1344}
1345
1346static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
1347 __be16 proto, u16 vid)
1348{
Yunsheng Lin9780cb92017-10-09 15:43:56 +08001349 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +01001350 int ret = -EIO;
1351
1352 if (h->ae_algo->ops->set_vlan_filter)
1353 ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);
1354
1355 return ret;
1356}
1357
1358static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
1359 u8 qos, __be16 vlan_proto)
1360{
Yunsheng Lin9780cb92017-10-09 15:43:56 +08001361 struct hnae3_handle *h = hns3_get_handle(netdev);
Salil76ad4f02017-08-02 16:59:45 +01001362 int ret = -EIO;
1363
1364 if (h->ae_algo->ops->set_vf_vlan_filter)
1365 ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
1366 qos, vlan_proto);
1367
1368 return ret;
1369}
1370
Salila8e8b7f2017-08-21 17:05:24 +01001371static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
1372{
Yunsheng Lin9780cb92017-10-09 15:43:56 +08001373 struct hnae3_handle *h = hns3_get_handle(netdev);
Salila8e8b7f2017-08-21 17:05:24 +01001374 bool if_running = netif_running(netdev);
1375 int ret;
1376
1377 if (!h->ae_algo->ops->set_mtu)
1378 return -EOPNOTSUPP;
1379
1380 /* if this was called with netdev up then bring netdevice down */
1381 if (if_running) {
1382 (void)hns3_nic_net_stop(netdev);
1383 msleep(100);
1384 }
1385
1386 ret = h->ae_algo->ops->set_mtu(h, new_mtu);
1387 if (ret) {
1388 netdev_err(netdev, "failed to change MTU in hardware %d\n",
1389 ret);
1390 return ret;
1391 }
1392
1393 /* if the netdev was running earlier, bring it up again */
1394 if (if_running && hns3_nic_net_open(netdev))
1395 ret = -EINVAL;
1396
1397 return ret;
1398}
1399
Lipengf8fa222c2017-11-02 20:45:20 +08001400static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
1401{
1402 struct hns3_nic_priv *priv = netdev_priv(ndev);
1403 struct hns3_enet_ring *tx_ring = NULL;
1404 int timeout_queue = 0;
1405 int hw_head, hw_tail;
1406 int i;
1407
1408 /* Find the stopped queue the same way the stack does */
1409 for (i = 0; i < ndev->real_num_tx_queues; i++) {
1410 struct netdev_queue *q;
1411 unsigned long trans_start;
1412
1413 q = netdev_get_tx_queue(ndev, i);
1414 trans_start = q->trans_start;
1415 if (netif_xmit_stopped(q) &&
1416 time_after(jiffies,
1417 (trans_start + ndev->watchdog_timeo))) {
1418 timeout_queue = i;
1419 break;
1420 }
1421 }
1422
1423 if (i == ndev->num_tx_queues) {
1424 netdev_info(ndev,
1425 "no netdev TX timeout queue found, timeout count: %llu\n",
1426 priv->tx_timeout_count);
1427 return false;
1428 }
1429
1430 tx_ring = priv->ring_data[timeout_queue].ring;
1431
1432 hw_head = readl_relaxed(tx_ring->tqp->io_base +
1433 HNS3_RING_TX_RING_HEAD_REG);
1434 hw_tail = readl_relaxed(tx_ring->tqp->io_base +
1435 HNS3_RING_TX_RING_TAIL_REG);
1436 netdev_info(ndev,
1437 "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
1438 priv->tx_timeout_count,
1439 timeout_queue,
1440 tx_ring->next_to_use,
1441 tx_ring->next_to_clean,
1442 hw_head,
1443 hw_tail,
1444 readl(tx_ring->tqp_vector->mask_addr));
1445
1446 return true;
1447}
1448
1449static void hns3_nic_net_timeout(struct net_device *ndev)
1450{
1451 struct hns3_nic_priv *priv = netdev_priv(ndev);
1452 unsigned long last_reset_time = priv->last_reset_time;
1453 struct hnae3_handle *h = priv->ae_handle;
1454
1455 if (!hns3_get_tx_timeo_queue_info(ndev))
1456 return;
1457
1458 priv->tx_timeout_count++;
1459
 1460 /* This timeout is far enough away from the last timeout;
 1461 * if it times out again, set the reset type to PF reset.
1462 */
1463 if (time_after(jiffies, (last_reset_time + 20 * HZ)))
1464 priv->reset_level = HNAE3_FUNC_RESET;
1465
1466 /* Don't do any new action before the next timeout */
1467 else if (time_before(jiffies, (last_reset_time + ndev->watchdog_timeo)))
1468 return;
1469
1470 priv->last_reset_time = jiffies;
1471
1472 if (h->ae_algo->ops->reset_event)
1473 h->ae_algo->ops->reset_event(h, priv->reset_level);
1474
1475 priv->reset_level++;
1476 if (priv->reset_level > HNAE3_GLOBAL_RESET)
1477 priv->reset_level = HNAE3_GLOBAL_RESET;
1478}
1479
Salil76ad4f02017-08-02 16:59:45 +01001480static const struct net_device_ops hns3_nic_netdev_ops = {
1481 .ndo_open = hns3_nic_net_open,
1482 .ndo_stop = hns3_nic_net_stop,
1483 .ndo_start_xmit = hns3_nic_net_xmit,
Lipengf8fa222c2017-11-02 20:45:20 +08001484 .ndo_tx_timeout = hns3_nic_net_timeout,
Salil76ad4f02017-08-02 16:59:45 +01001485 .ndo_set_mac_address = hns3_nic_net_set_mac_address,
Salila8e8b7f2017-08-21 17:05:24 +01001486 .ndo_change_mtu = hns3_nic_change_mtu,
Salil76ad4f02017-08-02 16:59:45 +01001487 .ndo_set_features = hns3_nic_set_features,
1488 .ndo_get_stats64 = hns3_nic_get_stats64,
1489 .ndo_setup_tc = hns3_nic_setup_tc,
1490 .ndo_set_rx_mode = hns3_nic_set_rx_mode,
1491 .ndo_udp_tunnel_add = hns3_nic_udp_tunnel_add,
1492 .ndo_udp_tunnel_del = hns3_nic_udp_tunnel_del,
1493 .ndo_vlan_rx_add_vid = hns3_vlan_rx_add_vid,
1494 .ndo_vlan_rx_kill_vid = hns3_vlan_rx_kill_vid,
1495 .ndo_set_vf_vlan = hns3_ndo_set_vf_vlan,
1496};
1497
1498/* hns3_probe - Device initialization routine
1499 * @pdev: PCI device information struct
1500 * @ent: entry in hns3_pci_tbl
1501 *
1502 * hns3_probe initializes a PF identified by a pci_dev structure.
1503 * The OS initialization, configuring of the PF private structure,
1504 * and a hardware reset occur.
1505 *
1506 * Returns 0 on success, negative on failure
1507 */
1508static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
1509{
1510 struct hnae3_ae_dev *ae_dev;
1511 int ret;
1512
1513 ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
1514 GFP_KERNEL);
1515 if (!ae_dev) {
1516 ret = -ENOMEM;
1517 return ret;
1518 }
1519
1520 ae_dev->pdev = pdev;
Yunsheng Line92a0842017-09-20 18:52:50 +08001521 ae_dev->flag = ent->driver_data;
Salil76ad4f02017-08-02 16:59:45 +01001522 ae_dev->dev_type = HNAE3_DEV_KNIC;
1523 pci_set_drvdata(pdev, ae_dev);
1524
1525 return hnae3_register_ae_dev(ae_dev);
1526}
1527
1528/* hns3_remove - Device removal routine
1529 * @pdev: PCI device information struct
1530 */
1531static void hns3_remove(struct pci_dev *pdev)
1532{
1533 struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);
1534
1535 hnae3_unregister_ae_dev(ae_dev);
1536
1537 devm_kfree(&pdev->dev, ae_dev);
1538
1539 pci_set_drvdata(pdev, NULL);
1540}
1541
1542static struct pci_driver hns3_driver = {
1543 .name = hns3_driver_name,
1544 .id_table = hns3_pci_tbl,
1545 .probe = hns3_probe,
1546 .remove = hns3_remove,
1547};
1548
1549/* set the default features for the hns3 netdev */
1550static void hns3_set_default_feature(struct net_device *netdev)
1551{
1552 netdev->priv_flags |= IFF_UNICAST_FLT;
1553
1554 netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1555 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1556 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1557 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1558 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1559
1560 netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;
1561
1562 netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;
1563
1564 netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1565 NETIF_F_HW_VLAN_CTAG_FILTER |
Peng Li052ece62017-12-22 12:21:47 +08001566 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
Salil76ad4f02017-08-02 16:59:45 +01001567 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1568 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1569 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1570 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1571
1572 netdev->vlan_features |=
1573 NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
1574 NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
1575 NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1576 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1577 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1578
1579 netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
1580 NETIF_F_HW_VLAN_CTAG_FILTER |
Peng Li052ece62017-12-22 12:21:47 +08001581 NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
Salil76ad4f02017-08-02 16:59:45 +01001582 NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
1583 NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
1584 NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
1585 NETIF_F_GSO_UDP_TUNNEL_CSUM;
1586}
1587
1588static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
1589 struct hns3_desc_cb *cb)
1590{
1591 unsigned int order = hnae_page_order(ring);
1592 struct page *p;
1593
1594 p = dev_alloc_pages(order);
1595 if (!p)
1596 return -ENOMEM;
1597
1598 cb->priv = p;
1599 cb->page_offset = 0;
1600 cb->reuse_flag = 0;
1601 cb->buf = page_address(p);
1602 cb->length = hnae_page_size(ring);
1603 cb->type = DESC_TYPE_PAGE;
1604
Salil76ad4f02017-08-02 16:59:45 +01001605 return 0;
1606}
1607
1608static void hns3_free_buffer(struct hns3_enet_ring *ring,
1609 struct hns3_desc_cb *cb)
1610{
1611 if (cb->type == DESC_TYPE_SKB)
1612 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1613 else if (!HNAE3_IS_TX_RING(ring))
1614 put_page((struct page *)cb->priv);
1615 memset(cb, 0, sizeof(*cb));
1616}
1617
1618static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1619{
1620 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1621 cb->length, ring_to_dma_dir(ring));
1622
1623 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1624 return -EIO;
1625
1626 return 0;
1627}
1628
1629static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1630 struct hns3_desc_cb *cb)
1631{
1632 if (cb->type == DESC_TYPE_SKB)
1633 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1634 ring_to_dma_dir(ring));
1635 else
1636 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1637 ring_to_dma_dir(ring));
1638}
1639
1640static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1641{
1642 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1643 ring->desc[i].addr = 0;
1644}
1645
1646static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1647{
1648 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1649
1650 if (!ring->desc_cb[i].dma)
1651 return;
1652
1653 hns3_buffer_detach(ring, i);
1654 hns3_free_buffer(ring, cb);
1655}
1656
1657static void hns3_free_buffers(struct hns3_enet_ring *ring)
1658{
1659 int i;
1660
1661 for (i = 0; i < ring->desc_num; i++)
1662 hns3_free_buffer_detach(ring, i);
1663}
1664
1665/* free desc along with its attached buffer */
1666static void hns3_free_desc(struct hns3_enet_ring *ring)
1667{
1668 hns3_free_buffers(ring);
1669
1670 dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
1671 ring->desc_num * sizeof(ring->desc[0]),
1672 DMA_BIDIRECTIONAL);
1673 ring->desc_dma_addr = 0;
1674 kfree(ring->desc);
1675 ring->desc = NULL;
1676}
1677
1678static int hns3_alloc_desc(struct hns3_enet_ring *ring)
1679{
1680 int size = ring->desc_num * sizeof(ring->desc[0]);
1681
1682 ring->desc = kzalloc(size, GFP_KERNEL);
1683 if (!ring->desc)
1684 return -ENOMEM;
1685
1686 ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
1687 size, DMA_BIDIRECTIONAL);
1688 if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
1689 ring->desc_dma_addr = 0;
1690 kfree(ring->desc);
1691 ring->desc = NULL;
1692 return -ENOMEM;
1693 }
1694
1695 return 0;
1696}
1697
1698static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
1699 struct hns3_desc_cb *cb)
1700{
1701 int ret;
1702
1703 ret = hns3_alloc_buffer(ring, cb);
1704 if (ret)
1705 goto out;
1706
1707 ret = hns3_map_buffer(ring, cb);
1708 if (ret)
1709 goto out_with_buf;
1710
1711 return 0;
1712
1713out_with_buf:
Lipeng564883b2017-10-23 19:51:02 +08001714 hns3_free_buffer(ring, cb);
Salil76ad4f02017-08-02 16:59:45 +01001715out:
1716 return ret;
1717}
1718
1719static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
1720{
1721 int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);
1722
1723 if (ret)
1724 return ret;
1725
1726 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1727
1728 return 0;
1729}
1730
1731/* Allocate memory for raw packet buffers and map them for DMA */
1732static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
1733{
1734 int i, j, ret;
1735
1736 for (i = 0; i < ring->desc_num; i++) {
1737 ret = hns3_alloc_buffer_attach(ring, i);
1738 if (ret)
1739 goto out_buffer_fail;
1740 }
1741
1742 return 0;
1743
1744out_buffer_fail:
1745 for (j = i - 1; j >= 0; j--)
1746 hns3_free_buffer_detach(ring, j);
1747 return ret;
1748}
1749
1750/* detach an in-use buffer and replace it with a reserved one */
1751static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
1752 struct hns3_desc_cb *res_cb)
1753{
Lipengb9077422017-10-23 19:51:01 +08001754 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
Salil76ad4f02017-08-02 16:59:45 +01001755 ring->desc_cb[i] = *res_cb;
1756 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
1757}
1758
1759static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
1760{
1761 ring->desc_cb[i].reuse_flag = 0;
1762 ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
1763 + ring->desc_cb[i].page_offset);
1764}
1765
1766static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
1767 int *pkts)
1768{
1769 struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];
1770
1771 (*pkts) += (desc_cb->type == DESC_TYPE_SKB);
1772 (*bytes) += desc_cb->length;
 1773 /* desc_cb will be cleaned after hns3_free_buffer_detach */
1774 hns3_free_buffer_detach(ring, ring->next_to_clean);
1775
1776 ring_ptr_move_fw(ring, next_to_clean);
1777}
1778
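/* A head value reported by hardware is only credible if it lies between
 * next_to_clean and next_to_use, taking ring wrap-around into account.
 */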
1779static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
1780{
1781 int u = ring->next_to_use;
1782 int c = ring->next_to_clean;
1783
1784 if (unlikely(h > ring->desc_num))
1785 return 0;
1786
1787 return u > c ? (h > c && h <= u) : (h > c || h <= u);
1788}
1789
Lipeng24e750c2017-10-23 19:51:07 +08001790bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
Salil76ad4f02017-08-02 16:59:45 +01001791{
1792 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
1793 struct netdev_queue *dev_queue;
1794 int bytes, pkts;
1795 int head;
1796
1797 head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
1798 rmb(); /* Make sure head is ready before touch any data */
1799
1800 if (is_ring_empty(ring) || head == ring->next_to_clean)
1801 return true; /* no data to poll */
1802
1803 if (!is_valid_clean_head(ring, head)) {
1804 netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
1805 ring->next_to_use, ring->next_to_clean);
1806
1807 u64_stats_update_begin(&ring->syncp);
1808 ring->stats.io_err_cnt++;
1809 u64_stats_update_end(&ring->syncp);
1810 return true;
1811 }
1812
1813 bytes = 0;
1814 pkts = 0;
1815 while (head != ring->next_to_clean && budget) {
1816 hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
1817 /* Issue prefetch for next Tx descriptor */
1818 prefetch(&ring->desc_cb[ring->next_to_clean]);
1819 budget--;
1820 }
1821
1822 ring->tqp_vector->tx_group.total_bytes += bytes;
1823 ring->tqp_vector->tx_group.total_packets += pkts;
1824
1825 u64_stats_update_begin(&ring->syncp);
1826 ring->stats.tx_bytes += bytes;
1827 ring->stats.tx_pkts += pkts;
1828 u64_stats_update_end(&ring->syncp);
1829
1830 dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
1831 netdev_tx_completed_queue(dev_queue, pkts, bytes);
1832
1833 if (unlikely(pkts && netif_carrier_ok(netdev) &&
1834 (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
1835 /* Make sure that anybody stopping the queue after this
1836 * sees the new next_to_clean.
1837 */
1838 smp_mb();
1839 if (netif_tx_queue_stopped(dev_queue)) {
1840 netif_tx_wake_queue(dev_queue);
1841 ring->stats.restart_queue++;
1842 }
1843 }
1844
1845 return !!budget;
1846}
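/* Illustrative note: hns3_clean_tx_ring() reclaims at most `budget`
 * descriptors between next_to_clean and the hardware head pointer, so a
 * return value of false ("budget exhausted") tells hns3_nic_common_poll()
 * that TX work remains and NAPI must poll again before re-enabling the
 * vector's interrupt.
 */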
1847
1848static int hns3_desc_unused(struct hns3_enet_ring *ring)
1849{
1850 int ntc = ring->next_to_clean;
1851 int ntu = ring->next_to_use;
1852
1853 return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
1854}
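/* Illustrative worked example of the arithmetic above: with desc_num = 512,
 * next_to_clean = 100 and next_to_use = 300 the ring reports
 * (512 + 100) - 300 = 312 unused descriptors, while next_to_clean = 300 and
 * next_to_use = 100 gives simply 300 - 100 = 200.
 */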
1855
1856static void
1857hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
1858{
1859 struct hns3_desc_cb *desc_cb;
1860 struct hns3_desc_cb res_cbs;
1861 int i, ret;
1862
1863 for (i = 0; i < cleaned_count; i++) {
1864 desc_cb = &ring->desc_cb[ring->next_to_use];
1865 if (desc_cb->reuse_flag) {
1866 u64_stats_update_begin(&ring->syncp);
1867 ring->stats.reuse_pg_cnt++;
1868 u64_stats_update_end(&ring->syncp);
1869
1870 hns3_reuse_buffer(ring, ring->next_to_use);
1871 } else {
1872 ret = hns3_reserve_buffer_map(ring, &res_cbs);
1873 if (ret) {
1874 u64_stats_update_begin(&ring->syncp);
1875 ring->stats.sw_err_cnt++;
1876 u64_stats_update_end(&ring->syncp);
1877
1878 netdev_err(ring->tqp->handle->kinfo.netdev,
1879 "hnae reserve buffer map failed.\n");
1880 break;
1881 }
1882 hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
1883 }
1884
1885 ring_ptr_move_fw(ring, next_to_use);
1886 }
1887
1888 wmb(); /* Make sure all buffer writes are visible before the head register is updated */
1889 writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
1890}
1891
1892/* hns3_nic_get_headlen - determine size of header for LRO/GRO
1893 * @data: pointer to the start of the headers
1894 * @flag: RX descriptor l234info, used to identify the L3/L4 protocols
1895 * @max_size: total length of the section to find headers in
1896 *
1897 * This function is meant to determine the length of headers that will
1898 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
1899 * motivation for doing this is to only perform one pull for IPv4 TCP
1900 * based on the average data per packet.
1901 */
1902static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
1903 unsigned int max_size)
1904{
1905 unsigned char *network;
1906 u8 hlen;
1907
1908 /* This should never happen, but better safe than sorry */
1909 if (max_size < ETH_HLEN)
1910 return max_size;
1911
1912 /* Initialize network frame pointer */
1913 network = data;
1914
1915 /* Set first protocol and move network header forward */
1916 network += ETH_HLEN;
1917
1918 /* Handle any vlan tag if present */
1919 if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
1920 == HNS3_RX_FLAG_VLAN_PRESENT) {
1921 if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
1922 return max_size;
1923
1924 network += VLAN_HLEN;
1925 }
1926
1927 /* Handle L3 protocols */
1928 if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1929 == HNS3_RX_FLAG_L3ID_IPV4) {
1930 if ((typeof(max_size))(network - data) >
1931 (max_size - sizeof(struct iphdr)))
1932 return max_size;
1933
1934 /* Access ihl as a u8 to avoid unaligned access on ia64 */
1935 hlen = (network[0] & 0x0F) << 2;
1936
1937 /* Verify hlen meets minimum size requirements */
1938 if (hlen < sizeof(struct iphdr))
1939 return network - data;
1940
1941 /* Record next protocol if header is present */
1942 } else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
1943 == HNS3_RX_FLAG_L3ID_IPV6) {
1944 if ((typeof(max_size))(network - data) >
1945 (max_size - sizeof(struct ipv6hdr)))
1946 return max_size;
1947
1948 /* Record next protocol */
1949 hlen = sizeof(struct ipv6hdr);
1950 } else {
1951 return network - data;
1952 }
1953
1954 /* Relocate pointer to start of L4 header */
1955 network += hlen;
1956
1957 /* Finally sort out TCP/UDP */
1958 if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1959 == HNS3_RX_FLAG_L4ID_TCP) {
1960 if ((typeof(max_size))(network - data) >
1961 (max_size - sizeof(struct tcphdr)))
1962 return max_size;
1963
1964 /* Access doff as a u8 to avoid unaligned access on ia64 */
1965 hlen = (network[12] & 0xF0) >> 2;
1966
1967 /* Verify hlen meets minimum size requirements */
1968 if (hlen < sizeof(struct tcphdr))
1969 return network - data;
1970
1971 network += hlen;
1972 } else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
1973 == HNS3_RX_FLAG_L4ID_UDP) {
1974 if ((typeof(max_size))(network - data) >
1975 (max_size - sizeof(struct udphdr)))
1976 return max_size;
1977
1978 network += sizeof(struct udphdr);
1979 }
1980
1981 /* If everything has gone correctly network should be the
1982 * data section of the packet and will be the end of the header.
1983 * If not then it probably represents the end of the last recognized
1984 * header.
1985 */
1986 if ((typeof(max_size))(network - data) < max_size)
1987 return network - data;
1988 else
1989 return max_size;
1990}
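/* Illustrative worked example: for an untagged IPv4 TCP frame with a
 * 20-byte IP header and a 32-byte TCP header the walk above returns
 * ETH_HLEN (14) + 20 + 32 = 66, which is the number of header bytes later
 * copied into the skb linear area as the pulled header.
 */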
1991
1992static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
1993 struct hns3_enet_ring *ring, int pull_len,
1994 struct hns3_desc_cb *desc_cb)
1995{
1996 struct hns3_desc *desc;
1997 int truesize, size;
1998 int last_offset;
1999 bool twobufs;
2000
2001 twobufs = ((PAGE_SIZE < 8192) &&
2002 hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);
2003
2004 desc = &ring->desc[ring->next_to_clean];
2005 size = le16_to_cpu(desc->rx.size);
2006
2007 if (twobufs) {
2008 truesize = hnae_buf_size(ring);
2009 } else {
2010 truesize = ALIGN(size, L1_CACHE_BYTES);
2011 last_offset = hnae_page_size(ring) - hnae_buf_size(ring);
2012 }
2013
2014 skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
2015 size - pull_len, truesize - pull_len);
2016
2017 /* Avoid re-using pages from a remote NUMA node; the default is not to reuse */
2018 if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
2019 return;
2020
2021 if (twobufs) {
2022 /* If we are only owner of page we can reuse it */
2023 if (likely(page_count(desc_cb->priv) == 1)) {
2024 /* Flip page offset to other buffer */
2025 desc_cb->page_offset ^= truesize;
2026
2027 desc_cb->reuse_flag = 1;
2028 /* bump ref count on page before it is given */
2029 get_page(desc_cb->priv);
2030 }
2031 return;
2032 }
2033
2034 /* Move offset up to the next cache line */
2035 desc_cb->page_offset += truesize;
2036
2037 if (desc_cb->page_offset <= last_offset) {
2038 desc_cb->reuse_flag = 1;
2039 /* Bump ref count on page before it is given */
2040 get_page(desc_cb->priv);
2041 }
2042}
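/* Illustrative note: in the "twobufs" layout (4 KiB pages split into two
 * 2048-byte buffers) page_offset simply alternates between 0 and 2048 via
 * the XOR above, and the page is recycled whenever the stack has dropped
 * its reference (page_count == 1). With larger pages the offset advances
 * by the aligned buffer size until it passes last_offset, after which the
 * buffer is not reused and a fresh page is mapped instead.
 */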
2043
2044static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
2045 struct hns3_desc *desc)
2046{
2047 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2048 int l3_type, l4_type;
2049 u32 bd_base_info;
2050 int ol4_type;
2051 u32 l234info;
2052
2053 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2054 l234info = le32_to_cpu(desc->rx.l234_info);
2055
2056 skb->ip_summed = CHECKSUM_NONE;
2057
2058 skb_checksum_none_assert(skb);
2059
2060 if (!(netdev->features & NETIF_F_RXCSUM))
2061 return;
2062
2063 /* check if hardware has done checksum */
2064 if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
2065 return;
2066
2067 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
2068 hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
2069 hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
2070 hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
2071 netdev_err(netdev, "L3/L4 error pkt\n");
2072 u64_stats_update_begin(&ring->syncp);
2073 ring->stats.l3l4_csum_err++;
2074 u64_stats_update_end(&ring->syncp);
2075
2076 return;
2077 }
2078
2079 l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
2080 HNS3_RXD_L3ID_S);
2081 l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
2082 HNS3_RXD_L4ID_S);
2083
2084 ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
2085 switch (ol4_type) {
2086 case HNS3_OL4_TYPE_MAC_IN_UDP:
2087 case HNS3_OL4_TYPE_NVGRE:
2088 skb->csum_level = 1; /* fall through */
2089 case HNS3_OL4_TYPE_NO_TUN:
2090 /* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
2091 if (l3_type == HNS3_L3_TYPE_IPV4 ||
2092 (l3_type == HNS3_L3_TYPE_IPV6 &&
2093 (l4_type == HNS3_L4_TYPE_UDP ||
2094 l4_type == HNS3_L4_TYPE_TCP ||
2095 l4_type == HNS3_L4_TYPE_SCTP)))
2096 skb->ip_summed = CHECKSUM_UNNECESSARY;
2097 break;
2098 }
2099}
2100
2101static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
2102{
2103 napi_gro_receive(&ring->tqp_vector->napi, skb);
2104}
2105
2106static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
2107 struct sk_buff **out_skb, int *out_bnum)
2108{
2109 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2110 struct hns3_desc_cb *desc_cb;
2111 struct hns3_desc *desc;
2112 struct sk_buff *skb;
2113 unsigned char *va;
2114 u32 bd_base_info;
2115 int pull_len;
2116 u32 l234info;
2117 int length;
2118 int bnum;
2119
2120 desc = &ring->desc[ring->next_to_clean];
2121 desc_cb = &ring->desc_cb[ring->next_to_clean];
2122
2123 prefetch(desc);
2124
2125 length = le16_to_cpu(desc->rx.pkt_len);
2126 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2127 l234info = le32_to_cpu(desc->rx.l234_info);
2128
2129 /* Check valid BD */
2130 if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
2131 return -EFAULT;
2132
2133 va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;
2134
2135 /* Prefetch first cache line of first page
2136 * The idea is to cache a few bytes of the packet header. Our L1 cache
2137 * line size is 64B, so we need to prefetch twice to cover 128B. In
2138 * practice we may have larger caches with 128B Level 1 cache lines;
2139 * in that case a single fetch would suffice to cache the relevant
2140 * part of the header.
2141 */
2142 prefetch(va);
2143#if L1_CACHE_BYTES < 128
2144 prefetch(va + L1_CACHE_BYTES);
2145#endif
2146
2147 skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
2148 HNS3_RX_HEAD_SIZE);
2149 if (unlikely(!skb)) {
2150 netdev_err(netdev, "alloc rx skb fail\n");
2151
2152 u64_stats_update_begin(&ring->syncp);
2153 ring->stats.sw_err_cnt++;
2154 u64_stats_update_end(&ring->syncp);
2155
2156 return -ENOMEM;
2157 }
2158
2159 prefetchw(skb->data);
2160
2161 /* Based on the hardware's stripping strategy, the offloaded tag is
2162 * stored in ot_vlan_tag when the packet carries two VLAN tags, and
2163 * in vlan_tag when it carries a single tag.
2164 */
2165 if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
2166 u16 vlan_tag;
2167
2168 vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
2169 if (!(vlan_tag & VLAN_VID_MASK))
2170 vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
2171 if (vlan_tag & VLAN_VID_MASK)
2172 __vlan_hwaccel_put_tag(skb,
2173 htons(ETH_P_8021Q),
2174 vlan_tag);
2175 }
2176
2177 bnum = 1;
2178 if (length <= HNS3_RX_HEAD_SIZE) {
2179 memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));
2180
2181 /* We can reuse buffer as-is, just make sure it is local */
2182 if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
2183 desc_cb->reuse_flag = 1;
2184 else /* This page cannot be reused so discard it */
2185 put_page(desc_cb->priv);
2186
2187 ring_ptr_move_fw(ring, next_to_clean);
2188 } else {
2189 u64_stats_update_begin(&ring->syncp);
2190 ring->stats.seg_pkt_cnt++;
2191 u64_stats_update_end(&ring->syncp);
2192
2193 pull_len = hns3_nic_get_headlen(va, l234info,
2194 HNS3_RX_HEAD_SIZE);
2195 memcpy(__skb_put(skb, pull_len), va,
2196 ALIGN(pull_len, sizeof(long)));
2197
2198 hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
2199 ring_ptr_move_fw(ring, next_to_clean);
2200
2201 while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
2202 desc = &ring->desc[ring->next_to_clean];
2203 desc_cb = &ring->desc_cb[ring->next_to_clean];
2204 bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
2205 hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
2206 ring_ptr_move_fw(ring, next_to_clean);
2207 bnum++;
2208 }
2209 }
2210
2211 *out_bnum = bnum;
2212
2213 if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
2214 netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
2215 ((u64 *)desc)[0], ((u64 *)desc)[1]);
2216 u64_stats_update_begin(&ring->syncp);
2217 ring->stats.non_vld_descs++;
2218 u64_stats_update_end(&ring->syncp);
2219
2220 dev_kfree_skb_any(skb);
2221 return -EINVAL;
2222 }
2223
2224 if (unlikely((!desc->rx.pkt_len) ||
2225 hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
2226 netdev_err(netdev, "truncated pkt\n");
2227 u64_stats_update_begin(&ring->syncp);
2228 ring->stats.err_pkt_len++;
2229 u64_stats_update_end(&ring->syncp);
2230
2231 dev_kfree_skb_any(skb);
2232 return -EFAULT;
2233 }
2234
2235 if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
2236 netdev_err(netdev, "L2 error pkt\n");
2237 u64_stats_update_begin(&ring->syncp);
2238 ring->stats.l2_err++;
2239 u64_stats_update_end(&ring->syncp);
2240
2241 dev_kfree_skb_any(skb);
2242 return -EFAULT;
2243 }
2244
2245 u64_stats_update_begin(&ring->syncp);
2246 ring->stats.rx_pkts++;
2247 ring->stats.rx_bytes += skb->len;
2248 u64_stats_update_end(&ring->syncp);
2249
2250 ring->tqp_vector->rx_group.total_bytes += skb->len;
2251
2252 hns3_rx_checksum(ring, skb, desc);
2253 return 0;
2254}
2255
2256int hns3_clean_rx_ring(
2257 struct hns3_enet_ring *ring, int budget,
2258 void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
2259{
2260#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
2261 struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
2262 int recv_pkts, recv_bds, clean_count, err;
2263 int unused_count = hns3_desc_unused(ring);
2264 struct sk_buff *skb = NULL;
2265 int num, bnum = 0;
2266
2267 num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
2268 rmb(); /* Make sure num is read before any other descriptor data is touched */
2269
2270 recv_pkts = 0, recv_bds = 0, clean_count = 0;
2271 num -= unused_count;
2272
2273 while (recv_pkts < budget && recv_bds < num) {
2274 /* Reuse or realloc buffers */
2275 if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
2276 hns3_nic_alloc_rx_buffers(ring,
2277 clean_count + unused_count);
2278 clean_count = 0;
2279 unused_count = hns3_desc_unused(ring);
2280 }
2281
2282 /* Poll one pkt */
2283 err = hns3_handle_rx_bd(ring, &skb, &bnum);
2284 if (unlikely(!skb)) /* This fault cannot be repaired */
2285 goto out;
2286
2287 recv_bds += bnum;
2288 clean_count += bnum;
2289 if (unlikely(err)) { /* Drop this erroneous packet and move on */
2290 recv_pkts++;
2291 continue;
2292 }
2293
2294 /* Hand the packet up to the IP stack */
2295 skb->protocol = eth_type_trans(skb, netdev);
2296 rx_fn(ring, skb);
2297
2298 recv_pkts++;
2299 }
2300
2301out:
2302 /* Replenish any remaining cleaned or unused buffers before returning */
2303 if (clean_count + unused_count > 0)
2304 hns3_nic_alloc_rx_buffers(ring,
2305 clean_count + unused_count);
2306
2307 return recv_pkts;
2308}
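/* Illustrative note: RX buffer replenishment is batched -- the loop above
 * only calls hns3_nic_alloc_rx_buffers() once at least
 * RCB_NOF_ALLOC_RX_BUFF_ONCE (16) descriptors can be refilled, plus one
 * final top-up on exit, so the head register is not written for every
 * single received buffer.
 */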
2309
2310static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
2311{
2312#define HNS3_RX_ULTRA_PACKET_RATE 40000
2313 enum hns3_flow_level_range new_flow_level;
2314 struct hns3_enet_tqp_vector *tqp_vector;
2315 int packets_per_secs;
2316 int bytes_per_usecs;
2317 u16 new_int_gl;
2318 int usecs;
2319
2320 if (!ring_group->int_gl)
2321 return false;
2322
2323 if (ring_group->total_packets == 0) {
2324 ring_group->int_gl = HNS3_INT_GL_50K;
2325 ring_group->flow_level = HNS3_FLOW_LOW;
2326 return true;
2327 }
2328
2329 /* Simple throttle rate management
2330 * 0-10MB/s lower (50000 ints/s)
2331 * 10-20MB/s middle (20000 ints/s)
2332 * 20-1249MB/s high (18000 ints/s)
2333 * > 40000pps ultra (8000 ints/s)
2334 */
2335 new_flow_level = ring_group->flow_level;
2336 new_int_gl = ring_group->int_gl;
2337 tqp_vector = ring_group->ring->tqp_vector;
2338 usecs = (ring_group->int_gl << 1);
2339 bytes_per_usecs = ring_group->total_bytes / usecs;
2340 /* 1000000 microseconds per second */
2341 packets_per_secs = ring_group->total_packets * 1000000 / usecs;
2342
2343 switch (new_flow_level) {
2344 case HNS3_FLOW_LOW:
2345 if (bytes_per_usecs > 10)
2346 new_flow_level = HNS3_FLOW_MID;
2347 break;
2348 case HNS3_FLOW_MID:
2349 if (bytes_per_usecs > 20)
2350 new_flow_level = HNS3_FLOW_HIGH;
2351 else if (bytes_per_usecs <= 10)
2352 new_flow_level = HNS3_FLOW_LOW;
2353 break;
2354 case HNS3_FLOW_HIGH:
2355 case HNS3_FLOW_ULTRA:
2356 default:
2357 if (bytes_per_usecs <= 20)
2358 new_flow_level = HNS3_FLOW_MID;
2359 break;
2360 }
2361
2362 if ((packets_per_secs > HNS3_RX_ULTRA_PACKET_RATE) &&
2363 (&tqp_vector->rx_group == ring_group))
2364 new_flow_level = HNS3_FLOW_ULTRA;
2365
2366 switch (new_flow_level) {
2367 case HNS3_FLOW_LOW:
2368 new_int_gl = HNS3_INT_GL_50K;
2369 break;
2370 case HNS3_FLOW_MID:
2371 new_int_gl = HNS3_INT_GL_20K;
2372 break;
2373 case HNS3_FLOW_HIGH:
2374 new_int_gl = HNS3_INT_GL_18K;
2375 break;
2376 case HNS3_FLOW_ULTRA:
2377 new_int_gl = HNS3_INT_GL_8K;
2378 break;
2379 default:
2380 break;
2381 }
2382
2383 ring_group->total_bytes = 0;
2384 ring_group->total_packets = 0;
2385 ring_group->flow_level = new_flow_level;
2386 if (new_int_gl != ring_group->int_gl) {
2387 ring_group->int_gl = new_int_gl;
2388 return true;
2389 }
2390 return false;
2391}
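/* Illustrative worked example (the numeric encoding of the HNS3_INT_GL_*
 * constants is assumed here, not taken from this file): with int_gl = 25
 * the sampling window is usecs = 50; 2500 bytes received in that window
 * gives bytes_per_usecs = 50 > 20, so a ring sitting at HNS3_FLOW_MID is
 * promoted to HNS3_FLOW_HIGH and new_int_gl becomes HNS3_INT_GL_18K.
 */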
2392
2393static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
2394{
2395 u16 rx_int_gl, tx_int_gl;
2396 bool rx, tx;
2397
2398 rx = hns3_get_new_int_gl(&tqp_vector->rx_group);
2399 tx = hns3_get_new_int_gl(&tqp_vector->tx_group);
2400 rx_int_gl = tqp_vector->rx_group.int_gl;
2401 tx_int_gl = tqp_vector->tx_group.int_gl;
2402 if (rx && tx) {
2403 if (rx_int_gl > tx_int_gl) {
2404 tqp_vector->tx_group.int_gl = rx_int_gl;
2405 tqp_vector->tx_group.flow_level =
2406 tqp_vector->rx_group.flow_level;
2407 hns3_set_vector_coalesc_gl(tqp_vector, rx_int_gl);
2408 } else {
2409 tqp_vector->rx_group.int_gl = tx_int_gl;
2410 tqp_vector->rx_group.flow_level =
2411 tqp_vector->tx_group.flow_level;
2412 hns3_set_vector_coalesc_gl(tqp_vector, tx_int_gl);
2413 }
2414 }
2415}
2416
2417static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
2418{
2419 struct hns3_enet_ring *ring;
2420 int rx_pkt_total = 0;
2421
2422 struct hns3_enet_tqp_vector *tqp_vector =
2423 container_of(napi, struct hns3_enet_tqp_vector, napi);
2424 bool clean_complete = true;
2425 int rx_budget;
2426
2427 /* Since the actual Tx work is minimal, we can give the Tx a larger
2428 * budget and be more aggressive about cleaning up the Tx descriptors.
2429 */
2430 hns3_for_each_ring(ring, tqp_vector->tx_group) {
2431 if (!hns3_clean_tx_ring(ring, budget))
2432 clean_complete = false;
2433 }
2434
2435 /* make sure the rx ring budget is not smaller than 1 */
2436 rx_budget = max(budget / tqp_vector->num_tqps, 1);
2437
2438 hns3_for_each_ring(ring, tqp_vector->rx_group) {
2439 int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
2440 hns3_rx_skb);
2441
2442 if (rx_cleaned >= rx_budget)
2443 clean_complete = false;
2444
2445 rx_pkt_total += rx_cleaned;
2446 }
2447
2448 tqp_vector->rx_group.total_packets += rx_pkt_total;
2449
2450 if (!clean_complete)
2451 return budget;
2452
2453 napi_complete(napi);
2454 hns3_update_new_int_gl(tqp_vector);
2455 hns3_mask_vector_irq(tqp_vector, 1);
2456
2457 return rx_pkt_total;
2458}
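/* Illustrative note: with the usual NAPI budget of 64 and a vector serving
 * four TQPs, every TX ring may be cleaned with the full budget while each
 * RX ring gets rx_budget = max(64 / 4, 1) = 16; only when all rings finish
 * within budget is napi_complete() called and the vector's interrupt
 * re-enabled via hns3_mask_vector_irq(tqp_vector, 1).
 */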
2459
2460static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2461 struct hnae3_ring_chain_node *head)
2462{
2463 struct pci_dev *pdev = tqp_vector->handle->pdev;
2464 struct hnae3_ring_chain_node *cur_chain = head;
2465 struct hnae3_ring_chain_node *chain;
2466 struct hns3_enet_ring *tx_ring;
2467 struct hns3_enet_ring *rx_ring;
2468
2469 tx_ring = tqp_vector->tx_group.ring;
2470 if (tx_ring) {
2471 cur_chain->tqp_index = tx_ring->tqp->tqp_index;
2472 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2473 HNAE3_RING_TYPE_TX);
2474
2475 cur_chain->next = NULL;
2476
2477 while (tx_ring->next) {
2478 tx_ring = tx_ring->next;
2479
2480 chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
2481 GFP_KERNEL);
2482 if (!chain)
2483 return -ENOMEM;
2484
2485 cur_chain->next = chain;
2486 chain->tqp_index = tx_ring->tqp->tqp_index;
2487 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2488 HNAE3_RING_TYPE_TX);
2489
2490 cur_chain = chain;
2491 }
2492 }
2493
2494 rx_ring = tqp_vector->rx_group.ring;
2495 if (!tx_ring && rx_ring) {
2496 cur_chain->next = NULL;
2497 cur_chain->tqp_index = rx_ring->tqp->tqp_index;
2498 hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
2499 HNAE3_RING_TYPE_RX);
2500
2501 rx_ring = rx_ring->next;
2502 }
2503
2504 while (rx_ring) {
2505 chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
2506 if (!chain)
2507 return -ENOMEM;
2508
2509 cur_chain->next = chain;
2510 chain->tqp_index = rx_ring->tqp->tqp_index;
2511 hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
2512 HNAE3_RING_TYPE_RX);
2513 cur_chain = chain;
2514
2515 rx_ring = rx_ring->next;
2516 }
2517
2518 return 0;
2519}
2520
2521static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
2522 struct hnae3_ring_chain_node *head)
2523{
2524 struct pci_dev *pdev = tqp_vector->handle->pdev;
2525 struct hnae3_ring_chain_node *chain_tmp, *chain;
2526
2527 chain = head->next;
2528
2529 while (chain) {
2530 chain_tmp = chain->next;
2531 devm_kfree(&pdev->dev, chain);
2532 chain = chain_tmp;
2533 }
2534}
2535
2536static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
2537 struct hns3_enet_ring *ring)
2538{
2539 ring->next = group->ring;
2540 group->ring = ring;
2541
2542 group->count++;
2543}
2544
2545static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
2546{
2547 struct hnae3_ring_chain_node vector_ring_chain;
2548 struct hnae3_handle *h = priv->ae_handle;
2549 struct hns3_enet_tqp_vector *tqp_vector;
2550 struct hnae3_vector_info *vector;
2551 struct pci_dev *pdev = h->pdev;
2552 u16 tqp_num = h->kinfo.num_tqps;
2553 u16 vector_num;
2554 int ret = 0;
2555 u16 i;
2556
2557 /* RSS size, the number of online CPUs and vector_num should be the same */
2558 /* Should consider 2p/4p later */
2559 vector_num = min_t(u16, num_online_cpus(), tqp_num);
2560 vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
2561 GFP_KERNEL);
2562 if (!vector)
2563 return -ENOMEM;
2564
2565 vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);
2566
2567 priv->vector_num = vector_num;
2568 priv->tqp_vector = (struct hns3_enet_tqp_vector *)
2569 devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
2570 GFP_KERNEL);
2571 if (!priv->tqp_vector)
2572 return -ENOMEM;
2573
2574 for (i = 0; i < tqp_num; i++) {
2575 u16 vector_i = i % vector_num;
2576
2577 tqp_vector = &priv->tqp_vector[vector_i];
2578
2579 hns3_add_ring_to_group(&tqp_vector->tx_group,
2580 priv->ring_data[i].ring);
2581
2582 hns3_add_ring_to_group(&tqp_vector->rx_group,
2583 priv->ring_data[i + tqp_num].ring);
2584
2585 tqp_vector->idx = vector_i;
2586 tqp_vector->mask_addr = vector[vector_i].io_addr;
2587 tqp_vector->vector_irq = vector[vector_i].vector;
2588 tqp_vector->num_tqps++;
2589
2590 priv->ring_data[i].ring->tqp_vector = tqp_vector;
2591 priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
2592 }
2593
2594 for (i = 0; i < vector_num; i++) {
2595 tqp_vector = &priv->tqp_vector[i];
2596
2597 tqp_vector->rx_group.total_bytes = 0;
2598 tqp_vector->rx_group.total_packets = 0;
2599 tqp_vector->tx_group.total_bytes = 0;
2600 tqp_vector->tx_group.total_packets = 0;
2601 hns3_vector_gl_rl_init(tqp_vector);
2602 tqp_vector->handle = h;
2603
2604 ret = hns3_get_vector_ring_chain(tqp_vector,
2605 &vector_ring_chain);
2606 if (ret)
2607 goto out;
2608
2609 ret = h->ae_algo->ops->map_ring_to_vector(h,
2610 tqp_vector->vector_irq, &vector_ring_chain);
2611 if (ret)
2612 goto out;
2613
2614 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2615
2616 netif_napi_add(priv->netdev, &tqp_vector->napi,
2617 hns3_nic_common_poll, NAPI_POLL_WEIGHT);
2618 }
2619
2620out:
2621 devm_kfree(&pdev->dev, vector);
2622 return ret;
2623}
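/* Illustrative note: ring-to-vector distribution is a plain modulo -- with
 * 16 TQPs and 4 usable vectors, the TX/RX ring pairs 0, 4, 8 and 12 all
 * share vector 0 (vector_i = i % vector_num), and each vector carries one
 * NAPI context that polls all of its attached rings.
 */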
2624
2625static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
2626{
2627 struct hnae3_ring_chain_node vector_ring_chain;
2628 struct hnae3_handle *h = priv->ae_handle;
2629 struct hns3_enet_tqp_vector *tqp_vector;
2630 struct pci_dev *pdev = h->pdev;
2631 int i, ret;
2632
2633 for (i = 0; i < priv->vector_num; i++) {
2634 tqp_vector = &priv->tqp_vector[i];
2635
2636 ret = hns3_get_vector_ring_chain(tqp_vector,
2637 &vector_ring_chain);
2638 if (ret)
2639 return ret;
2640
2641 ret = h->ae_algo->ops->unmap_ring_from_vector(h,
2642 tqp_vector->vector_irq, &vector_ring_chain);
2643 if (ret)
2644 return ret;
2645
2646 hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);
2647
2648 if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
2649 (void)irq_set_affinity_hint(
2650 priv->tqp_vector[i].vector_irq,
2651 NULL);
2652 free_irq(priv->tqp_vector[i].vector_irq,
2653 &priv->tqp_vector[i]);
2654 }
2655
2656 priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
2657
2658 netif_napi_del(&priv->tqp_vector[i].napi);
2659 }
2660
2661 devm_kfree(&pdev->dev, priv->tqp_vector);
2662
2663 return 0;
2664}
2665
2666static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
2667 int ring_type)
2668{
2669 struct hns3_nic_ring_data *ring_data = priv->ring_data;
2670 int queue_num = priv->ae_handle->kinfo.num_tqps;
2671 struct pci_dev *pdev = priv->ae_handle->pdev;
2672 struct hns3_enet_ring *ring;
2673
2674 ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
2675 if (!ring)
2676 return -ENOMEM;
2677
2678 if (ring_type == HNAE3_RING_TYPE_TX) {
2679 ring_data[q->tqp_index].ring = ring;
2680 ring_data[q->tqp_index].queue_index = q->tqp_index;
2681 ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
2682 } else {
2683 ring_data[q->tqp_index + queue_num].ring = ring;
2684 ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
2685 ring->io_base = q->io_base;
2686 }
2687
2688 hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);
2689
2690 ring->tqp = q;
2691 ring->desc = NULL;
2692 ring->desc_cb = NULL;
2693 ring->dev = priv->dev;
2694 ring->desc_dma_addr = 0;
2695 ring->buf_size = q->buf_size;
2696 ring->desc_num = q->desc_num;
2697 ring->next_to_use = 0;
2698 ring->next_to_clean = 0;
2699
2700 return 0;
2701}
2702
2703static int hns3_queue_to_ring(struct hnae3_queue *tqp,
2704 struct hns3_nic_priv *priv)
2705{
2706 int ret;
2707
2708 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
2709 if (ret)
2710 return ret;
2711
2712 ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
2713 if (ret)
2714 return ret;
2715
2716 return 0;
2717}
2718
2719static int hns3_get_ring_config(struct hns3_nic_priv *priv)
2720{
2721 struct hnae3_handle *h = priv->ae_handle;
2722 struct pci_dev *pdev = h->pdev;
2723 int i, ret;
2724
2725 priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
2726 sizeof(*priv->ring_data) * 2,
2727 GFP_KERNEL);
2728 if (!priv->ring_data)
2729 return -ENOMEM;
2730
2731 for (i = 0; i < h->kinfo.num_tqps; i++) {
2732 ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
2733 if (ret)
2734 goto err;
2735 }
2736
2737 return 0;
2738err:
2739 devm_kfree(&pdev->dev, priv->ring_data);
2740 return ret;
2741}
2742
2743static void hns3_put_ring_config(struct hns3_nic_priv *priv)
2744{
2745 struct hnae3_handle *h = priv->ae_handle;
2746 int i;
2747
2748 for (i = 0; i < h->kinfo.num_tqps; i++) {
2749 devm_kfree(priv->dev, priv->ring_data[i].ring);
2750 devm_kfree(priv->dev,
2751 priv->ring_data[i + h->kinfo.num_tqps].ring);
2752 }
2753 devm_kfree(priv->dev, priv->ring_data);
2754}
2755
2756static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
2757{
2758 int ret;
2759
2760 if (ring->desc_num <= 0 || ring->buf_size <= 0)
2761 return -EINVAL;
2762
2763 ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
2764 GFP_KERNEL);
2765 if (!ring->desc_cb) {
2766 ret = -ENOMEM;
2767 goto out;
2768 }
2769
2770 ret = hns3_alloc_desc(ring);
2771 if (ret)
2772 goto out_with_desc_cb;
2773
2774 if (!HNAE3_IS_TX_RING(ring)) {
2775 ret = hns3_alloc_ring_buffers(ring);
2776 if (ret)
2777 goto out_with_desc;
2778 }
2779
2780 return 0;
2781
2782out_with_desc:
2783 hns3_free_desc(ring);
2784out_with_desc_cb:
2785 kfree(ring->desc_cb);
2786 ring->desc_cb = NULL;
2787out:
2788 return ret;
2789}
2790
2791static void hns3_fini_ring(struct hns3_enet_ring *ring)
2792{
2793 hns3_free_desc(ring);
2794 kfree(ring->desc_cb);
2795 ring->desc_cb = NULL;
2796 ring->next_to_clean = 0;
2797 ring->next_to_use = 0;
2798}
2799
2800static int hns3_buf_size2type(u32 buf_size)
2801{
2802 int bd_size_type;
2803
2804 switch (buf_size) {
2805 case 512:
2806 bd_size_type = HNS3_BD_SIZE_512_TYPE;
2807 break;
2808 case 1024:
2809 bd_size_type = HNS3_BD_SIZE_1024_TYPE;
2810 break;
2811 case 2048:
2812 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2813 break;
2814 case 4096:
2815 bd_size_type = HNS3_BD_SIZE_4096_TYPE;
2816 break;
2817 default:
2818 bd_size_type = HNS3_BD_SIZE_2048_TYPE;
2819 }
2820
2821 return bd_size_type;
2822}
2823
2824static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
2825{
2826 dma_addr_t dma = ring->desc_dma_addr;
2827 struct hnae3_queue *q = ring->tqp;
2828
2829 if (!HNAE3_IS_TX_RING(ring)) {
2830 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
2831 (u32)dma);
2832 hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
2833 (u32)((dma >> 31) >> 1));
2834
2835 hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
2836 hns3_buf_size2type(ring->buf_size));
2837 hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
2838 ring->desc_num / 8 - 1);
2839
2840 } else {
2841 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
2842 (u32)dma);
2843 hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
2844 (u32)((dma >> 31) >> 1));
2845
2846 hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
2847 hns3_buf_size2type(ring->buf_size));
2848 hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
2849 ring->desc_num / 8 - 1);
2850 }
2851}
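/* Illustrative sketch (not part of the driver; model_split_dma is a
 * hypothetical helper): the register writes above program the 64-bit DMA
 * base address as two 32-bit halves, and "(dma >> 31) >> 1" is simply
 * "dma >> 32" written so it stays well-defined when dma_addr_t is 32 bits
 * wide.
 */
#if 0
static void model_split_dma(unsigned long long dma,
			    unsigned int *lo, unsigned int *hi)
{
	*lo = (unsigned int)dma;		/* lower 32 bits */
	*hi = (unsigned int)((dma >> 31) >> 1);	/* upper 32 bits */
}
#endif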
2852
2853int hns3_init_all_ring(struct hns3_nic_priv *priv)
2854{
2855 struct hnae3_handle *h = priv->ae_handle;
2856 int ring_num = h->kinfo.num_tqps * 2;
2857 int i, j;
2858 int ret;
2859
2860 for (i = 0; i < ring_num; i++) {
2861 ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
2862 if (ret) {
2863 dev_err(priv->dev,
2864 "Alloc ring memory fail! ret=%d\n", ret);
2865 goto out_when_alloc_ring_memory;
2866 }
2867
2868 hns3_init_ring_hw(priv->ring_data[i].ring);
2869
2870 u64_stats_init(&priv->ring_data[i].ring->syncp);
2871 }
2872
2873 return 0;
2874
2875out_when_alloc_ring_memory:
2876 for (j = i - 1; j >= 0; j--)
2877 hns3_fini_ring(priv->ring_data[j].ring);
2878
2879 return -ENOMEM;
2880}
2881
2882int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
2883{
2884 struct hnae3_handle *h = priv->ae_handle;
2885 int i;
2886
2887 for (i = 0; i < h->kinfo.num_tqps; i++) {
2888 if (h->ae_algo->ops->reset_queue)
2889 h->ae_algo->ops->reset_queue(h, i);
2890
2891 hns3_fini_ring(priv->ring_data[i].ring);
2892 devm_kfree(priv->dev, priv->ring_data[i].ring);
2893 hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
2894 devm_kfree(priv->dev,
2895 priv->ring_data[i + h->kinfo.num_tqps].ring);
2896 }
2897 devm_kfree(priv->dev, priv->ring_data);
2898
2899 return 0;
2900}
2901
2902/* Set the MAC address if one is configured, otherwise leave it to the AE driver */
2903static void hns3_init_mac_addr(struct net_device *netdev)
2904{
2905 struct hns3_nic_priv *priv = netdev_priv(netdev);
2906 struct hnae3_handle *h = priv->ae_handle;
2907 u8 mac_addr_temp[ETH_ALEN];
2908
2909 if (h->ae_algo->ops->get_mac_addr) {
2910 h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
2911 ether_addr_copy(netdev->dev_addr, mac_addr_temp);
2912 }
2913
2914 /* Check if the MAC address is valid, if not get a random one */
2915 if (!is_valid_ether_addr(netdev->dev_addr)) {
2916 eth_hw_addr_random(netdev);
2917 dev_warn(priv->dev, "using random MAC address %pM\n",
2918 netdev->dev_addr);
2919 }
2920
2921 if (h->ae_algo->ops->set_mac_addr)
2922 h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr);
2923
2924}
2925
2926static void hns3_nic_set_priv_ops(struct net_device *netdev)
2927{
2928 struct hns3_nic_priv *priv = netdev_priv(netdev);
2929
2930 if ((netdev->features & NETIF_F_TSO) ||
2931 (netdev->features & NETIF_F_TSO6)) {
2932 priv->ops.fill_desc = hns3_fill_desc_tso;
2933 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
2934 } else {
2935 priv->ops.fill_desc = hns3_fill_desc;
2936 priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
2937 }
2938}
2939
2940static int hns3_client_init(struct hnae3_handle *handle)
2941{
2942 struct pci_dev *pdev = handle->pdev;
2943 struct hns3_nic_priv *priv;
2944 struct net_device *netdev;
2945 int ret;
2946
2947 netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
2948 handle->kinfo.num_tqps);
2949 if (!netdev)
2950 return -ENOMEM;
2951
2952 priv = netdev_priv(netdev);
2953 priv->dev = &pdev->dev;
2954 priv->netdev = netdev;
2955 priv->ae_handle = handle;
2956 priv->last_reset_time = jiffies;
2957 priv->reset_level = HNAE3_FUNC_RESET;
2958 priv->tx_timeout_count = 0;
2959
2960 handle->kinfo.netdev = netdev;
2961 handle->priv = (void *)priv;
2962
2963 hns3_init_mac_addr(netdev);
2964
2965 hns3_set_default_feature(netdev);
2966
2967 netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
2968 netdev->priv_flags |= IFF_UNICAST_FLT;
2969 netdev->netdev_ops = &hns3_nic_netdev_ops;
2970 SET_NETDEV_DEV(netdev, &pdev->dev);
2971 hns3_ethtool_set_ops(netdev);
2972 hns3_nic_set_priv_ops(netdev);
2973
2974 /* Carrier off reporting is important to ethtool even BEFORE open */
2975 netif_carrier_off(netdev);
2976
2977 ret = hns3_get_ring_config(priv);
2978 if (ret) {
2979 ret = -ENOMEM;
2980 goto out_get_ring_cfg;
2981 }
2982
2983 ret = hns3_nic_init_vector_data(priv);
2984 if (ret) {
2985 ret = -ENOMEM;
2986 goto out_init_vector_data;
2987 }
2988
2989 ret = hns3_init_all_ring(priv);
2990 if (ret) {
2991 ret = -ENOMEM;
2992 goto out_init_ring_data;
2993 }
2994
2995 ret = register_netdev(netdev);
2996 if (ret) {
2997 dev_err(priv->dev, "probe register netdev fail!\n");
2998 goto out_reg_netdev_fail;
2999 }
3000
3001 hns3_dcbnl_setup(handle);
3002
3003 /* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
3004 netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
3005
3006 return ret;
3007
3008out_reg_netdev_fail:
3009out_init_ring_data:
3010 (void)hns3_nic_uninit_vector_data(priv);
3011 priv->ring_data = NULL;
3012out_init_vector_data:
3013out_get_ring_cfg:
3014 priv->ae_handle = NULL;
3015 free_netdev(netdev);
3016 return ret;
3017}
3018
3019static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
3020{
3021 struct net_device *netdev = handle->kinfo.netdev;
3022 struct hns3_nic_priv *priv = netdev_priv(netdev);
3023 int ret;
3024
3025 if (netdev->reg_state != NETREG_UNINITIALIZED)
3026 unregister_netdev(netdev);
3027
3028 ret = hns3_nic_uninit_vector_data(priv);
3029 if (ret)
3030 netdev_err(netdev, "uninit vector error\n");
3031
3032 ret = hns3_uninit_all_ring(priv);
3033 if (ret)
3034 netdev_err(netdev, "uninit ring error\n");
3035
3036 priv->ring_data = NULL;
3037
3038 free_netdev(netdev);
3039}
3040
3041static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
3042{
3043 struct net_device *netdev = handle->kinfo.netdev;
3044
3045 if (!netdev)
3046 return;
3047
3048 if (linkup) {
3049 netif_carrier_on(netdev);
3050 netif_tx_wake_all_queues(netdev);
3051 netdev_info(netdev, "link up\n");
3052 } else {
3053 netif_carrier_off(netdev);
3054 netif_tx_stop_all_queues(netdev);
3055 netdev_info(netdev, "link down\n");
3056 }
3057}
3058
3059static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
3060{
3061 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3062 struct net_device *ndev = kinfo->netdev;
3063 bool if_running;
3064 int ret;
3065 u8 i;
3066
3067 if (tc > HNAE3_MAX_TC)
3068 return -EINVAL;
3069
3070 if (!ndev)
3071 return -ENODEV;
3072
3073 if_running = netif_running(ndev);
3074
3075 ret = netdev_set_num_tc(ndev, tc);
3076 if (ret)
3077 return ret;
3078
3079 if (if_running) {
3080 (void)hns3_nic_net_stop(ndev);
3081 msleep(100);
3082 }
3083
3084 ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
3085 kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
3086 if (ret)
3087 goto err_out;
3088
3089 if (tc <= 1) {
3090 netdev_reset_tc(ndev);
3091 goto out;
3092 }
3093
3094 for (i = 0; i < HNAE3_MAX_TC; i++) {
3095 struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];
3096
3097 if (tc_info->enable)
3098 netdev_set_tc_queue(ndev,
3099 tc_info->tc,
3100 tc_info->tqp_count,
3101 tc_info->tqp_offset);
3102 }
3103
3104 for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
3105 netdev_set_prio_tc_map(ndev, i,
3106 kinfo->prio_tc[i]);
3107 }
3108
3109out:
3110 ret = hns3_nic_set_real_num_queue(ndev);
3111
3112err_out:
3113 if (if_running)
3114 (void)hns3_nic_net_open(ndev);
3115
3116 return ret;
3117}
3118
3119static void hns3_recover_hw_addr(struct net_device *ndev)
3120{
3121 struct netdev_hw_addr_list *list;
3122 struct netdev_hw_addr *ha, *tmp;
3123
3124 /* go through and sync uc_addr entries to the device */
3125 list = &ndev->uc;
3126 list_for_each_entry_safe(ha, tmp, &list->list, list)
3127 hns3_nic_uc_sync(ndev, ha->addr);
3128
3129 /* go through and sync mc_addr entries to the device */
3130 list = &ndev->mc;
3131 list_for_each_entry_safe(ha, tmp, &list->list, list)
3132 hns3_nic_mc_sync(ndev, ha->addr);
3133}
3134
3135static void hns3_drop_skb_data(struct hns3_enet_ring *ring, struct sk_buff *skb)
3136{
3137 dev_kfree_skb_any(skb);
3138}
3139
3140static void hns3_clear_all_ring(struct hnae3_handle *h)
3141{
3142 struct net_device *ndev = h->kinfo.netdev;
3143 struct hns3_nic_priv *priv = netdev_priv(ndev);
3144 u32 i;
3145
3146 for (i = 0; i < h->kinfo.num_tqps; i++) {
3147 struct netdev_queue *dev_queue;
3148 struct hns3_enet_ring *ring;
3149
3150 ring = priv->ring_data[i].ring;
3151 hns3_clean_tx_ring(ring, ring->desc_num);
3152 dev_queue = netdev_get_tx_queue(ndev,
3153 priv->ring_data[i].queue_index);
3154 netdev_tx_reset_queue(dev_queue);
3155
3156 ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
3157 hns3_clean_rx_ring(ring, ring->desc_num, hns3_drop_skb_data);
3158 }
3159}
3160
3161static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
3162{
3163 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3164 struct net_device *ndev = kinfo->netdev;
3165
3166 if (!netif_running(ndev))
3167 return -EIO;
3168
3169 return hns3_nic_net_stop(ndev);
3170}
3171
3172static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
3173{
3174 struct hnae3_knic_private_info *kinfo = &handle->kinfo;
3175 struct hns3_nic_priv *priv = netdev_priv(kinfo->netdev);
3176 int ret = 0;
3177
3178 if (netif_running(kinfo->netdev)) {
3179 ret = hns3_nic_net_up(kinfo->netdev);
3180 if (ret) {
3181 netdev_err(kinfo->netdev,
3182 "hns net up fail, ret=%d!\n", ret);
3183 return ret;
3184 }
3185
3186 priv->last_reset_time = jiffies;
3187 }
3188
3189 return ret;
3190}
3191
3192static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
3193{
3194 struct net_device *netdev = handle->kinfo.netdev;
3195 struct hns3_nic_priv *priv = netdev_priv(netdev);
3196 int ret;
3197
3198 priv->reset_level = 1;
3199 hns3_init_mac_addr(netdev);
3200 hns3_nic_set_rx_mode(netdev);
3201 hns3_recover_hw_addr(netdev);
3202
3203 /* Carrier off reporting is important to ethtool even BEFORE open */
3204 netif_carrier_off(netdev);
3205
3206 ret = hns3_get_ring_config(priv);
3207 if (ret)
3208 return ret;
3209
3210 ret = hns3_nic_init_vector_data(priv);
3211 if (ret)
3212 return ret;
3213
3214 ret = hns3_init_all_ring(priv);
3215 if (ret) {
3216 hns3_nic_uninit_vector_data(priv);
3217 priv->ring_data = NULL;
3218 }
3219
3220 return ret;
3221}
3222
3223static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
3224{
3225 struct net_device *netdev = handle->kinfo.netdev;
3226 struct hns3_nic_priv *priv = netdev_priv(netdev);
3227 int ret;
3228
3229 hns3_clear_all_ring(handle);
3230
3231 ret = hns3_nic_uninit_vector_data(priv);
3232 if (ret) {
3233 netdev_err(netdev, "uninit vector error\n");
3234 return ret;
3235 }
3236
3237 ret = hns3_uninit_all_ring(priv);
3238 if (ret)
3239 netdev_err(netdev, "uninit ring error\n");
3240
3241 priv->ring_data = NULL;
3242
3243 return ret;
3244}
3245
3246static int hns3_reset_notify(struct hnae3_handle *handle,
3247 enum hnae3_reset_notify_type type)
3248{
3249 int ret = 0;
3250
3251 switch (type) {
3252 case HNAE3_UP_CLIENT:
3253 ret = hns3_reset_notify_up_enet(handle);
3254 break;
3255 case HNAE3_DOWN_CLIENT:
3256 ret = hns3_reset_notify_down_enet(handle);
3257 break;
3258 case HNAE3_INIT_CLIENT:
3259 ret = hns3_reset_notify_init_enet(handle);
3260 break;
3261 case HNAE3_UNINIT_CLIENT:
3262 ret = hns3_reset_notify_uninit_enet(handle);
3263 break;
3264 default:
3265 break;
3266 }
3267
3268 return ret;
3269}
3270
3271static u16 hns3_get_max_available_channels(struct net_device *netdev)
3272{
3273 struct hnae3_handle *h = hns3_get_handle(netdev);
3274 u16 free_tqps, max_rss_size, max_tqps;
3275
3276 h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
3277 max_tqps = h->kinfo.num_tc * max_rss_size;
3278
3279 return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
3280}
3281
3282static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num)
3283{
3284 struct hns3_nic_priv *priv = netdev_priv(netdev);
3285 struct hnae3_handle *h = hns3_get_handle(netdev);
3286 int ret;
3287
3288 ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
3289 if (ret)
3290 return ret;
3291
3292 ret = hns3_get_ring_config(priv);
3293 if (ret)
3294 return ret;
3295
3296 ret = hns3_nic_init_vector_data(priv);
3297 if (ret)
3298 goto err_uninit_vector;
3299
3300 ret = hns3_init_all_ring(priv);
3301 if (ret)
3302 goto err_put_ring;
3303
3304 return 0;
3305
3306err_put_ring:
3307 hns3_put_ring_config(priv);
3308err_uninit_vector:
3309 hns3_nic_uninit_vector_data(priv);
3310 return ret;
3311}
3312
3313static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
3314{
3315 return (new_tqp_num / num_tc) * num_tc;
3316}
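/* Illustrative worked example: hns3_adjust_tqps_num() rounds the requested
 * channel count down to a multiple of the TC count, so with num_tc = 4 a
 * request for 10 combined channels is trimmed to (10 / 4) * 4 = 8 and every
 * traffic class keeps the same number of queues.
 */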
3317
3318int hns3_set_channels(struct net_device *netdev,
3319 struct ethtool_channels *ch)
3320{
3321 struct hns3_nic_priv *priv = netdev_priv(netdev);
3322 struct hnae3_handle *h = hns3_get_handle(netdev);
3323 struct hnae3_knic_private_info *kinfo = &h->kinfo;
3324 bool if_running = netif_running(netdev);
3325 u32 new_tqp_num = ch->combined_count;
3326 u16 org_tqp_num;
3327 int ret;
3328
3329 if (ch->rx_count || ch->tx_count)
3330 return -EINVAL;
3331
3332 if (new_tqp_num > hns3_get_max_available_channels(netdev) ||
3333 new_tqp_num < kinfo->num_tc) {
3334 dev_err(&netdev->dev,
3335 "Change tqps fail, the tqp range is from %d to %d",
3336 kinfo->num_tc,
3337 hns3_get_max_available_channels(netdev));
3338 return -EINVAL;
3339 }
3340
3341 new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
3342 if (kinfo->num_tqps == new_tqp_num)
3343 return 0;
3344
3345 if (if_running)
3346 dev_close(netdev);
3347
3348 hns3_clear_all_ring(h);
3349
3350 ret = hns3_nic_uninit_vector_data(priv);
3351 if (ret) {
3352 dev_err(&netdev->dev,
3353 "Unbind vector with tqp fail, nothing is changed");
3354 goto open_netdev;
3355 }
3356
3357 hns3_uninit_all_ring(priv);
3358
3359 org_tqp_num = h->kinfo.num_tqps;
3360 ret = hns3_modify_tqp_num(netdev, new_tqp_num);
3361 if (ret) {
3362 ret = hns3_modify_tqp_num(netdev, org_tqp_num);
3363 if (ret) {
3364 /* If revert to old tqp failed, fatal error occurred */
3365 dev_err(&netdev->dev,
3366 "Revert to old tqp num fail, ret=%d", ret);
3367 return ret;
3368 }
3369 dev_info(&netdev->dev,
3370 "Change tqp num fail, Revert to old tqp num");
3371 }
3372
3373open_netdev:
3374 if (if_running)
3375 dev_open(netdev);
3376
3377 return ret;
3378}
3379
3380static const struct hnae3_client_ops client_ops = {
3381 .init_instance = hns3_client_init,
3382 .uninit_instance = hns3_client_uninit,
3383 .link_status_change = hns3_link_status_change,
3384 .setup_tc = hns3_client_setup_tc,
3385 .reset_notify = hns3_reset_notify,
3386};
3387
3388/* hns3_init_module - Driver registration routine
3389 * hns3_init_module is the first routine called when the driver is
3390 * loaded. All it does is register with the PCI subsystem.
3391 */
3392static int __init hns3_init_module(void)
3393{
3394 int ret;
3395
3396 pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
3397 pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);
3398
3399 client.type = HNAE3_CLIENT_KNIC;
3400 snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
3401 hns3_driver_name);
3402
3403 client.ops = &client_ops;
3404
3405 ret = hnae3_register_client(&client);
3406 if (ret)
3407 return ret;
3408
3409 ret = pci_register_driver(&hns3_driver);
3410 if (ret)
3411 hnae3_unregister_client(&client);
3412
3413 return ret;
3414}
3415module_init(hns3_init_module);
3416
3417/* hns3_exit_module - Driver exit cleanup routine
3418 * hns3_exit_module is called just before the driver is removed
3419 * from memory.
3420 */
3421static void __exit hns3_exit_module(void)
3422{
3423 pci_unregister_driver(&hns3_driver);
3424 hnae3_unregister_client(&client);
3425}
3426module_exit(hns3_exit_module);
3427
3428MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
3429MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
3430MODULE_LICENSE("GPL");
3431MODULE_ALIAS("pci:hns-nic");