/*
 * Copyright (c) 2016~2017 Hisilicon Limited.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 */

#include <linux/dma-mapping.h>
#include <linux/etherdevice.h>
#include <linux/interrupt.h>
#include <linux/if_vlan.h>
#include <linux/ip.h>
#include <linux/ipv6.h>
#include <linux/module.h>
#include <linux/pci.h>
#include <linux/skbuff.h>
#include <linux/sctp.h>
#include <linux/vermagic.h>
#include <net/gre.h>
#include <net/pkt_cls.h>
#include <net/vxlan.h>

#include "hnae3.h"
#include "hns3_enet.h"

static const char hns3_driver_name[] = "hns3";
const char hns3_driver_version[] = VERMAGIC_STRING;
static const char hns3_driver_string[] =
			"Hisilicon Ethernet Network Driver for Hip08 Family";
static const char hns3_copyright[] = "Copyright (c) 2017 Huawei Corporation.";
static struct hnae3_client client;

/* hns3_pci_tbl - PCI Device ID Table
 *
 * Last entry must be all 0s
 *
 * { Vendor ID, Device ID, SubVendor ID, SubDevice ID,
 *   Class, Class Mask, private data (not used) }
 */
static const struct pci_device_id hns3_pci_tbl[] = {
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_25GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_50GE_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_MACSEC),
	 HNAE3_DEV_SUPPORT_ROCE_DCB_BITS},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_VF), 0},
	{PCI_VDEVICE(HUAWEI, HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, hns3_pci_tbl);

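/* Per-vector interrupt handler: defer all work to NAPI polling */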
static irqreturn_t hns3_irq_handle(int irq, void *dev)
{
	struct hns3_enet_tqp_vector *tqp_vector = dev;

	napi_schedule(&tqp_vector->napi);

	return IRQ_HANDLED;
}

static void hns3_nic_uninit_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	unsigned int i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag != HNS3_VECTOR_INITED)
			continue;

		/* release the irq resource */
		free_irq(tqp_vectors->vector_irq, tqp_vectors);
		tqp_vectors->irq_init_flag = HNS3_VECTOR_NOT_INITED;
	}
}

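/* Request one IRQ per in-use TQP vector, naming each after the netdev
 * and the ring types (TxRx/Rx/Tx) it services.
 */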
static int hns3_nic_init_irq(struct hns3_nic_priv *priv)
{
	struct hns3_enet_tqp_vector *tqp_vectors;
	int txrx_int_idx = 0;
	int rx_int_idx = 0;
	int tx_int_idx = 0;
	unsigned int i;
	int ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vectors = &priv->tqp_vector[i];

		if (tqp_vectors->irq_init_flag == HNS3_VECTOR_INITED)
			continue;

		if (tqp_vectors->tx_group.ring && tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "TxRx",
				 txrx_int_idx++);
			txrx_int_idx++;
		} else if (tqp_vectors->rx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Rx",
				 rx_int_idx++);
		} else if (tqp_vectors->tx_group.ring) {
			snprintf(tqp_vectors->name, HNAE3_INT_NAME_LEN - 1,
				 "%s-%s-%d", priv->netdev->name, "Tx",
				 tx_int_idx++);
		} else {
			/* Skip this unused q_vector */
			continue;
		}

		tqp_vectors->name[HNAE3_INT_NAME_LEN - 1] = '\0';

		ret = request_irq(tqp_vectors->vector_irq, hns3_irq_handle, 0,
				  tqp_vectors->name,
				  tqp_vectors);
		if (ret) {
			netdev_err(priv->netdev, "request irq(%d) fail\n",
				   tqp_vectors->vector_irq);
			return ret;
		}

		tqp_vectors->irq_init_flag = HNS3_VECTOR_INITED;
	}

	return 0;
}

static void hns3_mask_vector_irq(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 mask_en)
{
	writel(mask_en, tqp_vector->mask_addr);
}

static void hns3_vector_enable(struct hns3_enet_tqp_vector *tqp_vector)
{
	napi_enable(&tqp_vector->napi);

	/* enable vector */
	hns3_mask_vector_irq(tqp_vector, 1);
}

static void hns3_vector_disable(struct hns3_enet_tqp_vector *tqp_vector)
{
	/* disable vector */
	hns3_mask_vector_irq(tqp_vector, 0);

	disable_irq(tqp_vector->vector_irq);
	napi_disable(&tqp_vector->napi);
}

void hns3_set_vector_coalesce_rl(struct hns3_enet_tqp_vector *tqp_vector,
				 u32 rl_value)
{
	u32 rl_reg = hns3_rl_usec_to_reg(rl_value);

	/* This defines the configuration for RL (Interrupt Rate Limiter).
	 * RL defines the rate of interrupts, i.e. the number of interrupts
	 * per second. GL and RL (Rate Limiter) are two ways to achieve
	 * interrupt coalescing.
	 */

	if (rl_reg > 0 && !tqp_vector->tx_group.coal.gl_adapt_enable &&
	    !tqp_vector->rx_group.coal.gl_adapt_enable)
		/* According to the hardware, the range of rl_reg is
		 * 0-59 and the unit is 4.
		 */
		rl_reg |= HNS3_INT_RL_ENABLE_MASK;

	writel(rl_reg, tqp_vector->mask_addr + HNS3_VECTOR_RL_OFFSET);
}

void hns3_set_vector_coalesce_rx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 rx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(rx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL0_OFFSET);
}

void hns3_set_vector_coalesce_tx_gl(struct hns3_enet_tqp_vector *tqp_vector,
				    u32 gl_value)
{
	u32 tx_gl_reg = hns3_gl_usec_to_reg(gl_value);

	writel(tx_gl_reg, tqp_vector->mask_addr + HNS3_VECTOR_GL1_OFFSET);
}

static void hns3_vector_gl_rl_init(struct hns3_enet_tqp_vector *tqp_vector,
				   struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	/* initialize the configuration for interrupt coalescing.
	 * 1. GL (Interrupt Gap Limiter)
	 * 2. RL (Interrupt Rate Limiter)
	 */

	/* Default: enable interrupt coalescing self-adaptive and GL */
	tqp_vector->tx_group.coal.gl_adapt_enable = 1;
	tqp_vector->rx_group.coal.gl_adapt_enable = 1;

	tqp_vector->tx_group.coal.int_gl = HNS3_INT_GL_50K;
	tqp_vector->rx_group.coal.int_gl = HNS3_INT_GL_50K;

	/* Default: disable RL */
	h->kinfo.int_rl_setting = 0;

	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
	tqp_vector->rx_group.coal.flow_level = HNS3_FLOW_LOW;
	tqp_vector->tx_group.coal.flow_level = HNS3_FLOW_LOW;
}

static void hns3_vector_gl_rl_init_hw(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;

	hns3_set_vector_coalesce_tx_gl(tqp_vector,
				       tqp_vector->tx_group.coal.int_gl);
	hns3_set_vector_coalesce_rx_gl(tqp_vector,
				       tqp_vector->rx_group.coal.int_gl);
	hns3_set_vector_coalesce_rl(tqp_vector, h->kinfo.int_rl_setting);
}

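/* Tell the stack how many TX/RX queues are actually usable:
 * rss_size queues per enabled TC.
 */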
static int hns3_nic_set_real_num_queue(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	unsigned int queue_size = kinfo->rss_size * kinfo->num_tc;
	int ret;

	ret = netif_set_real_num_tx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_tx_queues fail, ret=%d!\n",
			   ret);
		return ret;
	}

	ret = netif_set_real_num_rx_queues(netdev, queue_size);
	if (ret) {
		netdev_err(netdev,
			   "netif_set_real_num_rx_queues fail, ret=%d!\n", ret);
		return ret;
	}

	return 0;
}

static u16 hns3_get_max_available_channels(struct hnae3_handle *h)
{
	u16 free_tqps, max_rss_size, max_tqps;

	h->ae_algo->ops->get_tqps_and_rss_info(h, &free_tqps, &max_rss_size);
	max_tqps = h->kinfo.num_tc * max_rss_size;

	return min_t(u16, max_tqps, (free_tqps + h->kinfo.num_tqps));
}

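/* Bring the datapath up: request vector IRQs, unmask the vectors and
 * start the hardware through the ae_dev ops; unwind on failure.
 */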
static int hns3_nic_net_up(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int i, j;
	int ret;

	/* get irq resource for all vectors */
	ret = hns3_nic_init_irq(priv);
	if (ret) {
		netdev_err(netdev, "hns init irq failed! ret=%d\n", ret);
		return ret;
	}

	/* enable the vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_enable(&priv->tqp_vector[i]);

	/* start the ae_dev */
	ret = h->ae_algo->ops->start ? h->ae_algo->ops->start(h) : 0;
	if (ret)
		goto out_start_err;

	clear_bit(HNS3_NIC_STATE_DOWN, &priv->state);

	return 0;

out_start_err:
	for (j = i - 1; j >= 0; j--)
		hns3_vector_disable(&priv->tqp_vector[j]);

	hns3_nic_uninit_irq(priv);

	return ret;
}

static int hns3_nic_net_open(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	netif_carrier_off(netdev);

	ret = hns3_nic_set_real_num_queue(netdev);
	if (ret)
		return ret;

	ret = hns3_nic_net_up(netdev);
	if (ret) {
		netdev_err(netdev,
			   "hns net up fail, ret=%d!\n", ret);
		return ret;
	}

	priv->ae_handle->last_reset_time = jiffies;
	return 0;
}

static void hns3_nic_net_down(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	const struct hnae3_ae_ops *ops;
	int i;

	if (test_and_set_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	/* stop ae_dev */
	ops = priv->ae_handle->ae_algo->ops;
	if (ops->stop)
		ops->stop(priv->ae_handle);

	/* disable vectors */
	for (i = 0; i < priv->vector_num; i++)
		hns3_vector_disable(&priv->tqp_vector[i]);

	/* free irq resources */
	hns3_nic_uninit_irq(priv);
}

static int hns3_nic_net_stop(struct net_device *netdev)
{
	netif_tx_stop_all_queues(netdev);
	netif_carrier_off(netdev);

	hns3_nic_net_down(netdev);

	return 0;
}

static int hns3_nic_uc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_uc_addr)
		return h->ae_algo->ops->add_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_uc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_uc_addr)
		return h->ae_algo->ops->rm_uc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_sync(struct net_device *netdev,
			    const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->add_mc_addr)
		return h->ae_algo->ops->add_mc_addr(h, addr);

	return 0;
}

static int hns3_nic_mc_unsync(struct net_device *netdev,
			      const unsigned char *addr)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->rm_mc_addr)
		return h->ae_algo->ops->rm_mc_addr(h, addr);

	return 0;
}

static void hns3_nic_set_rx_mode(struct net_device *netdev)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);

	if (h->ae_algo->ops->set_promisc_mode) {
		if (netdev->flags & IFF_PROMISC)
			h->ae_algo->ops->set_promisc_mode(h, 1);
		else
			h->ae_algo->ops->set_promisc_mode(h, 0);
	}
	if (__dev_uc_sync(netdev, hns3_nic_uc_sync, hns3_nic_uc_unsync))
		netdev_err(netdev, "sync uc address fail\n");
	if (netdev->flags & IFF_MULTICAST)
		if (__dev_mc_sync(netdev, hns3_nic_mc_sync, hns3_nic_mc_unsync))
			netdev_err(netdev, "sync mc address fail\n");
}

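/* Prepare a GSO skb for hardware TSO: clear the checksum fields the
 * hardware recomputes, fold the payload length out of the TCP pseudo
 * checksum, and fill the TSO bit and MSS for the TX BD.
 */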
static int hns3_set_tso(struct sk_buff *skb, u32 *paylen,
			u16 *mss, u32 *type_cs_vlan_tso)
{
	u32 l4_offset, hdr_len;
	union l3_hdr_info l3;
	union l4_hdr_info l4;
	u32 l4_paylen;
	int ret;

	if (!skb_is_gso(skb))
		return 0;

	ret = skb_cow_head(skb, 0);
	if (ret)
		return ret;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* Software should clear the IPv4's checksum field when tso is
	 * needed.
	 */
	if (l3.v4->version == 4)
		l3.v4->check = 0;

	/* tunnel packet */
	if (skb_shinfo(skb)->gso_type & (SKB_GSO_GRE |
					 SKB_GSO_GRE_CSUM |
					 SKB_GSO_UDP_TUNNEL |
					 SKB_GSO_UDP_TUNNEL_CSUM)) {
		if ((!(skb_shinfo(skb)->gso_type &
		    SKB_GSO_PARTIAL)) &&
		    (skb_shinfo(skb)->gso_type &
		    SKB_GSO_UDP_TUNNEL_CSUM)) {
			/* Software should clear the udp's checksum
			 * field when tso is needed.
			 */
			l4.udp->check = 0;
		}
		/* reset l3&l4 pointers from outer to inner headers */
		l3.hdr = skb_inner_network_header(skb);
		l4.hdr = skb_inner_transport_header(skb);

		/* Software should clear the IPv4's checksum field when
		 * tso is needed.
		 */
		if (l3.v4->version == 4)
			l3.v4->check = 0;
	}

	/* normal or tunnel packet */
	l4_offset = l4.hdr - skb->data;
	hdr_len = (l4.tcp->doff * 4) + l4_offset;

	/* remove payload length from inner pseudo checksum when tso */
	l4_paylen = skb->len - l4_offset;
	csum_replace_by_diff(&l4.tcp->check,
			     (__force __wsum)htonl(l4_paylen));

	/* find the txbd field values */
	*paylen = skb->len - hdr_len;
	hnae_set_bit(*type_cs_vlan_tso,
		     HNS3_TXD_TSO_B, 1);

	/* get MSS for TSO */
	*mss = skb_shinfo(skb)->gso_size;

	return 0;
}

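/* Resolve the outer and (for encapsulated skbs) inner L4 protocol
 * numbers, walking IPv6 extension headers where necessary.
 */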
static int hns3_get_l4_protocol(struct sk_buff *skb, u8 *ol4_proto,
				u8 *il4_proto)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	unsigned char *l4_hdr;
	unsigned char *exthdr;
	u8 l4_proto_tmp;
	__be16 frag_off;

	/* find outer header point */
	l3.hdr = skb_network_header(skb);
	l4_hdr = skb_transport_header(skb);

	if (skb->protocol == htons(ETH_P_IPV6)) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (skb->protocol == htons(ETH_P_IP)) {
		l4_proto_tmp = l3.v4->protocol;
	} else {
		return -EINVAL;
	}

	*ol4_proto = l4_proto_tmp;

	/* not a tunnel packet, no inner l4 protocol to resolve */
	if (!skb->encapsulation) {
		*il4_proto = 0;
		return 0;
	}

	/* find inner header point */
	l3.hdr = skb_inner_network_header(skb);
	l4_hdr = skb_inner_transport_header(skb);

	if (l3.v6->version == 6) {
		exthdr = l3.hdr + sizeof(*l3.v6);
		l4_proto_tmp = l3.v6->nexthdr;
		if (l4_hdr != exthdr)
			ipv6_skip_exthdr(skb, exthdr - skb->data,
					 &l4_proto_tmp, &frag_off);
	} else if (l3.v4->version == 4) {
		l4_proto_tmp = l3.v4->protocol;
	}

	*il4_proto = l4_proto_tmp;

	return 0;
}

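/* Fill the L2/L3/L4 (and outer OL2/OL3/OL4 for tunnels) header lengths
 * into the TX BD fields, in the 2-byte and 4-byte units the hardware
 * expects.
 */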
static void hns3_set_l2l3l4_len(struct sk_buff *skb, u8 ol4_proto,
				u8 il4_proto, u32 *type_cs_vlan_tso,
				u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	union {
		struct tcphdr *tcp;
		struct udphdr *udp;
		struct gre_base_hdr *gre;
		unsigned char *hdr;
	} l4;
	unsigned char *l2_hdr;
	u8 l4_proto = ol4_proto;
	u32 ol2_len;
	u32 ol3_len;
	u32 ol4_len;
	u32 l2_len;
	u32 l3_len;

	l3.hdr = skb_network_header(skb);
	l4.hdr = skb_transport_header(skb);

	/* compute L2 header size for normal packet, defined in 2 Bytes */
	l2_len = l3.hdr - skb->data;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
		       HNS3_TXD_L2LEN_S, l2_len >> 1);

	/* tunnel packet */
	if (skb->encapsulation) {
		/* compute OL2 header size, defined in 2 Bytes */
		ol2_len = l2_len;
		hnae_set_field(*ol_type_vlan_len_msec,
			       HNS3_TXD_L2LEN_M,
			       HNS3_TXD_L2LEN_S, ol2_len >> 1);

		/* compute OL3 header size, defined in 4 Bytes */
		ol3_len = l4.hdr - l3.hdr;
		hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L3LEN_M,
			       HNS3_TXD_L3LEN_S, ol3_len >> 2);

		/* MAC in UDP, MAC in GRE (0x6558) */
		if ((ol4_proto == IPPROTO_UDP) || (ol4_proto == IPPROTO_GRE)) {
			/* switch MAC header ptr from outer to inner header */
			l2_hdr = skb_inner_mac_header(skb);

			/* compute OL4 header size, defined in 4 Bytes */
			ol4_len = l2_hdr - l4.hdr;
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_L4LEN_M,
				       HNS3_TXD_L4LEN_S, ol4_len >> 2);

			/* switch IP header ptr from outer to inner header */
			l3.hdr = skb_inner_network_header(skb);

			/* compute inner l2 header size, defined in 2 Bytes */
			l2_len = l3.hdr - l2_hdr;
			hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L2LEN_M,
				       HNS3_TXD_L2LEN_S, l2_len >> 1);
		} else {
			/* skb packet types not supported by hardware,
			 * the txbd len field is not filled.
			 */
			return;
		}

		/* switch L4 header pointer from outer to inner */
		l4.hdr = skb_inner_transport_header(skb);

		l4_proto = il4_proto;
	}

	/* compute inner(/normal) L3 header size, defined in 4 Bytes */
	l3_len = l4.hdr - l3.hdr;
	hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3LEN_M,
		       HNS3_TXD_L3LEN_S, l3_len >> 2);

	/* compute inner(/normal) L4 header size, defined in 4 Bytes */
	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, l4.tcp->doff);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct sctphdr) >> 2));
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L4LEN_M,
			       HNS3_TXD_L4LEN_S, (sizeof(struct udphdr) >> 2));
		break;
	default:
		/* skb packet types not supported by hardware,
		 * the txbd len field is not filled.
		 */
		return;
	}
}

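/* Program the L3/L4 (and tunnel) type and checksum-offload bits in the
 * TX BD; fall back to software checksumming, or reject GSO skbs, for
 * protocol types the hardware cannot offload.
 */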
static int hns3_set_l3l4_type_csum(struct sk_buff *skb, u8 ol4_proto,
				   u8 il4_proto, u32 *type_cs_vlan_tso,
				   u32 *ol_type_vlan_len_msec)
{
	union {
		struct iphdr *v4;
		struct ipv6hdr *v6;
		unsigned char *hdr;
	} l3;
	u32 l4_proto = ol4_proto;

	l3.hdr = skb_network_header(skb);

	/* define OL3 type and tunnel type(OL4) */
	if (skb->encapsulation) {
		/* define outer network header type */
		if (skb->protocol == htons(ETH_P_IP)) {
			if (skb_is_gso(skb))
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_CSUM);
			else
				hnae_set_field(*ol_type_vlan_len_msec,
					       HNS3_TXD_OL3T_M, HNS3_TXD_OL3T_S,
					       HNS3_OL3T_IPV4_NO_CSUM);

		} else if (skb->protocol == htons(ETH_P_IPV6)) {
			hnae_set_field(*ol_type_vlan_len_msec, HNS3_TXD_OL3T_M,
				       HNS3_TXD_OL3T_S, HNS3_OL3T_IPV6);
		}

		/* define tunnel type(OL4) */
		switch (l4_proto) {
		case IPPROTO_UDP:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_MAC_IN_UDP);
			break;
		case IPPROTO_GRE:
			hnae_set_field(*ol_type_vlan_len_msec,
				       HNS3_TXD_TUNTYPE_M,
				       HNS3_TXD_TUNTYPE_S,
				       HNS3_TUN_NVGRE);
			break;
		default:
			/* drop the skb if the hardware doesn't support the
			 * tunnel type, because it can't calculate the csum
			 * during TSO.
			 */
			if (skb_is_gso(skb))
				return -EDOM;

			/* the stack computes the IP header already;
			 * the driver calculates the l4 checksum when not TSO.
			 */
			skb_checksum_help(skb);
			return 0;
		}

		l3.hdr = skb_inner_network_header(skb);
		l4_proto = il4_proto;
	}

	if (l3.v4->version == 4) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV4);

		/* the stack computes the IP header already, the only time we
		 * need the hardware to recompute it is in the case of TSO.
		 */
		if (skb_is_gso(skb))
			hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L3CS_B, 1);

		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	} else if (l3.v6->version == 6) {
		hnae_set_field(*type_cs_vlan_tso, HNS3_TXD_L3T_M,
			       HNS3_TXD_L3T_S, HNS3_L3T_IPV6);
		hnae_set_bit(*type_cs_vlan_tso, HNS3_TXD_L4CS_B, 1);
	}

	switch (l4_proto) {
	case IPPROTO_TCP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M,
			       HNS3_TXD_L4T_S,
			       HNS3_L4T_TCP);
		break;
	case IPPROTO_UDP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M,
			       HNS3_TXD_L4T_S,
			       HNS3_L4T_UDP);
		break;
	case IPPROTO_SCTP:
		hnae_set_field(*type_cs_vlan_tso,
			       HNS3_TXD_L4T_M,
			       HNS3_TXD_L4T_S,
			       HNS3_L4T_SCTP);
		break;
	default:
		/* drop the skb if the hardware doesn't support the L4
		 * protocol, because it can't calculate the csum during TSO.
		 */
		if (skb_is_gso(skb))
			return -EDOM;

		/* the stack computes the IP header already;
		 * the driver calculates the l4 checksum when not TSO.
		 */
		skb_checksum_help(skb);
		return 0;
	}

	return 0;
}

static void hns3_set_txbd_baseinfo(u16 *bdtp_fe_sc_vld_ra_ri, int frag_end)
{
	/* Config bd buffer end */
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_BDTYPE_M,
		       HNS3_TXD_BDTYPE_S, 0);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_FE_B, !!frag_end);
	hnae_set_bit(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_VLD_B, 1);
	hnae_set_field(*bdtp_fe_sc_vld_ra_ri, HNS3_TXD_SC_M, HNS3_TXD_SC_S, 0);
}

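/* Resolve the VLAN tags to place in the TX BD: per hardware strategy,
 * the outer tag is used for double-tagged frames and the inner tag for
 * single-tagged ones.
 */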
static int hns3_fill_desc_vtags(struct sk_buff *skb,
				struct hns3_enet_ring *tx_ring,
				u32 *inner_vlan_flag,
				u32 *out_vlan_flag,
				u16 *inner_vtag,
				u16 *out_vtag)
{
#define HNS3_TX_VLAN_PRIO_SHIFT 13

	if (skb->protocol == htons(ETH_P_8021Q) &&
	    !(tx_ring->tqp->handle->kinfo.netdev->features &
	    NETIF_F_HW_VLAN_CTAG_TX)) {
		/* When HW VLAN acceleration is turned off, and the stack
		 * sets the protocol to 802.1q, the driver just needs to
		 * set the protocol to the encapsulated ethertype.
		 */
		skb->protocol = vlan_get_protocol(skb);
		return 0;
	}

	if (skb_vlan_tag_present(skb)) {
		u16 vlan_tag;

		vlan_tag = skb_vlan_tag_get(skb);
		vlan_tag |= (skb->priority & 0x7) << HNS3_TX_VLAN_PRIO_SHIFT;

		/* Based on hw strategy, use out_vtag in two layer tag case,
		 * and use inner_vtag in one tag case.
		 */
		if (skb->protocol == htons(ETH_P_8021Q)) {
			hnae_set_bit(*out_vlan_flag, HNS3_TXD_OVLAN_B, 1);
			*out_vtag = vlan_tag;
		} else {
			hnae_set_bit(*inner_vlan_flag, HNS3_TXD_VLAN_B, 1);
			*inner_vtag = vlan_tag;
		}
	} else if (skb->protocol == htons(ETH_P_8021Q)) {
		struct vlan_ethhdr *vhdr;
		int rc;

		rc = skb_cow_head(skb, 0);
		if (rc < 0)
			return rc;
		vhdr = (struct vlan_ethhdr *)skb->data;
		vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority & 0x7)
					<< HNS3_TX_VLAN_PRIO_SHIFT);
	}

	skb->protocol = vlan_get_protocol(skb);
	return 0;
}

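/* Fill one TX BD. For the head (DESC_TYPE_SKB) descriptor this also
 * resolves VLAN tags, checksum-offload bits and TSO fields before the
 * ring pointer is advanced.
 */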
static int hns3_fill_desc(struct hns3_enet_ring *ring, void *priv,
			  int size, dma_addr_t dma, int frag_end,
			  enum hns_desc_type type)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_use];
	struct hns3_desc *desc = &ring->desc[ring->next_to_use];
	u32 ol_type_vlan_len_msec = 0;
	u16 bdtp_fe_sc_vld_ra_ri = 0;
	u32 type_cs_vlan_tso = 0;
	struct sk_buff *skb;
	u16 inner_vtag = 0;
	u16 out_vtag = 0;
	u32 paylen = 0;
	u16 mss = 0;
	__be16 protocol;
	u8 ol4_proto;
	u8 il4_proto;
	int ret;

	/* The txbd's baseinfo of DESC_TYPE_PAGE & DESC_TYPE_SKB */
	desc_cb->priv = priv;
	desc_cb->length = size;
	desc_cb->dma = dma;
	desc_cb->type = type;

	/* now, fill the descriptor */
	desc->addr = cpu_to_le64(dma);
	desc->tx.send_size = cpu_to_le16((u16)size);
	hns3_set_txbd_baseinfo(&bdtp_fe_sc_vld_ra_ri, frag_end);
	desc->tx.bdtp_fe_sc_vld_ra_ri = cpu_to_le16(bdtp_fe_sc_vld_ra_ri);

	if (type == DESC_TYPE_SKB) {
		skb = (struct sk_buff *)priv;
		paylen = skb->len;

		ret = hns3_fill_desc_vtags(skb, ring, &type_cs_vlan_tso,
					   &ol_type_vlan_len_msec,
					   &inner_vtag, &out_vtag);
		if (unlikely(ret))
			return ret;

		if (skb->ip_summed == CHECKSUM_PARTIAL) {
			skb_reset_mac_len(skb);
			protocol = skb->protocol;

			ret = hns3_get_l4_protocol(skb, &ol4_proto, &il4_proto);
			if (ret)
				return ret;
			hns3_set_l2l3l4_len(skb, ol4_proto, il4_proto,
					    &type_cs_vlan_tso,
					    &ol_type_vlan_len_msec);
			ret = hns3_set_l3l4_type_csum(skb, ol4_proto, il4_proto,
						      &type_cs_vlan_tso,
						      &ol_type_vlan_len_msec);
			if (ret)
				return ret;

			ret = hns3_set_tso(skb, &paylen, &mss,
					   &type_cs_vlan_tso);
			if (ret)
				return ret;
		}

		/* Set txbd */
		desc->tx.ol_type_vlan_len_msec =
			cpu_to_le32(ol_type_vlan_len_msec);
		desc->tx.type_cs_vlan_tso_len =
			cpu_to_le32(type_cs_vlan_tso);
		desc->tx.paylen = cpu_to_le32(paylen);
		desc->tx.mss = cpu_to_le16(mss);
		desc->tx.vlan_tag = cpu_to_le16(inner_vtag);
		desc->tx.outer_vlan_tag = cpu_to_le16(out_vtag);
	}

	/* move ring pointer to next */
	ring_ptr_move_fw(ring, next_to_use);

	return 0;
}

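/* TSO variant of hns3_fill_desc(): a buffer larger than
 * HNS3_MAX_BD_SIZE is split across several BDs, with only the first BD
 * of the skb head carrying DESC_TYPE_SKB.
 */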
static int hns3_fill_desc_tso(struct hns3_enet_ring *ring, void *priv,
			      int size, dma_addr_t dma, int frag_end,
			      enum hns_desc_type type)
{
	unsigned int frag_buf_num;
	unsigned int k;
	int sizeoflast;
	int ret;

	frag_buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
	sizeoflast = size % HNS3_MAX_BD_SIZE;
	sizeoflast = sizeoflast ? sizeoflast : HNS3_MAX_BD_SIZE;

	/* When the frag size is bigger than hardware, split this frag */
	for (k = 0; k < frag_buf_num; k++) {
		ret = hns3_fill_desc(ring, priv,
				     (k == frag_buf_num - 1) ?
				     sizeoflast : HNS3_MAX_BD_SIZE,
				     dma + HNS3_MAX_BD_SIZE * k,
				     frag_end && (k == frag_buf_num - 1) ? 1 : 0,
				     (type == DESC_TYPE_SKB && !k) ?
				     DESC_TYPE_SKB : DESC_TYPE_PAGE);
		if (ret)
			return ret;
	}

	return 0;
}

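/* Count the BDs the skb needs and return -EBUSY when the ring lacks
 * space, so the caller can stop the queue instead of dropping.
 */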
static int hns3_nic_maybe_stop_tso(struct sk_buff **out_skb, int *bnum,
				   struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	struct skb_frag_struct *frag;
	int bdnum_for_frag;
	int frag_num;
	int buf_num;
	int size;
	int i;

	size = skb_headlen(skb);
	buf_num = (size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;

	frag_num = skb_shinfo(skb)->nr_frags;
	for (i = 0; i < frag_num; i++) {
		frag = &skb_shinfo(skb)->frags[i];
		size = skb_frag_size(frag);
		bdnum_for_frag =
			(size + HNS3_MAX_BD_SIZE - 1) / HNS3_MAX_BD_SIZE;
		if (bdnum_for_frag > HNS3_MAX_BD_PER_FRAG)
			return -ENOMEM;

		buf_num += bdnum_for_frag;
	}

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;
	return 0;
}

static int hns3_nic_maybe_stop_tx(struct sk_buff **out_skb, int *bnum,
				  struct hns3_enet_ring *ring)
{
	struct sk_buff *skb = *out_skb;
	int buf_num;

	/* No. of segments (plus a header) */
	buf_num = skb_shinfo(skb)->nr_frags + 1;

	if (buf_num > ring_space(ring))
		return -EBUSY;

	*bnum = buf_num;

	return 0;
}

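/* Unwind the DMA mappings of BDs filled so far, walking next_to_use
 * back to where this xmit attempt started.
 */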
static void hns_nic_dma_unmap(struct hns3_enet_ring *ring, int next_to_use_orig)
{
	struct device *dev = ring_to_dev(ring);
	unsigned int i;

	for (i = 0; i < ring->desc_num; i++) {
		/* check if this is where we started */
		if (ring->next_to_use == next_to_use_orig)
			break;

		/* unmap the descriptor dma address */
		if (ring->desc_cb[ring->next_to_use].type == DESC_TYPE_SKB)
			dma_unmap_single(dev,
					 ring->desc_cb[ring->next_to_use].dma,
					 ring->desc_cb[ring->next_to_use].length,
					 DMA_TO_DEVICE);
		else
			dma_unmap_page(dev,
				       ring->desc_cb[ring->next_to_use].dma,
				       ring->desc_cb[ring->next_to_use].length,
				       DMA_TO_DEVICE);

		/* rollback one */
		ring_ptr_move_bw(ring, next_to_use);
	}
}

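/* Main transmit path: map the linear head and each fragment, fill BDs
 * through priv->ops.fill_desc and ring the doorbell; on mapping or
 * fill failure, already-filled BDs are unmapped and the skb is dropped.
 */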
netdev_tx_t hns3_nic_net_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hns3_nic_ring_data *ring_data =
		&tx_ring_data(priv, skb->queue_mapping);
	struct hns3_enet_ring *ring = ring_data->ring;
	struct device *dev = priv->dev;
	struct netdev_queue *dev_queue;
	struct skb_frag_struct *frag;
	int next_to_use_head;
	int next_to_use_frag;
	dma_addr_t dma;
	int buf_num;
	int seg_num;
	int size;
	int ret;
	int i;

	/* Prefetch the data used later */
	prefetch(skb->data);

	switch (priv->ops.maybe_stop_tx(&skb, &buf_num, ring)) {
	case -EBUSY:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.tx_busy++;
		u64_stats_update_end(&ring->syncp);

		goto out_net_tx_busy;
	case -ENOMEM:
		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		netdev_err(netdev, "no memory to xmit!\n");

		goto out_err_tx_ok;
	default:
		break;
	}

	/* No. of segments (plus a header) */
	seg_num = skb_shinfo(skb)->nr_frags + 1;
	/* Fill the first part */
	size = skb_headlen(skb);

	next_to_use_head = ring->next_to_use;

	dma = dma_map_single(dev, skb->data, size, DMA_TO_DEVICE);
	if (dma_mapping_error(dev, dma)) {
		netdev_err(netdev, "TX head DMA map failed\n");
		ring->stats.sw_err_cnt++;
		goto out_err_tx_ok;
	}

	ret = priv->ops.fill_desc(ring, skb, size, dma, seg_num == 1 ? 1 : 0,
				  DESC_TYPE_SKB);
	if (ret)
		goto head_dma_map_err;

	next_to_use_frag = ring->next_to_use;
	/* Fill the fragments */
	for (i = 1; i < seg_num; i++) {
		frag = &skb_shinfo(skb)->frags[i - 1];
		size = skb_frag_size(frag);
		dma = skb_frag_dma_map(dev, frag, 0, size, DMA_TO_DEVICE);
		if (dma_mapping_error(dev, dma)) {
			netdev_err(netdev, "TX frag(%d) DMA map failed\n", i);
			ring->stats.sw_err_cnt++;
			goto frag_dma_map_err;
		}
		ret = priv->ops.fill_desc(ring, skb_frag_page(frag), size, dma,
					  seg_num - 1 == i ? 1 : 0,
					  DESC_TYPE_PAGE);

		if (ret)
			goto frag_dma_map_err;
	}

	/* Complete translate all packets */
	dev_queue = netdev_get_tx_queue(netdev, ring_data->queue_index);
	netdev_tx_sent_queue(dev_queue, skb->len);

	wmb(); /* Commit all data before submit */

	hnae_queue_xmit(ring->tqp, buf_num);

	return NETDEV_TX_OK;

frag_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_frag);

head_dma_map_err:
	hns_nic_dma_unmap(ring, next_to_use_head);

out_err_tx_ok:
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;

out_net_tx_busy:
	netif_stop_subqueue(netdev, ring_data->queue_index);
	smp_mb(); /* Commit all data before submit */

	return NETDEV_TX_BUSY;
}

static int hns3_nic_net_set_mac_address(struct net_device *netdev, void *p)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct sockaddr *mac_addr = p;
	int ret;

	if (!mac_addr || !is_valid_ether_addr((const u8 *)mac_addr->sa_data))
		return -EADDRNOTAVAIL;

	ret = h->ae_algo->ops->set_mac_addr(h, mac_addr->sa_data, false);
	if (ret) {
		netdev_err(netdev, "set_mac_address fail, ret=%d!\n", ret);
		return ret;
	}

	ether_addr_copy(netdev->dev_addr, mac_addr->sa_data);

	return 0;
}

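/* ndo_set_features: swap the TSO/non-TSO descriptor ops and toggle
 * hardware VLAN filtering/stripping as the feature bits change.
 */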
static int hns3_nic_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	netdev_features_t changed = netdev->features ^ features;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	int ret;

	if (changed & (NETIF_F_TSO | NETIF_F_TSO6)) {
		if (features & (NETIF_F_TSO | NETIF_F_TSO6)) {
			priv->ops.fill_desc = hns3_fill_desc_tso;
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
		} else {
			priv->ops.fill_desc = hns3_fill_desc;
			priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
		}
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    h->ae_algo->ops->enable_vlan_filter) {
		if (features & NETIF_F_HW_VLAN_CTAG_FILTER)
			h->ae_algo->ops->enable_vlan_filter(h, true);
		else
			h->ae_algo->ops->enable_vlan_filter(h, false);
	}

	if ((changed & NETIF_F_HW_VLAN_CTAG_RX) &&
	    h->ae_algo->ops->enable_hw_strip_rxvtag) {
		if (features & NETIF_F_HW_VLAN_CTAG_RX)
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, true);
		else
			ret = h->ae_algo->ops->enable_hw_strip_rxvtag(h, false);

		if (ret)
			return ret;
	}

	netdev->features = features;
	return 0;
}

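/* Aggregate per-ring TX/RX counters (under u64_stats retry loops) and
 * fold in the netdev error counters maintained by the ae_dev.
 */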
static void hns3_nic_get_stats64(struct net_device *netdev,
				 struct rtnl_link_stats64 *stats)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct hnae3_handle *handle = priv->ae_handle;
	struct hns3_enet_ring *ring;
	unsigned int start;
	unsigned int idx;
	u64 tx_bytes = 0;
	u64 rx_bytes = 0;
	u64 tx_pkts = 0;
	u64 rx_pkts = 0;
	u64 tx_drop = 0;
	u64 rx_drop = 0;

	if (test_bit(HNS3_NIC_STATE_DOWN, &priv->state))
		return;

	handle->ae_algo->ops->update_stats(handle, &netdev->stats);

	for (idx = 0; idx < queue_num; idx++) {
		/* fetch the tx stats */
		ring = priv->ring_data[idx].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			tx_bytes += ring->stats.tx_bytes;
			tx_pkts += ring->stats.tx_pkts;
			tx_drop += ring->stats.tx_busy;
			tx_drop += ring->stats.sw_err_cnt;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));

		/* fetch the rx stats */
		ring = priv->ring_data[idx + queue_num].ring;
		do {
			start = u64_stats_fetch_begin_irq(&ring->syncp);
			rx_bytes += ring->stats.rx_bytes;
			rx_pkts += ring->stats.rx_pkts;
			rx_drop += ring->stats.non_vld_descs;
			rx_drop += ring->stats.err_pkt_len;
			rx_drop += ring->stats.l2_err;
		} while (u64_stats_fetch_retry_irq(&ring->syncp, start));
	}

	stats->tx_bytes = tx_bytes;
	stats->tx_packets = tx_pkts;
	stats->rx_bytes = rx_bytes;
	stats->rx_packets = rx_pkts;

	stats->rx_errors = netdev->stats.rx_errors;
	stats->multicast = netdev->stats.multicast;
	stats->rx_length_errors = netdev->stats.rx_length_errors;
	stats->rx_crc_errors = netdev->stats.rx_crc_errors;
	stats->rx_missed_errors = netdev->stats.rx_missed_errors;

	stats->tx_errors = netdev->stats.tx_errors;
	stats->rx_dropped = rx_drop + netdev->stats.rx_dropped;
	stats->tx_dropped = tx_drop + netdev->stats.tx_dropped;
	stats->collisions = netdev->stats.collisions;
	stats->rx_over_errors = netdev->stats.rx_over_errors;
	stats->rx_frame_errors = netdev->stats.rx_frame_errors;
	stats->rx_fifo_errors = netdev->stats.rx_fifo_errors;
	stats->tx_aborted_errors = netdev->stats.tx_aborted_errors;
	stats->tx_carrier_errors = netdev->stats.tx_carrier_errors;
	stats->tx_fifo_errors = netdev->stats.tx_fifo_errors;
	stats->tx_heartbeat_errors = netdev->stats.tx_heartbeat_errors;
	stats->tx_window_errors = netdev->stats.tx_window_errors;
	stats->rx_compressed = netdev->stats.rx_compressed;
	stats->tx_compressed = netdev->stats.tx_compressed;
}

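/* mqprio offload: program the TC-to-priority map through the DCB ops,
 * then refresh the netdev TC/queue layout; the netdev is briefly
 * brought down while the mapping changes.
 */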
static int hns3_setup_tc(struct net_device *netdev, void *type_data)
{
	struct tc_mqprio_qopt_offload *mqprio_qopt = type_data;
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	u8 *prio_tc = mqprio_qopt->qopt.prio_tc_map;
	u8 tc = mqprio_qopt->qopt.num_tc;
	u16 mode = mqprio_qopt->mode;
	u8 hw = mqprio_qopt->qopt.hw;
	bool if_running;
	unsigned int i;
	int ret;

	if (!((hw == TC_MQPRIO_HW_OFFLOAD_TCS &&
	       mode == TC_MQPRIO_MODE_CHANNEL) || (!hw && tc == 0)))
		return -EOPNOTSUPP;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!netdev)
		return -EINVAL;

	if_running = netif_running(netdev);
	if (if_running) {
		hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->setup_tc) ?
		kinfo->dcb_ops->setup_tc(h, tc, prio_tc) : -EOPNOTSUPP;
	if (ret)
		goto out;

	if (tc <= 1) {
		netdev_reset_tc(netdev);
	} else {
		ret = netdev_set_num_tc(netdev, tc);
		if (ret)
			goto out;

		for (i = 0; i < HNAE3_MAX_TC; i++) {
			if (!kinfo->tc_info[i].enable)
				continue;

			netdev_set_tc_queue(netdev,
					    kinfo->tc_info[i].tc,
					    kinfo->tc_info[i].tqp_count,
					    kinfo->tc_info[i].tqp_offset);
		}
	}

	ret = hns3_nic_set_real_num_queue(netdev);

out:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}

static int hns3_nic_setup_tc(struct net_device *dev, enum tc_setup_type type,
			     void *type_data)
{
	if (type != TC_SETUP_QDISC_MQPRIO)
		return -EOPNOTSUPP;

	return hns3_setup_tc(dev, type_data);
}

static int hns3_vlan_rx_add_vid(struct net_device *netdev,
				__be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, false);

	if (!ret)
		set_bit(vid, priv->active_vlans);

	return ret;
}

static int hns3_vlan_rx_kill_vid(struct net_device *netdev,
				 __be16 proto, u16 vid)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vlan_filter)
		ret = h->ae_algo->ops->set_vlan_filter(h, proto, vid, true);

	if (!ret)
		clear_bit(vid, priv->active_vlans);

	return ret;
}

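/* Replay the VLAN filters into hardware (e.g. after a reset), using the
 * active_vlans bitmap kept by the add/kill callbacks above.
 */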
static void hns3_restore_vlan(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	u16 vid;
	int ret;

	for_each_set_bit(vid, priv->active_vlans, VLAN_N_VID) {
		ret = hns3_vlan_rx_add_vid(netdev, htons(ETH_P_8021Q), vid);
		if (ret)
			netdev_warn(netdev, "Restore vlan: %d filter, ret:%d\n",
				    vid, ret);
	}
}

static int hns3_ndo_set_vf_vlan(struct net_device *netdev, int vf, u16 vlan,
				u8 qos, __be16 vlan_proto)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret = -EIO;

	if (h->ae_algo->ops->set_vf_vlan_filter)
		ret = h->ae_algo->ops->set_vf_vlan_filter(h, vf, vlan,
							  qos, vlan_proto);

	return ret;
}

static int hns3_nic_change_mtu(struct net_device *netdev, int new_mtu)
{
	struct hnae3_handle *h = hns3_get_handle(netdev);
	bool if_running = netif_running(netdev);
	int ret;

	if (!h->ae_algo->ops->set_mtu)
		return -EOPNOTSUPP;

	/* if this was called with netdev up then bring netdevice down */
	if (if_running) {
		(void)hns3_nic_net_stop(netdev);
		msleep(100);
	}

	ret = h->ae_algo->ops->set_mtu(h, new_mtu);
	if (ret) {
		netdev_err(netdev, "failed to change MTU in hardware %d\n",
			   ret);
		return ret;
	}

	netdev->mtu = new_mtu;

	/* if the netdev was running earlier, bring it up again */
	if (if_running && hns3_nic_net_open(netdev))
		ret = -EINVAL;

	return ret;
}

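/* Locate the stopped TX queue that triggered the watchdog and dump its
 * software and hardware ring pointers for debugging; returns false if
 * no queue actually timed out.
 */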
static bool hns3_get_tx_timeo_queue_info(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hns3_enet_ring *tx_ring = NULL;
	int timeout_queue = 0;
	int hw_head, hw_tail;
	int i;

	/* Find the stopped queue the same way the stack does */
	for (i = 0; i < ndev->real_num_tx_queues; i++) {
		struct netdev_queue *q;
		unsigned long trans_start;

		q = netdev_get_tx_queue(ndev, i);
		trans_start = q->trans_start;
		if (netif_xmit_stopped(q) &&
		    time_after(jiffies,
			       (trans_start + ndev->watchdog_timeo))) {
			timeout_queue = i;
			break;
		}
	}

	if (i == ndev->num_tx_queues) {
		netdev_info(ndev,
			    "no netdev TX timeout queue found, timeout count: %llu\n",
			    priv->tx_timeout_count);
		return false;
	}

	tx_ring = priv->ring_data[timeout_queue].ring;

	hw_head = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_HEAD_REG);
	hw_tail = readl_relaxed(tx_ring->tqp->io_base +
				HNS3_RING_TX_RING_TAIL_REG);
	netdev_info(ndev,
		    "tx_timeout count: %llu, queue id: %d, SW_NTU: 0x%x, SW_NTC: 0x%x, HW_HEAD: 0x%x, HW_TAIL: 0x%x, INT: 0x%x\n",
		    priv->tx_timeout_count,
		    timeout_queue,
		    tx_ring->next_to_use,
		    tx_ring->next_to_clean,
		    hw_head,
		    hw_tail,
		    readl(tx_ring->tqp_vector->mask_addr));

	return true;
}

static void hns3_nic_net_timeout(struct net_device *ndev)
{
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	struct hnae3_handle *h = priv->ae_handle;

	if (!hns3_get_tx_timeo_queue_info(ndev))
		return;

	priv->tx_timeout_count++;

	if (time_before(jiffies, (h->last_reset_time + ndev->watchdog_timeo)))
		return;

	/* request the reset */
	if (h->ae_algo->ops->reset_event)
		h->ae_algo->ops->reset_event(h);
}

static const struct net_device_ops hns3_nic_netdev_ops = {
	.ndo_open		= hns3_nic_net_open,
	.ndo_stop		= hns3_nic_net_stop,
	.ndo_start_xmit		= hns3_nic_net_xmit,
	.ndo_tx_timeout		= hns3_nic_net_timeout,
	.ndo_set_mac_address	= hns3_nic_net_set_mac_address,
	.ndo_change_mtu		= hns3_nic_change_mtu,
	.ndo_set_features	= hns3_nic_set_features,
	.ndo_get_stats64	= hns3_nic_get_stats64,
	.ndo_setup_tc		= hns3_nic_setup_tc,
	.ndo_set_rx_mode	= hns3_nic_set_rx_mode,
	.ndo_vlan_rx_add_vid	= hns3_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= hns3_vlan_rx_kill_vid,
	.ndo_set_vf_vlan	= hns3_ndo_set_vf_vlan,
};

static bool hns3_is_phys_func(struct pci_dev *pdev)
{
	u32 dev_id = pdev->device;

	switch (dev_id) {
	case HNAE3_DEV_ID_GE:
	case HNAE3_DEV_ID_25GE:
	case HNAE3_DEV_ID_25GE_RDMA:
	case HNAE3_DEV_ID_25GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_50GE_RDMA:
	case HNAE3_DEV_ID_50GE_RDMA_MACSEC:
	case HNAE3_DEV_ID_100G_RDMA_MACSEC:
		return true;
	case HNAE3_DEV_ID_100G_VF:
	case HNAE3_DEV_ID_100G_RDMA_DCB_PFC_VF:
		return false;
	default:
		dev_warn(&pdev->dev, "unrecognized pci device-id %d",
			 dev_id);
	}

	return false;
}

static int get_num_req_vfs(struct pci_dev *pdev)
{
	/* a variable vf num will be supported later */
	return pci_sriov_get_totalvfs(pdev);
}

static void hns3_enable_sriov(struct pci_dev *pdev)
{
	int num_req_vfs = get_num_req_vfs(pdev);
	int ret;

	/* Enable SRIOV */
	if (!num_req_vfs)
		return;

	dev_info(&pdev->dev, "active VFs(%d) found, enabling SRIOV\n",
		 num_req_vfs);

	ret = pci_enable_sriov(pdev, num_req_vfs);
	if (ret)
		dev_err(&pdev->dev, "SRIOV enable failed %d\n", ret);
}

static void hns3_disable_sriov(struct pci_dev *pdev)
{
	/* If our VFs are assigned we cannot shut down SR-IOV
	 * without causing issues, so just leave the hardware
	 * available but disabled
	 */
	if (pci_vfs_assigned(pdev)) {
		dev_warn(&pdev->dev,
			 "disabling driver while VFs are assigned\n");
		return;
	}

	pci_disable_sriov(pdev);
}

/* hns3_probe - Device initialization routine
 * @pdev: PCI device information struct
 * @ent: entry in hns3_pci_tbl
 *
 * hns3_probe initializes a PF identified by a pci_dev structure.
 * The OS initialization, configuring of the PF private structure,
 * and a hardware reset occur.
 *
 * Returns 0 on success, negative on failure
 */
static int hns3_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
	struct hnae3_ae_dev *ae_dev;
	int ret;

	ae_dev = devm_kzalloc(&pdev->dev, sizeof(*ae_dev),
			      GFP_KERNEL);
	if (!ae_dev) {
		ret = -ENOMEM;
		return ret;
	}

	ae_dev->pdev = pdev;
	ae_dev->flag = ent->driver_data;
	ae_dev->dev_type = HNAE3_DEV_KNIC;
	pci_set_drvdata(pdev, ae_dev);

	ret = hnae3_register_ae_dev(ae_dev);
	if (ret)
		return ret;

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_enable_sriov(pdev);

	return 0;
}

/* hns3_remove - Device removal routine
 * @pdev: PCI device information struct
 */
static void hns3_remove(struct pci_dev *pdev)
{
	struct hnae3_ae_dev *ae_dev = pci_get_drvdata(pdev);

	if (hns3_is_phys_func(pdev) && IS_ENABLED(CONFIG_PCI_IOV))
		hns3_disable_sriov(pdev);

	hnae3_unregister_ae_dev(ae_dev);
}

static struct pci_driver hns3_driver = {
	.name     = hns3_driver_name,
	.id_table = hns3_pci_tbl,
	.probe    = hns3_probe,
	.remove   = hns3_remove,
};

/* set default feature to hns3 */
static void hns3_set_default_feature(struct net_device *netdev)
{
	netdev->priv_flags |= IFF_UNICAST_FLT;

	netdev->hw_enc_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_enc_features |= NETIF_F_TSO_MANGLEID;

	netdev->gso_partial_features |= NETIF_F_GSO_GRE_CSUM;

	netdev->features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_FILTER |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->vlan_features |=
		NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_RXCSUM |
		NETIF_F_SG | NETIF_F_GSO | NETIF_F_GRO |
		NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;

	netdev->hw_features |= NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM |
		NETIF_F_HW_VLAN_CTAG_TX | NETIF_F_HW_VLAN_CTAG_RX |
		NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_GSO |
		NETIF_F_GRO | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_GSO_GRE |
		NETIF_F_GSO_GRE_CSUM | NETIF_F_GSO_UDP_TUNNEL |
		NETIF_F_GSO_UDP_TUNNEL_CSUM;
}

static int hns3_alloc_buffer(struct hns3_enet_ring *ring,
			     struct hns3_desc_cb *cb)
{
	unsigned int order = hnae_page_order(ring);
	struct page *p;

	p = dev_alloc_pages(order);
	if (!p)
		return -ENOMEM;

	cb->priv = p;
	cb->page_offset = 0;
	cb->reuse_flag = 0;
	cb->buf = page_address(p);
	cb->length = hnae_page_size(ring);
	cb->type = DESC_TYPE_PAGE;

	return 0;
}

1667static void hns3_free_buffer(struct hns3_enet_ring *ring,
1668 struct hns3_desc_cb *cb)
1669{
1670 if (cb->type == DESC_TYPE_SKB)
1671 dev_kfree_skb_any((struct sk_buff *)cb->priv);
1672 else if (!HNAE3_IS_TX_RING(ring))
1673 put_page((struct page *)cb->priv);
1674 memset(cb, 0, sizeof(*cb));
1675}
1676
1677static int hns3_map_buffer(struct hns3_enet_ring *ring, struct hns3_desc_cb *cb)
1678{
1679 cb->dma = dma_map_page(ring_to_dev(ring), cb->priv, 0,
1680 cb->length, ring_to_dma_dir(ring));
1681
1682 if (dma_mapping_error(ring_to_dev(ring), cb->dma))
1683 return -EIO;
1684
1685 return 0;
1686}
1687
1688static void hns3_unmap_buffer(struct hns3_enet_ring *ring,
1689 struct hns3_desc_cb *cb)
1690{
1691 if (cb->type == DESC_TYPE_SKB)
1692 dma_unmap_single(ring_to_dev(ring), cb->dma, cb->length,
1693 ring_to_dma_dir(ring));
1694 else
1695 dma_unmap_page(ring_to_dev(ring), cb->dma, cb->length,
1696 ring_to_dma_dir(ring));
1697}
1698
1699static void hns3_buffer_detach(struct hns3_enet_ring *ring, int i)
1700{
1701 hns3_unmap_buffer(ring, &ring->desc_cb[i]);
1702 ring->desc[i].addr = 0;
1703}
1704
1705static void hns3_free_buffer_detach(struct hns3_enet_ring *ring, int i)
1706{
1707 struct hns3_desc_cb *cb = &ring->desc_cb[i];
1708
1709 if (!ring->desc_cb[i].dma)
1710 return;
1711
1712 hns3_buffer_detach(ring, i);
1713 hns3_free_buffer(ring, cb);
1714}
1715
1716static void hns3_free_buffers(struct hns3_enet_ring *ring)
1717{
1718 int i;
1719
1720 for (i = 0; i < ring->desc_num; i++)
1721 hns3_free_buffer_detach(ring, i);
1722}

/* free desc along with its attached buffer */
static void hns3_free_desc(struct hns3_enet_ring *ring)
{
	hns3_free_buffers(ring);

	dma_unmap_single(ring_to_dev(ring), ring->desc_dma_addr,
			 ring->desc_num * sizeof(ring->desc[0]),
			 DMA_BIDIRECTIONAL);
	ring->desc_dma_addr = 0;
	kfree(ring->desc);
	ring->desc = NULL;
}

static int hns3_alloc_desc(struct hns3_enet_ring *ring)
{
	int size = ring->desc_num * sizeof(ring->desc[0]);

	ring->desc = kzalloc(size, GFP_KERNEL);
	if (!ring->desc)
		return -ENOMEM;

	ring->desc_dma_addr = dma_map_single(ring_to_dev(ring), ring->desc,
					     size, DMA_BIDIRECTIONAL);
	if (dma_mapping_error(ring_to_dev(ring), ring->desc_dma_addr)) {
		ring->desc_dma_addr = 0;
		kfree(ring->desc);
		ring->desc = NULL;
		return -ENOMEM;
	}

	return 0;
}

static int hns3_reserve_buffer_map(struct hns3_enet_ring *ring,
				   struct hns3_desc_cb *cb)
{
	int ret;

	ret = hns3_alloc_buffer(ring, cb);
	if (ret)
		goto out;

	ret = hns3_map_buffer(ring, cb);
	if (ret)
		goto out_with_buf;

	return 0;

out_with_buf:
	hns3_free_buffer(ring, cb);
out:
	return ret;
}

static int hns3_alloc_buffer_attach(struct hns3_enet_ring *ring, int i)
{
	int ret = hns3_reserve_buffer_map(ring, &ring->desc_cb[i]);

	if (ret)
		return ret;

	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);

	return 0;
}

/* Allocate memory for the raw packet buffers and map them for DMA */
static int hns3_alloc_ring_buffers(struct hns3_enet_ring *ring)
{
	int i, j, ret;

	for (i = 0; i < ring->desc_num; i++) {
		ret = hns3_alloc_buffer_attach(ring, i);
		if (ret)
			goto out_buffer_fail;
	}

	return 0;

out_buffer_fail:
	for (j = i - 1; j >= 0; j--)
		hns3_free_buffer_detach(ring, j);
	return ret;
}

/* Detach an in-use buffer and replace it with a reserved one */
static void hns3_replace_buffer(struct hns3_enet_ring *ring, int i,
				struct hns3_desc_cb *res_cb)
{
	hns3_unmap_buffer(ring, &ring->desc_cb[i]);
	ring->desc_cb[i] = *res_cb;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma);
}

static void hns3_reuse_buffer(struct hns3_enet_ring *ring, int i)
{
	ring->desc_cb[i].reuse_flag = 0;
	ring->desc[i].addr = cpu_to_le64(ring->desc_cb[i].dma
		+ ring->desc_cb[i].page_offset);
}

static void hns3_nic_reclaim_one_desc(struct hns3_enet_ring *ring, int *bytes,
				      int *pkts)
{
	struct hns3_desc_cb *desc_cb = &ring->desc_cb[ring->next_to_clean];

	(*pkts) += (desc_cb->type == DESC_TYPE_SKB);
	(*bytes) += desc_cb->length;
	/* desc_cb will be cleaned after hns3_free_buffer_detach() */
	hns3_free_buffer_detach(ring, ring->next_to_clean);

	ring_ptr_move_fw(ring, next_to_clean);
}

static int is_valid_clean_head(struct hns3_enet_ring *ring, int h)
{
	int u = ring->next_to_use;
	int c = ring->next_to_clean;

	if (unlikely(h > ring->desc_num))
		return 0;

	return u > c ? (h > c && h <= u) : (h > c || h <= u);
}
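
/* Illustrative example (not from the upstream source): with desc_num = 8,
 * next_to_clean = 6 and next_to_use = 2, the cleanable region wraps, so a
 * hardware head of 7, 0, 1 or 2 is accepted while 3..6 is rejected as an
 * impossible position.
 */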

bool hns3_clean_tx_ring(struct hns3_enet_ring *ring, int budget)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct netdev_queue *dev_queue;
	int bytes, pkts;
	int head;

	head = readl_relaxed(ring->tqp->io_base + HNS3_RING_TX_RING_HEAD_REG);
	rmb(); /* Make sure head is ready before touching any data */

	if (is_ring_empty(ring) || head == ring->next_to_clean)
		return true; /* no data to poll */

	if (!is_valid_clean_head(ring, head)) {
		netdev_err(netdev, "wrong head (%d, %d-%d)\n", head,
			   ring->next_to_use, ring->next_to_clean);

		u64_stats_update_begin(&ring->syncp);
		ring->stats.io_err_cnt++;
		u64_stats_update_end(&ring->syncp);
		return true;
	}

	bytes = 0;
	pkts = 0;
	while (head != ring->next_to_clean && budget) {
		hns3_nic_reclaim_one_desc(ring, &bytes, &pkts);
		/* Issue prefetch for next Tx descriptor */
		prefetch(&ring->desc_cb[ring->next_to_clean]);
		budget--;
	}

	ring->tqp_vector->tx_group.total_bytes += bytes;
	ring->tqp_vector->tx_group.total_packets += pkts;

	u64_stats_update_begin(&ring->syncp);
	ring->stats.tx_bytes += bytes;
	ring->stats.tx_pkts += pkts;
	u64_stats_update_end(&ring->syncp);

	dev_queue = netdev_get_tx_queue(netdev, ring->tqp->tqp_index);
	netdev_tx_completed_queue(dev_queue, pkts, bytes);

	if (unlikely(pkts && netif_carrier_ok(netdev) &&
		     (ring_space(ring) > HNS3_MAX_BD_PER_PKT))) {
		/* Make sure that anybody stopping the queue after this
		 * sees the new next_to_clean.
		 */
		smp_mb();
		if (netif_tx_queue_stopped(dev_queue)) {
			netif_tx_wake_queue(dev_queue);
			ring->stats.restart_queue++;
		}
	}

	return !!budget;
}
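
/* Usage sketch: hns3_clean_tx_ring() runs in NAPI context from
 * hns3_nic_common_poll() below, e.g.
 *
 *	hns3_for_each_ring(ring, tqp_vector->tx_group)
 *		if (!hns3_clean_tx_ring(ring, budget))
 *			clean_complete = false;
 *
 * A false return (budget exhausted) keeps the vector in polling mode.
 */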

static int hns3_desc_unused(struct hns3_enet_ring *ring)
{
	int ntc = ring->next_to_clean;
	int ntu = ring->next_to_use;

	return ((ntc >= ntu) ? 0 : ring->desc_num) + ntc - ntu;
}
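
/* Worked example (illustrative): with desc_num = 512, next_to_clean = 10
 * and next_to_use = 500, ntc < ntu, so 512 + 10 - 500 = 22 descriptors
 * are unused and available for refill.
 */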

static void
hns3_nic_alloc_rx_buffers(struct hns3_enet_ring *ring, int cleaned_count)
{
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc_cb res_cbs;
	int i, ret;

	for (i = 0; i < cleaned_count; i++) {
		desc_cb = &ring->desc_cb[ring->next_to_use];
		if (desc_cb->reuse_flag) {
			u64_stats_update_begin(&ring->syncp);
			ring->stats.reuse_pg_cnt++;
			u64_stats_update_end(&ring->syncp);

			hns3_reuse_buffer(ring, ring->next_to_use);
		} else {
			ret = hns3_reserve_buffer_map(ring, &res_cbs);
			if (ret) {
				u64_stats_update_begin(&ring->syncp);
				ring->stats.sw_err_cnt++;
				u64_stats_update_end(&ring->syncp);

				netdev_err(ring->tqp->handle->kinfo.netdev,
					   "hnae reserve buffer map failed.\n");
				break;
			}
			hns3_replace_buffer(ring, ring->next_to_use, &res_cbs);
		}

		ring_ptr_move_fw(ring, next_to_use);
	}

	wmb(); /* Make sure all buffer writes are visible before the doorbell */
	writel_relaxed(i, ring->tqp->io_base + HNS3_RING_RX_RING_HEAD_REG);
}

/* hns3_nic_get_headlen - determine size of header for LRO/GRO
 * @data: pointer to the start of the headers
 * @flag: RX descriptor l234info, used to identify the L3/L4 protocols
 * @max_size: total length of section to find headers in
 *
 * This function is meant to determine the length of headers that will
 * be recognized by hardware for LRO, GRO, and RSC offloads. The main
 * motivation for doing this is to only perform one pull for IPv4 TCP
 * packets so that we can do basic things like calculating the gso_size
 * based on the average data per packet.
 */
static unsigned int hns3_nic_get_headlen(unsigned char *data, u32 flag,
					 unsigned int max_size)
{
	unsigned char *network;
	u8 hlen;

	/* This should never happen, but better safe than sorry */
	if (max_size < ETH_HLEN)
		return max_size;

	/* Initialize network frame pointer */
	network = data;

	/* Set first protocol and move network header forward */
	network += ETH_HLEN;

	/* Handle any vlan tag if present */
	if (hnae_get_field(flag, HNS3_RXD_VLAN_M, HNS3_RXD_VLAN_S)
		== HNS3_RX_FLAG_VLAN_PRESENT) {
		if ((typeof(max_size))(network - data) > (max_size - VLAN_HLEN))
			return max_size;

		network += VLAN_HLEN;
	}

	/* Handle L3 protocols */
	if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
		== HNS3_RX_FLAG_L3ID_IPV4) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct iphdr)))
			return max_size;

		/* Access ihl as a u8 to avoid unaligned access on ia64 */
		hlen = (network[0] & 0x0F) << 2;

		/* Verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct iphdr))
			return network - data;

		/* Record next protocol if header is present */
	} else if (hnae_get_field(flag, HNS3_RXD_L3ID_M, HNS3_RXD_L3ID_S)
		== HNS3_RX_FLAG_L3ID_IPV6) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct ipv6hdr)))
			return max_size;

		/* Record next protocol */
		hlen = sizeof(struct ipv6hdr);
	} else {
		return network - data;
	}

	/* Relocate pointer to start of L4 header */
	network += hlen;

	/* Finally sort out TCP/UDP */
	if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
		== HNS3_RX_FLAG_L4ID_TCP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct tcphdr)))
			return max_size;

		/* Access doff as a u8 to avoid unaligned access on ia64 */
		hlen = (network[12] & 0xF0) >> 2;

		/* Verify hlen meets minimum size requirements */
		if (hlen < sizeof(struct tcphdr))
			return network - data;

		network += hlen;
	} else if (hnae_get_field(flag, HNS3_RXD_L4ID_M, HNS3_RXD_L4ID_S)
		== HNS3_RX_FLAG_L4ID_UDP) {
		if ((typeof(max_size))(network - data) >
		    (max_size - sizeof(struct udphdr)))
			return max_size;

		network += sizeof(struct udphdr);
	}

	/* If everything has gone correctly, network should point at the
	 * data section of the packet, which is the end of the headers.
	 * If not, it probably represents the end of the last recognized
	 * header.
	 */
	if ((typeof(max_size))(network - data) < max_size)
		return network - data;
	else
		return max_size;
}

static void hns3_nic_reuse_page(struct sk_buff *skb, int i,
				struct hns3_enet_ring *ring, int pull_len,
				struct hns3_desc_cb *desc_cb)
{
	struct hns3_desc *desc;
	int truesize, size;
	int last_offset;
	bool twobufs;

	twobufs = ((PAGE_SIZE < 8192) &&
		hnae_buf_size(ring) == HNS3_BUFFER_SIZE_2048);

	desc = &ring->desc[ring->next_to_clean];
	size = le16_to_cpu(desc->rx.size);

	truesize = hnae_buf_size(ring);

	if (!twobufs)
		last_offset = hnae_page_size(ring) - hnae_buf_size(ring);

	skb_add_rx_frag(skb, i, desc_cb->priv, desc_cb->page_offset + pull_len,
			size - pull_len, truesize);

	/* Avoid re-using pages from a remote NUMA node; the reuse flag
	 * defaults to unset.
	 */
	if (unlikely(page_to_nid(desc_cb->priv) != numa_node_id()))
		return;

	if (twobufs) {
		/* If we are the only owner of the page we can reuse it */
		if (likely(page_count(desc_cb->priv) == 1)) {
			/* Flip page offset to the other buffer */
			desc_cb->page_offset ^= truesize;

			desc_cb->reuse_flag = 1;
			/* Bump ref count on page before it is given */
			get_page(desc_cb->priv);
		}
		return;
	}

	/* Move the offset up to the next buffer */
	desc_cb->page_offset += truesize;

	if (desc_cb->page_offset <= last_offset) {
		desc_cb->reuse_flag = 1;
		/* Bump ref count on page before it is given */
		get_page(desc_cb->priv);
	}
}
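
/* Illustrative layout (not from the upstream source): with 4K pages and
 * 2048-byte buffers ("twobufs"), each page holds two Rx buffers and reuse
 * simply flips page_offset between 0 and 2048. With larger pages,
 * page_offset instead advances by one truesize per use until it passes
 * last_offset, at which point the page is dropped and a fresh one is
 * allocated.
 */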

static void hns3_rx_checksum(struct hns3_enet_ring *ring, struct sk_buff *skb,
			     struct hns3_desc *desc)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int l3_type, l4_type;
	u32 bd_base_info;
	int ol4_type;
	u32 l234info;

	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	skb->ip_summed = CHECKSUM_NONE;

	skb_checksum_none_assert(skb);

	if (!(netdev->features & NETIF_F_RXCSUM))
		return;

	/* check if hardware has done the checksum */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_L3L4P_B))
		return;

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_L4E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL3E_B) ||
		     hnae_get_bit(l234info, HNS3_RXD_OL4E_B))) {
		netdev_err(netdev, "L3/L4 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l3l4_csum_err++;
		u64_stats_update_end(&ring->syncp);

		return;
	}

	l3_type = hnae_get_field(l234info, HNS3_RXD_L3ID_M,
				 HNS3_RXD_L3ID_S);
	l4_type = hnae_get_field(l234info, HNS3_RXD_L4ID_M,
				 HNS3_RXD_L4ID_S);

	ol4_type = hnae_get_field(l234info, HNS3_RXD_OL4ID_M, HNS3_RXD_OL4ID_S);
	switch (ol4_type) {
	case HNS3_OL4_TYPE_MAC_IN_UDP:
	case HNS3_OL4_TYPE_NVGRE:
		skb->csum_level = 1;
		/* fall through */
	case HNS3_OL4_TYPE_NO_TUN:
		/* Can checksum ipv4 or ipv6 + UDP/TCP/SCTP packets */
		if (l3_type == HNS3_L3_TYPE_IPV4 ||
		    (l3_type == HNS3_L3_TYPE_IPV6 &&
		     (l4_type == HNS3_L4_TYPE_UDP ||
		      l4_type == HNS3_L4_TYPE_TCP ||
		      l4_type == HNS3_L4_TYPE_SCTP)))
			skb->ip_summed = CHECKSUM_UNNECESSARY;
		break;
	}
}
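
/* Note on the switch above (explanatory, not from the upstream source):
 * the fall-through is deliberate. For the two tunnel types, raising
 * csum_level to 1 extends CHECKSUM_UNNECESSARY one encapsulation level
 * deeper so the inner packet is covered; tunneled and plain packets then
 * share the same CHECKSUM_UNNECESSARY decision.
 */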

static void hns3_rx_skb(struct hns3_enet_ring *ring, struct sk_buff *skb)
{
	napi_gro_receive(&ring->tqp_vector->napi, skb);
}

static int hns3_handle_rx_bd(struct hns3_enet_ring *ring,
			     struct sk_buff **out_skb, int *out_bnum)
{
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	struct hns3_desc_cb *desc_cb;
	struct hns3_desc *desc;
	struct sk_buff *skb;
	unsigned char *va;
	u32 bd_base_info;
	int pull_len;
	u32 l234info;
	int length;
	int bnum;

	desc = &ring->desc[ring->next_to_clean];
	desc_cb = &ring->desc_cb[ring->next_to_clean];

	prefetch(desc);

	length = le16_to_cpu(desc->rx.pkt_len);
	bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
	l234info = le32_to_cpu(desc->rx.l234_info);

	/* Check valid BD */
	if (!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))
		return -EFAULT;

	va = (unsigned char *)desc_cb->buf + desc_cb->page_offset;

	/* Prefetch the first cache line of the first page.
	 * The idea is to cache a few bytes of the packet header. Our L1
	 * cache line size is 64B, so we need to prefetch twice to bring
	 * in 128B. On CPUs with 128B L1 cache lines a single fetch
	 * suffices to cache the relevant part of the header.
	 */
	prefetch(va);
#if L1_CACHE_BYTES < 128
	prefetch(va + L1_CACHE_BYTES);
#endif

	skb = *out_skb = napi_alloc_skb(&ring->tqp_vector->napi,
					HNS3_RX_HEAD_SIZE);
	if (unlikely(!skb)) {
		netdev_err(netdev, "alloc rx skb fail\n");

		u64_stats_update_begin(&ring->syncp);
		ring->stats.sw_err_cnt++;
		u64_stats_update_end(&ring->syncp);

		return -ENOMEM;
	}

	prefetchw(skb->data);

	/* Based on the hardware strategy, the offloaded tag is stored in
	 * ot_vlan_tag for double-tagged packets and in vlan_tag for
	 * single-tagged packets.
	 */
	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX) {
		u16 vlan_tag;

		vlan_tag = le16_to_cpu(desc->rx.ot_vlan_tag);
		if (!(vlan_tag & VLAN_VID_MASK))
			vlan_tag = le16_to_cpu(desc->rx.vlan_tag);
		if (vlan_tag & VLAN_VID_MASK)
			__vlan_hwaccel_put_tag(skb,
					       htons(ETH_P_8021Q),
					       vlan_tag);
	}

	bnum = 1;
	if (length <= HNS3_RX_HEAD_SIZE) {
		memcpy(__skb_put(skb, length), va, ALIGN(length, sizeof(long)));

		/* We can reuse the buffer as-is, just make sure it is local */
		if (likely(page_to_nid(desc_cb->priv) == numa_node_id()))
			desc_cb->reuse_flag = 1;
		else /* This page cannot be reused so discard it */
			put_page(desc_cb->priv);

		ring_ptr_move_fw(ring, next_to_clean);
	} else {
		u64_stats_update_begin(&ring->syncp);
		ring->stats.seg_pkt_cnt++;
		u64_stats_update_end(&ring->syncp);

		pull_len = hns3_nic_get_headlen(va, l234info,
						HNS3_RX_HEAD_SIZE);
		memcpy(__skb_put(skb, pull_len), va,
		       ALIGN(pull_len, sizeof(long)));

		hns3_nic_reuse_page(skb, 0, ring, pull_len, desc_cb);
		ring_ptr_move_fw(ring, next_to_clean);

		while (!hnae_get_bit(bd_base_info, HNS3_RXD_FE_B)) {
			desc = &ring->desc[ring->next_to_clean];
			desc_cb = &ring->desc_cb[ring->next_to_clean];
			bd_base_info = le32_to_cpu(desc->rx.bd_base_info);
			hns3_nic_reuse_page(skb, bnum, ring, 0, desc_cb);
			ring_ptr_move_fw(ring, next_to_clean);
			bnum++;
		}
	}

	*out_bnum = bnum;

	if (unlikely(!hnae_get_bit(bd_base_info, HNS3_RXD_VLD_B))) {
		netdev_err(netdev, "no valid bd,%016llx,%016llx\n",
			   ((u64 *)desc)[0], ((u64 *)desc)[1]);
		u64_stats_update_begin(&ring->syncp);
		ring->stats.non_vld_descs++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EINVAL;
	}

	if (unlikely((!desc->rx.pkt_len) ||
		     hnae_get_bit(l234info, HNS3_RXD_TRUNCAT_B))) {
		netdev_err(netdev, "truncated pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.err_pkt_len++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	if (unlikely(hnae_get_bit(l234info, HNS3_RXD_L2E_B))) {
		netdev_err(netdev, "L2 error pkt\n");
		u64_stats_update_begin(&ring->syncp);
		ring->stats.l2_err++;
		u64_stats_update_end(&ring->syncp);

		dev_kfree_skb_any(skb);
		return -EFAULT;
	}

	u64_stats_update_begin(&ring->syncp);
	ring->stats.rx_pkts++;
	ring->stats.rx_bytes += skb->len;
	u64_stats_update_end(&ring->syncp);

	ring->tqp_vector->rx_group.total_bytes += skb->len;

	hns3_rx_checksum(ring, skb, desc);
	return 0;
}
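
/* Illustrative flow (not from the upstream source): a 3000-byte frame
 * received into 2048-byte buffers spans two BDs. The first BD donates the
 * pulled header to the skb linear area plus a page fragment; the while
 * loop above then attaches the second BD as another fragment until the
 * FE (frame end) bit is seen, leaving bnum == 2.
 */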

int hns3_clean_rx_ring(
		struct hns3_enet_ring *ring, int budget,
		void (*rx_fn)(struct hns3_enet_ring *, struct sk_buff *))
{
#define RCB_NOF_ALLOC_RX_BUFF_ONCE 16
	struct net_device *netdev = ring->tqp->handle->kinfo.netdev;
	int recv_pkts, recv_bds, clean_count, err;
	int unused_count = hns3_desc_unused(ring);
	struct sk_buff *skb = NULL;
	int num, bnum = 0;

	num = readl_relaxed(ring->tqp->io_base + HNS3_RING_RX_RING_FBDNUM_REG);
	rmb(); /* Make sure num is read before any other data is touched */

	recv_pkts = 0, recv_bds = 0, clean_count = 0;
	num -= unused_count;

	while (recv_pkts < budget && recv_bds < num) {
		/* Reuse or realloc buffers */
		if (clean_count + unused_count >= RCB_NOF_ALLOC_RX_BUFF_ONCE) {
			hns3_nic_alloc_rx_buffers(ring,
						  clean_count + unused_count);
			clean_count = 0;
			unused_count = hns3_desc_unused(ring);
		}

		/* Poll one packet */
		err = hns3_handle_rx_bd(ring, &skb, &bnum);
		if (unlikely(!skb)) /* This fault cannot be repaired */
			goto out;

		recv_bds += bnum;
		clean_count += bnum;
		if (unlikely(err)) { /* Skip the erroneous packet */
			recv_pkts++;
			continue;
		}

		/* Hand the packet up to the IP stack */
		skb->protocol = eth_type_trans(skb, netdev);
		rx_fn(ring, skb);

		recv_pkts++;
	}

out:
	/* Make sure all cleaned buffers are refilled before returning */
	if (clean_count + unused_count > 0)
		hns3_nic_alloc_rx_buffers(ring,
					  clean_count + unused_count);

	return recv_pkts;
}
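
/* Usage sketch: the standard Rx path passes hns3_rx_skb as the delivery
 * callback, e.g.
 *
 *	cleaned = hns3_clean_rx_ring(ring, rx_budget, hns3_rx_skb);
 *
 * Taking the delivery function as a parameter lets other callers reuse
 * the descriptor-cleaning logic with their own handler.
 */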

static bool hns3_get_new_int_gl(struct hns3_enet_ring_group *ring_group)
{
	struct hns3_enet_tqp_vector *tqp_vector =
					ring_group->ring->tqp_vector;
	enum hns3_flow_level_range new_flow_level;
	int packets_per_msecs;
	int bytes_per_msecs;
	u32 time_passed_ms;
	u16 new_int_gl;

	if (!ring_group->coal.int_gl || !tqp_vector->last_jiffies)
		return false;

	if (ring_group->total_packets == 0) {
		ring_group->coal.int_gl = HNS3_INT_GL_50K;
		ring_group->coal.flow_level = HNS3_FLOW_LOW;
		return true;
	}

	/* Simple throttle rate management:
	 * 0-10MB/s    lower  (50000 ints/s)
	 * 10-20MB/s   middle (20000 ints/s)
	 * 20-1249MB/s high   (18000 ints/s)
	 * > 40000pps  ultra  (8000 ints/s)
	 */
	new_flow_level = ring_group->coal.flow_level;
	new_int_gl = ring_group->coal.int_gl;
	time_passed_ms =
		jiffies_to_msecs(jiffies - tqp_vector->last_jiffies);

	if (!time_passed_ms)
		return false;

	do_div(ring_group->total_packets, time_passed_ms);
	packets_per_msecs = ring_group->total_packets;

	do_div(ring_group->total_bytes, time_passed_ms);
	bytes_per_msecs = ring_group->total_bytes;

#define HNS3_RX_LOW_BYTE_RATE 10000
#define HNS3_RX_MID_BYTE_RATE 20000

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		if (bytes_per_msecs > HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	case HNS3_FLOW_MID:
		if (bytes_per_msecs > HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_HIGH;
		else if (bytes_per_msecs <= HNS3_RX_LOW_BYTE_RATE)
			new_flow_level = HNS3_FLOW_LOW;
		break;
	case HNS3_FLOW_HIGH:
	case HNS3_FLOW_ULTRA:
	default:
		if (bytes_per_msecs <= HNS3_RX_MID_BYTE_RATE)
			new_flow_level = HNS3_FLOW_MID;
		break;
	}

#define HNS3_RX_ULTRA_PACKET_RATE 40

	if (packets_per_msecs > HNS3_RX_ULTRA_PACKET_RATE &&
	    &tqp_vector->rx_group == ring_group)
		new_flow_level = HNS3_FLOW_ULTRA;

	switch (new_flow_level) {
	case HNS3_FLOW_LOW:
		new_int_gl = HNS3_INT_GL_50K;
		break;
	case HNS3_FLOW_MID:
		new_int_gl = HNS3_INT_GL_20K;
		break;
	case HNS3_FLOW_HIGH:
		new_int_gl = HNS3_INT_GL_18K;
		break;
	case HNS3_FLOW_ULTRA:
		new_int_gl = HNS3_INT_GL_8K;
		break;
	default:
		break;
	}

	ring_group->total_bytes = 0;
	ring_group->total_packets = 0;
	ring_group->coal.flow_level = new_flow_level;
	if (new_int_gl != ring_group->coal.int_gl) {
		ring_group->coal.int_gl = new_int_gl;
		return true;
	}
	return false;
}
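
/* Worked example (illustrative): if 1 ms has elapsed and the ring group
 * moved 15000 bytes in that time, a group at HNS3_FLOW_LOW steps up to
 * HNS3_FLOW_MID (15000 > HNS3_RX_LOW_BYTE_RATE) and int_gl becomes
 * HNS3_INT_GL_20K; the function returns true so the caller rewrites the
 * GL register.
 */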

static void hns3_update_new_int_gl(struct hns3_enet_tqp_vector *tqp_vector)
{
	struct hns3_enet_ring_group *rx_group = &tqp_vector->rx_group;
	struct hns3_enet_ring_group *tx_group = &tqp_vector->tx_group;
	bool rx_update, tx_update;

	if (tqp_vector->int_adapt_down > 0) {
		tqp_vector->int_adapt_down--;
		return;
	}

	if (rx_group->coal.gl_adapt_enable) {
		rx_update = hns3_get_new_int_gl(rx_group);
		if (rx_update)
			hns3_set_vector_coalesce_rx_gl(tqp_vector,
						       rx_group->coal.int_gl);
	}

	if (tx_group->coal.gl_adapt_enable) {
		tx_update = hns3_get_new_int_gl(&tqp_vector->tx_group);
		if (tx_update)
			hns3_set_vector_coalesce_tx_gl(tqp_vector,
						       tx_group->coal.int_gl);
	}

	tqp_vector->last_jiffies = jiffies;
	tqp_vector->int_adapt_down = HNS3_INT_ADAPT_DOWN_START;
}

static int hns3_nic_common_poll(struct napi_struct *napi, int budget)
{
	struct hns3_enet_ring *ring;
	int rx_pkt_total = 0;

	struct hns3_enet_tqp_vector *tqp_vector =
		container_of(napi, struct hns3_enet_tqp_vector, napi);
	bool clean_complete = true;
	int rx_budget;

	/* Since the actual Tx work is minimal, we can give the Tx a larger
	 * budget and be more aggressive about cleaning up the Tx descriptors.
	 */
	hns3_for_each_ring(ring, tqp_vector->tx_group) {
		if (!hns3_clean_tx_ring(ring, budget))
			clean_complete = false;
	}

	/* Make sure the Rx ring budget is not smaller than 1 */
	rx_budget = max(budget / tqp_vector->num_tqps, 1);

	hns3_for_each_ring(ring, tqp_vector->rx_group) {
		int rx_cleaned = hns3_clean_rx_ring(ring, rx_budget,
						    hns3_rx_skb);

		if (rx_cleaned >= rx_budget)
			clean_complete = false;

		rx_pkt_total += rx_cleaned;
	}

	tqp_vector->rx_group.total_packets += rx_pkt_total;

	if (!clean_complete)
		return budget;

	napi_complete(napi);
	hns3_update_new_int_gl(tqp_vector);
	hns3_mask_vector_irq(tqp_vector, 1);

	return rx_pkt_total;
}
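
/* Budget split example (illustrative): with the default NAPI budget of 64
 * and a vector serving 4 TQPs, every Tx ring may reclaim up to 64
 * descriptors while each Rx ring is polled with max(64 / 4, 1) = 16
 * packets, so one busy queue cannot starve the others on the vector.
 */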

static int hns3_get_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
				      struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *cur_chain = head;
	struct hnae3_ring_chain_node *chain;
	struct hns3_enet_ring *tx_ring;
	struct hns3_enet_ring *rx_ring;

	tx_ring = tqp_vector->tx_group.ring;
	if (tx_ring) {
		cur_chain->tqp_index = tx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_TX);
		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_TX);

		cur_chain->next = NULL;

		while (tx_ring->next) {
			tx_ring = tx_ring->next;

			chain = devm_kzalloc(&pdev->dev, sizeof(*chain),
					     GFP_KERNEL);
			if (!chain)
				return -ENOMEM;

			cur_chain->next = chain;
			chain->tqp_index = tx_ring->tqp->tqp_index;
			hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
				     HNAE3_RING_TYPE_TX);
			hnae_set_field(chain->int_gl_idx,
				       HNAE3_RING_GL_IDX_M,
				       HNAE3_RING_GL_IDX_S,
				       HNAE3_RING_GL_TX);

			cur_chain = chain;
		}
	}

	rx_ring = tqp_vector->rx_group.ring;
	if (!tx_ring && rx_ring) {
		cur_chain->next = NULL;
		cur_chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(cur_chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		hnae_set_field(cur_chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		rx_ring = rx_ring->next;
	}

	while (rx_ring) {
		chain = devm_kzalloc(&pdev->dev, sizeof(*chain), GFP_KERNEL);
		if (!chain)
			return -ENOMEM;

		cur_chain->next = chain;
		chain->tqp_index = rx_ring->tqp->tqp_index;
		hnae_set_bit(chain->flag, HNAE3_RING_TYPE_B,
			     HNAE3_RING_TYPE_RX);
		hnae_set_field(chain->int_gl_idx, HNAE3_RING_GL_IDX_M,
			       HNAE3_RING_GL_IDX_S, HNAE3_RING_GL_RX);

		cur_chain = chain;

		rx_ring = rx_ring->next;
	}

	return 0;
}

static void hns3_free_vector_ring_chain(struct hns3_enet_tqp_vector *tqp_vector,
					struct hnae3_ring_chain_node *head)
{
	struct pci_dev *pdev = tqp_vector->handle->pdev;
	struct hnae3_ring_chain_node *chain_tmp, *chain;

	chain = head->next;

	while (chain) {
		chain_tmp = chain->next;
		devm_kfree(&pdev->dev, chain);
		chain = chain_tmp;
	}
}

static void hns3_add_ring_to_group(struct hns3_enet_ring_group *group,
				   struct hns3_enet_ring *ring)
{
	ring->next = group->ring;
	group->ring = ring;

	group->count++;
}

static int hns3_nic_init_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int ret = 0;
	u16 i;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		hns3_vector_gl_rl_init_hw(tqp_vector, priv);
		tqp_vector->num_tqps = 0;
	}

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		u16 vector_i = i % priv->vector_num;
		u16 tqp_num = h->kinfo.num_tqps;

		tqp_vector = &priv->tqp_vector[vector_i];

		hns3_add_ring_to_group(&tqp_vector->tx_group,
				       priv->ring_data[i].ring);

		hns3_add_ring_to_group(&tqp_vector->rx_group,
				       priv->ring_data[i + tqp_num].ring);

		priv->ring_data[i].ring->tqp_vector = tqp_vector;
		priv->ring_data[i + tqp_num].ring->tqp_vector = tqp_vector;
		tqp_vector->num_tqps++;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		tqp_vector->rx_group.total_bytes = 0;
		tqp_vector->rx_group.total_packets = 0;
		tqp_vector->tx_group.total_bytes = 0;
		tqp_vector->tx_group.total_packets = 0;
		tqp_vector->handle = h;

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->map_ring_to_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (ret)
			return ret;

		netif_napi_add(priv->netdev, &tqp_vector->napi,
			       hns3_nic_common_poll, NAPI_POLL_WEIGHT);
	}

	return 0;
}

static int hns3_nic_alloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	struct hnae3_vector_info *vector;
	struct pci_dev *pdev = h->pdev;
	u16 tqp_num = h->kinfo.num_tqps;
	u16 vector_num;
	int ret = 0;
	u16 i;

	/* The RSS size, the number of online CPUs and vector_num should
	 * match; 2P/4P systems may need special handling later.
	 */
	vector_num = min_t(u16, num_online_cpus(), tqp_num);
	vector = devm_kcalloc(&pdev->dev, vector_num, sizeof(*vector),
			      GFP_KERNEL);
	if (!vector)
		return -ENOMEM;

	vector_num = h->ae_algo->ops->get_vector(h, vector_num, vector);

	priv->vector_num = vector_num;
	priv->tqp_vector = (struct hns3_enet_tqp_vector *)
		devm_kcalloc(&pdev->dev, vector_num, sizeof(*priv->tqp_vector),
			     GFP_KERNEL);
	if (!priv->tqp_vector) {
		ret = -ENOMEM;
		goto out;
	}

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];
		tqp_vector->idx = i;
		tqp_vector->mask_addr = vector[i].io_addr;
		tqp_vector->vector_irq = vector[i].vector;
		hns3_vector_gl_rl_init(tqp_vector, priv);
	}

out:
	devm_kfree(&pdev->dev, vector);
	return ret;
}

static void hns3_clear_ring_group(struct hns3_enet_ring_group *group)
{
	group->ring = NULL;
	group->count = 0;
}

static int hns3_nic_uninit_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_ring_chain_node vector_ring_chain;
	struct hnae3_handle *h = priv->ae_handle;
	struct hns3_enet_tqp_vector *tqp_vector;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		tqp_vector = &priv->tqp_vector[i];

		ret = hns3_get_vector_ring_chain(tqp_vector,
						 &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->unmap_ring_from_vector(h,
			tqp_vector->vector_irq, &vector_ring_chain);
		if (ret)
			return ret;

		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;

		hns3_free_vector_ring_chain(tqp_vector, &vector_ring_chain);

		if (priv->tqp_vector[i].irq_init_flag == HNS3_VECTOR_INITED) {
			(void)irq_set_affinity_hint(
				priv->tqp_vector[i].vector_irq,
				NULL);
			free_irq(priv->tqp_vector[i].vector_irq,
				 &priv->tqp_vector[i]);
		}

		priv->ring_data[i].ring->irq_init_flag = HNS3_VECTOR_NOT_INITED;
		hns3_clear_ring_group(&tqp_vector->rx_group);
		hns3_clear_ring_group(&tqp_vector->tx_group);
		netif_napi_del(&priv->tqp_vector[i].napi);
	}

	return 0;
}

static int hns3_nic_dealloc_vector_data(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	for (i = 0; i < priv->vector_num; i++) {
		struct hns3_enet_tqp_vector *tqp_vector;

		tqp_vector = &priv->tqp_vector[i];
		ret = h->ae_algo->ops->put_vector(h, tqp_vector->vector_irq);
		if (ret)
			return ret;
	}

	devm_kfree(&pdev->dev, priv->tqp_vector);
	return 0;
}

static int hns3_ring_get_cfg(struct hnae3_queue *q, struct hns3_nic_priv *priv,
			     int ring_type)
{
	struct hns3_nic_ring_data *ring_data = priv->ring_data;
	int queue_num = priv->ae_handle->kinfo.num_tqps;
	struct pci_dev *pdev = priv->ae_handle->pdev;
	struct hns3_enet_ring *ring;

	ring = devm_kzalloc(&pdev->dev, sizeof(*ring), GFP_KERNEL);
	if (!ring)
		return -ENOMEM;

	if (ring_type == HNAE3_RING_TYPE_TX) {
		ring_data[q->tqp_index].ring = ring;
		ring_data[q->tqp_index].queue_index = q->tqp_index;
		ring->io_base = (u8 __iomem *)q->io_base + HNS3_TX_REG_OFFSET;
	} else {
		ring_data[q->tqp_index + queue_num].ring = ring;
		ring_data[q->tqp_index + queue_num].queue_index = q->tqp_index;
		ring->io_base = q->io_base;
	}

	hnae_set_bit(ring->flag, HNAE3_RING_TYPE_B, ring_type);

	ring->tqp = q;
	ring->desc = NULL;
	ring->desc_cb = NULL;
	ring->dev = priv->dev;
	ring->desc_dma_addr = 0;
	ring->buf_size = q->buf_size;
	ring->desc_num = q->desc_num;
	ring->next_to_use = 0;
	ring->next_to_clean = 0;

	return 0;
}

static int hns3_queue_to_ring(struct hnae3_queue *tqp,
			      struct hns3_nic_priv *priv)
{
	int ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_TX);
	if (ret)
		return ret;

	ret = hns3_ring_get_cfg(tqp, priv, HNAE3_RING_TYPE_RX);
	if (ret)
		return ret;

	return 0;
}

static int hns3_get_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	struct pci_dev *pdev = h->pdev;
	int i, ret;

	priv->ring_data = devm_kzalloc(&pdev->dev, h->kinfo.num_tqps *
				       sizeof(*priv->ring_data) * 2,
				       GFP_KERNEL);
	if (!priv->ring_data)
		return -ENOMEM;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		ret = hns3_queue_to_ring(h->kinfo.tqp[i], priv);
		if (ret)
			goto err;
	}

	return 0;
err:
	devm_kfree(&pdev->dev, priv->ring_data);
	return ret;
}

static void hns3_put_ring_config(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		devm_kfree(priv->dev, priv->ring_data[i].ring);
		devm_kfree(priv->dev,
			   priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	devm_kfree(priv->dev, priv->ring_data);
}

static int hns3_alloc_ring_memory(struct hns3_enet_ring *ring)
{
	int ret;

	if (ring->desc_num <= 0 || ring->buf_size <= 0)
		return -EINVAL;

	ring->desc_cb = kcalloc(ring->desc_num, sizeof(ring->desc_cb[0]),
				GFP_KERNEL);
	if (!ring->desc_cb) {
		ret = -ENOMEM;
		goto out;
	}

	ret = hns3_alloc_desc(ring);
	if (ret)
		goto out_with_desc_cb;

	if (!HNAE3_IS_TX_RING(ring)) {
		ret = hns3_alloc_ring_buffers(ring);
		if (ret)
			goto out_with_desc;
	}

	return 0;

out_with_desc:
	hns3_free_desc(ring);
out_with_desc_cb:
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
out:
	return ret;
}

static void hns3_fini_ring(struct hns3_enet_ring *ring)
{
	hns3_free_desc(ring);
	kfree(ring->desc_cb);
	ring->desc_cb = NULL;
	ring->next_to_clean = 0;
	ring->next_to_use = 0;
}

static int hns3_buf_size2type(u32 buf_size)
{
	int bd_size_type;

	switch (buf_size) {
	case 512:
		bd_size_type = HNS3_BD_SIZE_512_TYPE;
		break;
	case 1024:
		bd_size_type = HNS3_BD_SIZE_1024_TYPE;
		break;
	case 2048:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
		break;
	case 4096:
		bd_size_type = HNS3_BD_SIZE_4096_TYPE;
		break;
	default:
		bd_size_type = HNS3_BD_SIZE_2048_TYPE;
	}

	return bd_size_type;
}

static void hns3_init_ring_hw(struct hns3_enet_ring *ring)
{
	dma_addr_t dma = ring->desc_dma_addr;
	struct hnae3_queue *q = ring->tqp;

	if (!HNAE3_IS_TX_RING(ring)) {
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_RX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_RX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_RX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);

	} else {
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_L_REG,
			       (u32)dma);
		hns3_write_dev(q, HNS3_RING_TX_RING_BASEADDR_H_REG,
			       (u32)((dma >> 31) >> 1));

		hns3_write_dev(q, HNS3_RING_TX_RING_BD_LEN_REG,
			       hns3_buf_size2type(ring->buf_size));
		hns3_write_dev(q, HNS3_RING_TX_RING_BD_NUM_REG,
			       ring->desc_num / 8 - 1);
	}
}
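
/* The high address word above is written as (u32)((dma >> 31) >> 1)
 * instead of (u32)(dma >> 32): when dma_addr_t is 32 bits wide, a single
 * 32-bit shift would be undefined behaviour, while the split shift
 * safely evaluates to 0.
 */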

int hns3_init_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int ring_num = h->kinfo.num_tqps * 2;
	int i, j;
	int ret;

	for (i = 0; i < ring_num; i++) {
		ret = hns3_alloc_ring_memory(priv->ring_data[i].ring);
		if (ret) {
			dev_err(priv->dev,
				"Alloc ring memory fail! ret=%d\n", ret);
			goto out_when_alloc_ring_memory;
		}

		hns3_init_ring_hw(priv->ring_data[i].ring);

		u64_stats_init(&priv->ring_data[i].ring->syncp);
	}

	return 0;

out_when_alloc_ring_memory:
	for (j = i - 1; j >= 0; j--)
		hns3_fini_ring(priv->ring_data[j].ring);

	return -ENOMEM;
}

int hns3_uninit_all_ring(struct hns3_nic_priv *priv)
{
	struct hnae3_handle *h = priv->ae_handle;
	int i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		if (h->ae_algo->ops->reset_queue)
			h->ae_algo->ops->reset_queue(h, i);

		hns3_fini_ring(priv->ring_data[i].ring);
		hns3_fini_ring(priv->ring_data[i + h->kinfo.num_tqps].ring);
	}
	return 0;
}

/* Set the MAC address if it is configured, or leave it to the AE driver */
static void hns3_init_mac_addr(struct net_device *netdev, bool init)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = priv->ae_handle;
	u8 mac_addr_temp[ETH_ALEN];

	if (h->ae_algo->ops->get_mac_addr && init) {
		h->ae_algo->ops->get_mac_addr(h, mac_addr_temp);
		ether_addr_copy(netdev->dev_addr, mac_addr_temp);
	}

	/* Check if the MAC address is valid; if not, get a random one */
	if (!is_valid_ether_addr(netdev->dev_addr)) {
		eth_hw_addr_random(netdev);
		dev_warn(priv->dev, "using random MAC address %pM\n",
			 netdev->dev_addr);
	}

	if (h->ae_algo->ops->set_mac_addr)
		h->ae_algo->ops->set_mac_addr(h, netdev->dev_addr, true);
}

static void hns3_nic_set_priv_ops(struct net_device *netdev)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);

	if ((netdev->features & NETIF_F_TSO) ||
	    (netdev->features & NETIF_F_TSO6)) {
		priv->ops.fill_desc = hns3_fill_desc_tso;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tso;
	} else {
		priv->ops.fill_desc = hns3_fill_desc;
		priv->ops.maybe_stop_tx = hns3_nic_maybe_stop_tx;
	}
}

static int hns3_client_init(struct hnae3_handle *handle)
{
	struct pci_dev *pdev = handle->pdev;
	struct hns3_nic_priv *priv;
	struct net_device *netdev;
	int ret;

	netdev = alloc_etherdev_mq(sizeof(struct hns3_nic_priv),
				   hns3_get_max_available_channels(handle));
	if (!netdev)
		return -ENOMEM;

	priv = netdev_priv(netdev);
	priv->dev = &pdev->dev;
	priv->netdev = netdev;
	priv->ae_handle = handle;
	priv->ae_handle->reset_level = HNAE3_NONE_RESET;
	priv->ae_handle->last_reset_time = jiffies;
	priv->tx_timeout_count = 0;

	handle->kinfo.netdev = netdev;
	handle->priv = (void *)priv;

	hns3_init_mac_addr(netdev, true);

	hns3_set_default_feature(netdev);

	netdev->watchdog_timeo = HNS3_TX_TIMEOUT;
	netdev->priv_flags |= IFF_UNICAST_FLT;
	netdev->netdev_ops = &hns3_nic_netdev_ops;
	SET_NETDEV_DEV(netdev, &pdev->dev);
	hns3_ethtool_set_ops(netdev);
	hns3_nic_set_priv_ops(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_get_ring_cfg;
	}

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_alloc_vector_data;
	}

	ret = hns3_nic_init_vector_data(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_vector_data;
	}

	ret = hns3_init_all_ring(priv);
	if (ret) {
		ret = -ENOMEM;
		goto out_init_ring_data;
	}

	ret = register_netdev(netdev);
	if (ret) {
		dev_err(priv->dev, "probe register netdev fail!\n");
		goto out_reg_netdev_fail;
	}

	hns3_dcbnl_setup(handle);

	/* MTU range: (ETH_MIN_MTU(kernel default) - 9706) */
	netdev->max_mtu = HNS3_MAX_MTU - (ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);

	return ret;

out_reg_netdev_fail:
out_init_ring_data:
	(void)hns3_nic_uninit_vector_data(priv);
out_init_vector_data:
	hns3_nic_dealloc_vector_data(priv);
out_alloc_vector_data:
	priv->ring_data = NULL;
out_get_ring_cfg:
	priv->ae_handle = NULL;
	free_netdev(netdev);
	return ret;
}

static void hns3_client_uninit(struct hnae3_handle *handle, bool reset)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	if (netdev->reg_state != NETREG_UNINITIALIZED)
		unregister_netdev(netdev);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret)
		netdev_err(netdev, "uninit vector error\n");

	ret = hns3_nic_dealloc_vector_data(priv);
	if (ret)
		netdev_err(netdev, "dealloc vector error\n");

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	free_netdev(netdev);
}

static void hns3_link_status_change(struct hnae3_handle *handle, bool linkup)
{
	struct net_device *netdev = handle->kinfo.netdev;

	if (!netdev)
		return;

	if (linkup) {
		netif_carrier_on(netdev);
		netif_tx_wake_all_queues(netdev);
		netdev_info(netdev, "link up\n");
	} else {
		netif_carrier_off(netdev);
		netif_tx_stop_all_queues(netdev);
		netdev_info(netdev, "link down\n");
	}
}

static int hns3_client_setup_tc(struct hnae3_handle *handle, u8 tc)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;
	bool if_running;
	int ret;
	u8 i;

	if (tc > HNAE3_MAX_TC)
		return -EINVAL;

	if (!ndev)
		return -ENODEV;

	if_running = netif_running(ndev);

	ret = netdev_set_num_tc(ndev, tc);
	if (ret)
		return ret;

	if (if_running) {
		(void)hns3_nic_net_stop(ndev);
		msleep(100);
	}

	ret = (kinfo->dcb_ops && kinfo->dcb_ops->map_update) ?
		kinfo->dcb_ops->map_update(handle) : -EOPNOTSUPP;
	if (ret)
		goto err_out;

	if (tc <= 1) {
		netdev_reset_tc(ndev);
		goto out;
	}

	for (i = 0; i < HNAE3_MAX_TC; i++) {
		struct hnae3_tc_info *tc_info = &kinfo->tc_info[i];

		if (tc_info->enable)
			netdev_set_tc_queue(ndev,
					    tc_info->tc,
					    tc_info->tqp_count,
					    tc_info->tqp_offset);
	}

	for (i = 0; i < HNAE3_MAX_USER_PRIO; i++) {
		netdev_set_prio_tc_map(ndev, i,
				       kinfo->prio_tc[i]);
	}

out:
	ret = hns3_nic_set_real_num_queue(ndev);

err_out:
	if (if_running)
		(void)hns3_nic_net_open(ndev);

	return ret;
}

static void hns3_recover_hw_addr(struct net_device *ndev)
{
	struct netdev_hw_addr_list *list;
	struct netdev_hw_addr *ha, *tmp;

	/* go through and sync uc_addr entries to the device */
	list = &ndev->uc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_uc_sync(ndev, ha->addr);

	/* go through and sync mc_addr entries to the device */
	list = &ndev->mc;
	list_for_each_entry_safe(ha, tmp, &list->list, list)
		hns3_nic_mc_sync(ndev, ha->addr);
}

static void hns3_clear_tx_ring(struct hns3_enet_ring *ring)
{
	if (!HNAE3_IS_TX_RING(ring))
		return;

	while (ring->next_to_clean != ring->next_to_use) {
		hns3_free_buffer_detach(ring, ring->next_to_clean);
		ring_ptr_move_fw(ring, next_to_clean);
	}
}

static void hns3_clear_rx_ring(struct hns3_enet_ring *ring)
{
	if (HNAE3_IS_TX_RING(ring))
		return;

	while (ring->next_to_use != ring->next_to_clean) {
		/* When a buffer is not reused, its memory has already
		 * been freed in hns3_handle_rx_bd or will be freed by
		 * the stack, so we only need to unmap the buffer here.
		 */
		if (!ring->desc_cb[ring->next_to_use].reuse_flag) {
			hns3_unmap_buffer(ring,
					  &ring->desc_cb[ring->next_to_use]);
			ring->desc_cb[ring->next_to_use].dma = 0;
		}

		ring_ptr_move_fw(ring, next_to_use);
	}
}

static void hns3_clear_all_ring(struct hnae3_handle *h)
{
	struct net_device *ndev = h->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(ndev);
	u32 i;

	for (i = 0; i < h->kinfo.num_tqps; i++) {
		struct netdev_queue *dev_queue;
		struct hns3_enet_ring *ring;

		ring = priv->ring_data[i].ring;
		hns3_clear_tx_ring(ring);
		dev_queue = netdev_get_tx_queue(ndev,
						priv->ring_data[i].queue_index);
		netdev_tx_reset_queue(dev_queue);

		ring = priv->ring_data[i + h->kinfo.num_tqps].ring;
		hns3_clear_rx_ring(ring);
	}
}

static int hns3_reset_notify_down_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	struct net_device *ndev = kinfo->netdev;

	if (!netif_running(ndev))
		return -EIO;

	return hns3_nic_net_stop(ndev);
}

static int hns3_reset_notify_up_enet(struct hnae3_handle *handle)
{
	struct hnae3_knic_private_info *kinfo = &handle->kinfo;
	int ret = 0;

	if (netif_running(kinfo->netdev)) {
		ret = hns3_nic_net_up(kinfo->netdev);
		if (ret) {
			netdev_err(kinfo->netdev,
				   "hns net up fail, ret=%d!\n", ret);
			return ret;
		}
		handle->last_reset_time = jiffies;
	}

	return ret;
}

static int hns3_reset_notify_init_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_init_mac_addr(netdev, false);
	hns3_nic_set_rx_mode(netdev);
	hns3_recover_hw_addr(netdev);

	/* The hardware table is only cleared when the PF resets */
	if (!(handle->flags & HNAE3_SUPPORT_VF))
		hns3_restore_vlan(netdev);

	/* Carrier off reporting is important to ethtool even BEFORE open */
	netif_carrier_off(netdev);

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		return ret;

	ret = hns3_init_all_ring(priv);
	if (ret) {
		hns3_nic_uninit_vector_data(priv);
		priv->ring_data = NULL;
	}

	return ret;
}

static int hns3_reset_notify_uninit_enet(struct hnae3_handle *handle)
{
	struct net_device *netdev = handle->kinfo.netdev;
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	int ret;

	hns3_clear_all_ring(handle);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		netdev_err(netdev, "uninit vector error\n");
		return ret;
	}

	ret = hns3_uninit_all_ring(priv);
	if (ret)
		netdev_err(netdev, "uninit ring error\n");

	hns3_put_ring_config(priv);

	priv->ring_data = NULL;

	return ret;
}

static int hns3_reset_notify(struct hnae3_handle *handle,
			     enum hnae3_reset_notify_type type)
{
	int ret = 0;

	switch (type) {
	case HNAE3_UP_CLIENT:
		ret = hns3_reset_notify_up_enet(handle);
		break;
	case HNAE3_DOWN_CLIENT:
		ret = hns3_reset_notify_down_enet(handle);
		break;
	case HNAE3_INIT_CLIENT:
		ret = hns3_reset_notify_init_enet(handle);
		break;
	case HNAE3_UNINIT_CLIENT:
		ret = hns3_reset_notify_uninit_enet(handle);
		break;
	default:
		break;
	}

	return ret;
}
3442
Yunsheng Lin7a242b22018-03-09 10:37:04 +08003443static void hns3_restore_coal(struct hns3_nic_priv *priv,
3444 struct hns3_enet_coalesce *tx,
3445 struct hns3_enet_coalesce *rx)
3446{
3447 u16 vector_num = priv->vector_num;
3448 int i;
3449
3450 for (i = 0; i < vector_num; i++) {
3451 memcpy(&priv->tqp_vector[i].tx_group.coal, tx,
3452 sizeof(struct hns3_enet_coalesce));
3453 memcpy(&priv->tqp_vector[i].rx_group.coal, rx,
3454 sizeof(struct hns3_enet_coalesce));
3455 }
3456}
3457
static int hns3_modify_tqp_num(struct net_device *netdev, u16 new_tqp_num,
			       struct hns3_enet_coalesce *tx,
			       struct hns3_enet_coalesce *rx)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	int ret;

	ret = h->ae_algo->ops->set_channels(h, new_tqp_num);
	if (ret)
		return ret;

	ret = hns3_get_ring_config(priv);
	if (ret)
		return ret;

	ret = hns3_nic_alloc_vector_data(priv);
	if (ret)
		goto err_alloc_vector;

	hns3_restore_coal(priv, tx, rx);

	ret = hns3_nic_init_vector_data(priv);
	if (ret)
		goto err_uninit_vector;

	ret = hns3_init_all_ring(priv);
	if (ret)
		goto err_put_ring;

	return 0;

err_put_ring:
	hns3_put_ring_config(priv);
err_uninit_vector:
	hns3_nic_uninit_vector_data(priv);
err_alloc_vector:
	hns3_nic_dealloc_vector_data(priv);
	return ret;
}

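/* hns3_adjust_tqps_num - round the requested TQP count down to a multiple
 * of the TC count, since every TC needs the same number of queues.
 * For example, with num_tc = 3 a request for 16 TQPs is adjusted to 15.
 */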
static int hns3_adjust_tqps_num(u8 num_tc, u32 new_tqp_num)
{
	return (new_tqp_num / num_tc) * num_tc;
}

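/* hns3_set_channels - ethtool -L handler (e.g. "ethtool -L eth0 combined 8").
 * Only the combined count may be changed. The whole data path is torn down
 * and rebuilt with the new queue count; if the rebuild fails, the old count
 * is restored so the device is left in a working state.
 */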
int hns3_set_channels(struct net_device *netdev,
		      struct ethtool_channels *ch)
{
	struct hns3_nic_priv *priv = netdev_priv(netdev);
	struct hnae3_handle *h = hns3_get_handle(netdev);
	struct hnae3_knic_private_info *kinfo = &h->kinfo;
	struct hns3_enet_coalesce tx_coal, rx_coal;
	bool if_running = netif_running(netdev);
	u32 new_tqp_num = ch->combined_count;
	u16 org_tqp_num;
	int ret;

	if (ch->rx_count || ch->tx_count)
		return -EINVAL;

	if (new_tqp_num > hns3_get_max_available_channels(h) ||
	    new_tqp_num < kinfo->num_tc) {
		dev_err(&netdev->dev,
			"Change tqps fail, the tqp range is from %d to %d",
			kinfo->num_tc,
			hns3_get_max_available_channels(h));
		return -EINVAL;
	}

	new_tqp_num = hns3_adjust_tqps_num(kinfo->num_tc, new_tqp_num);
	if (kinfo->num_tqps == new_tqp_num)
		return 0;

	if (if_running)
		hns3_nic_net_stop(netdev);

	hns3_clear_all_ring(h);

	ret = hns3_nic_uninit_vector_data(priv);
	if (ret) {
		dev_err(&netdev->dev,
			"Unbind vector with tqp fail, nothing is changed");
		goto open_netdev;
	}

	/* Changing the tqp num may also change the vector num, and
	 * ethtool only supports setting and querying one coalesce
	 * configuration for now, so save vector 0's coalesce
	 * configuration here in order to restore it.
	 */
	memcpy(&tx_coal, &priv->tqp_vector[0].tx_group.coal,
	       sizeof(struct hns3_enet_coalesce));
	memcpy(&rx_coal, &priv->tqp_vector[0].rx_group.coal,
	       sizeof(struct hns3_enet_coalesce));

	hns3_nic_dealloc_vector_data(priv);

	hns3_uninit_all_ring(priv);
	hns3_put_ring_config(priv);

	org_tqp_num = h->kinfo.num_tqps;
	ret = hns3_modify_tqp_num(netdev, new_tqp_num, &tx_coal, &rx_coal);
	if (ret) {
		ret = hns3_modify_tqp_num(netdev, org_tqp_num,
					  &tx_coal, &rx_coal);
		if (ret) {
			/* If reverting to the old tqp num also fails, a fatal error has occurred */
			dev_err(&netdev->dev,
				"Revert to old tqp num fail, ret=%d", ret);
			return ret;
		}
		dev_info(&netdev->dev,
			 "Change tqp num fail, Revert to old tqp num");
	}

open_netdev:
	if (if_running)
		hns3_nic_net_open(netdev);

	return ret;
}

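/* Client callbacks invoked by the hnae3 framework on behalf of this driver */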
static const struct hnae3_client_ops client_ops = {
	.init_instance = hns3_client_init,
	.uninit_instance = hns3_client_uninit,
	.link_status_change = hns3_link_status_change,
	.setup_tc = hns3_client_setup_tc,
	.reset_notify = hns3_reset_notify,
};

/* hns3_init_module - Driver registration routine
 * hns3_init_module is the first routine called when the driver is
 * loaded. It registers the KNIC client with the hnae3 framework and
 * then registers the driver with the PCI subsystem.
 */
static int __init hns3_init_module(void)
{
	int ret;

	pr_info("%s: %s - version\n", hns3_driver_name, hns3_driver_string);
	pr_info("%s: %s\n", hns3_driver_name, hns3_copyright);

	client.type = HNAE3_CLIENT_KNIC;
	snprintf(client.name, HNAE3_CLIENT_NAME_LENGTH - 1, "%s",
		 hns3_driver_name);

	client.ops = &client_ops;

	ret = hnae3_register_client(&client);
	if (ret)
		return ret;

	ret = pci_register_driver(&hns3_driver);
	if (ret)
		hnae3_unregister_client(&client);

	return ret;
}
module_init(hns3_init_module);

/* hns3_exit_module - Driver exit cleanup routine
 * hns3_exit_module is called just before the driver is removed
 * from memory.
 */
static void __exit hns3_exit_module(void)
{
	pci_unregister_driver(&hns3_driver);
	hnae3_unregister_client(&client);
}
module_exit(hns3_exit_module);

MODULE_DESCRIPTION("HNS3: Hisilicon Ethernet Driver");
MODULE_AUTHOR("Huawei Tech. Co., Ltd.");
MODULE_LICENSE("GPL");
MODULE_ALIAS("pci:hns-nic");