/*
 * Copyright(c) 2017 Intel Corporation.
 *
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * BSD LICENSE
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *  - Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  - Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *  - Neither the name of Intel Corporation nor the names of its
 *    contributors may be used to endorse or promote products derived
 *    from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 */

/*
 * This file contains HFI1 support for VNIC functionality
 */

#include <linux/io.h>
#include <linux/if_vlan.h>

#include "vnic.h"

/* netdev transmit watchdog timeout, in milliseconds */
#define HFI_TX_TIMEOUT_MS 1000

/* maximum number of skbs allowed to sit on a vnic receive queue */
#define HFI1_VNIC_RCV_Q_SIZE 1024

/* bit in vinfo->flags indicating the vnic port is up */
#define HFI1_VNIC_UP 0

/* serializes the shared vport counter update done in hfi1_vnic_bypass_rcv() */
static DEFINE_SPINLOCK(vport_cntr_lock);

void hfi1_vnic_setup(struct hfi1_devdata *dd)
{
	idr_init(&dd->vnic.vesw_idr);
}

void hfi1_vnic_cleanup(struct hfi1_devdata *dd)
{
	idr_destroy(&dd->vnic.vesw_idr);
}

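/*
 * SUM_GRP_COUNTERS - add a per-queue counter group into the aggregate stats.
 * Walks the counters from 'unicast' through 's_1519_max', so it relies on
 * those fields being laid out contiguously in struct opa_vnic_grp_stats.
 */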
#define SUM_GRP_COUNTERS(stats, qstats, x_grp) do {            \
		u64 *src64, *dst64;                            \
		for (src64 = &qstats->x_grp.unicast,           \
			dst64 = &stats->x_grp.unicast;         \
			dst64 <= &stats->x_grp.s_1519_max;) {  \
			*dst64++ += *src64++;                  \
		}                                              \
	} while (0)

/* hfi1_vnic_update_stats - aggregate per-queue statistics into vport totals */
static void hfi1_vnic_update_stats(struct hfi1_vnic_vport_info *vinfo,
				   struct opa_vnic_stats *stats)
{
	struct net_device *netdev = vinfo->netdev;
	u8 i;

	/* add tx counters on different queues */
	for (i = 0; i < vinfo->num_tx_q; i++) {
		struct opa_vnic_stats *qstats = &vinfo->stats[i];
		struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

		stats->netstats.tx_fifo_errors += qnstats->tx_fifo_errors;
		stats->netstats.tx_carrier_errors += qnstats->tx_carrier_errors;
		stats->tx_drop_state += qstats->tx_drop_state;
		stats->tx_dlid_zero += qstats->tx_dlid_zero;

		SUM_GRP_COUNTERS(stats, qstats, tx_grp);
		stats->netstats.tx_packets += qnstats->tx_packets;
		stats->netstats.tx_bytes += qnstats->tx_bytes;
	}

	/* add rx counters on different queues */
	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct opa_vnic_stats *qstats = &vinfo->stats[i];
		struct rtnl_link_stats64 *qnstats = &vinfo->stats[i].netstats;

		stats->netstats.rx_fifo_errors += qnstats->rx_fifo_errors;
		stats->netstats.rx_nohandler += qnstats->rx_nohandler;
		stats->rx_drop_state += qstats->rx_drop_state;
		stats->rx_oversize += qstats->rx_oversize;
		stats->rx_runt += qstats->rx_runt;

		SUM_GRP_COUNTERS(stats, qstats, rx_grp);
		stats->netstats.rx_packets += qnstats->rx_packets;
		stats->netstats.rx_bytes += qnstats->rx_bytes;
	}

	stats->netstats.tx_errors = stats->netstats.tx_fifo_errors +
				    stats->netstats.tx_carrier_errors +
				    stats->tx_drop_state + stats->tx_dlid_zero;
	stats->netstats.tx_dropped = stats->netstats.tx_errors;

	stats->netstats.rx_errors = stats->netstats.rx_fifo_errors +
				    stats->netstats.rx_nohandler +
				    stats->rx_drop_state + stats->rx_oversize +
				    stats->rx_runt;
	stats->netstats.rx_dropped = stats->netstats.rx_errors;

	netdev->stats.tx_packets = stats->netstats.tx_packets;
	netdev->stats.tx_bytes = stats->netstats.tx_bytes;
	netdev->stats.tx_fifo_errors = stats->netstats.tx_fifo_errors;
	netdev->stats.tx_carrier_errors = stats->netstats.tx_carrier_errors;
	netdev->stats.tx_errors = stats->netstats.tx_errors;
	netdev->stats.tx_dropped = stats->netstats.tx_dropped;

	netdev->stats.rx_packets = stats->netstats.rx_packets;
	netdev->stats.rx_bytes = stats->netstats.rx_bytes;
	netdev->stats.rx_fifo_errors = stats->netstats.rx_fifo_errors;
	netdev->stats.multicast = stats->rx_grp.mcastbcast;
	netdev->stats.rx_length_errors = stats->rx_oversize + stats->rx_runt;
	netdev->stats.rx_errors = stats->netstats.rx_errors;
	netdev->stats.rx_dropped = stats->netstats.rx_dropped;
}

/* update_len_counters - update pkt's len histogram counters */
static inline void update_len_counters(struct opa_vnic_grp_stats *grp,
				       int len)
{
	/* bucket thresholds account for the 4 byte FCS, which is not in len */
	if (len >= 1515)
		grp->s_1519_max++;
	else if (len >= 1020)
		grp->s_1024_1518++;
	else if (len >= 508)
		grp->s_512_1023++;
	else if (len >= 252)
		grp->s_256_511++;
	else if (len >= 124)
		grp->s_128_255++;
	else if (len >= 61)
		grp->s_65_127++;
	else
		grp->s_64++;
}

/* hfi1_vnic_update_tx_counters - update transmit counters */
static void hfi1_vnic_update_tx_counters(struct hfi1_vnic_vport_info *vinfo,
					 u8 q_idx, struct sk_buff *skb, int err)
{
	struct ethhdr *mac_hdr = (struct ethhdr *)skb_mac_header(skb);
	struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
	struct opa_vnic_grp_stats *tx_grp = &stats->tx_grp;
	u16 vlan_tci;

	stats->netstats.tx_packets++;
	stats->netstats.tx_bytes += skb->len + ETH_FCS_LEN;

	update_len_counters(tx_grp, skb->len);

	/* rest of the counts are for good packets only */
	if (unlikely(err))
		return;

	if (is_multicast_ether_addr(mac_hdr->h_dest))
		tx_grp->mcastbcast++;
	else
		tx_grp->unicast++;

	if (!__vlan_get_tag(skb, &vlan_tci))
		tx_grp->vlan++;
	else
		tx_grp->untagged++;
}

/* hfi1_vnic_update_rx_counters - update receive counters */
static void hfi1_vnic_update_rx_counters(struct hfi1_vnic_vport_info *vinfo,
					 u8 q_idx, struct sk_buff *skb, int err)
{
	struct ethhdr *mac_hdr = (struct ethhdr *)skb->data;
	struct opa_vnic_stats *stats = &vinfo->stats[q_idx];
	struct opa_vnic_grp_stats *rx_grp = &stats->rx_grp;
	u16 vlan_tci;

	stats->netstats.rx_packets++;
	stats->netstats.rx_bytes += skb->len + ETH_FCS_LEN;

	update_len_counters(rx_grp, skb->len);

	/* rest of the counts are for good packets only */
	if (unlikely(err))
		return;

	if (is_multicast_ether_addr(mac_hdr->h_dest))
		rx_grp->mcastbcast++;
	else
		rx_grp->unicast++;

	if (!__vlan_get_tag(skb, &vlan_tci))
		rx_grp->vlan++;
	else
		rx_grp->untagged++;
}

/*
 * This function is overloaded for the opa_vnic specific implementation:
 * the caller passes in an opa_vnic_stats, so the OPA-specific counters
 * are updated along with the standard netdev statistics.
 */
static void hfi1_vnic_get_stats64(struct net_device *netdev,
				  struct rtnl_link_stats64 *stats)
{
	struct opa_vnic_stats *vstats = (struct opa_vnic_stats *)stats;
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

	hfi1_vnic_update_stats(vinfo, vstats);
}

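/*
 * create_bypass_pbc - build the PBC (per-buffer control word) for a bypass
 * packet: no HCRC insertion, bypass ICRC handling, credit return requested,
 * with the VL and the packet length in dwords encoded in their fields.
 */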
static u64 create_bypass_pbc(u32 vl, u32 dw_len)
{
	u64 pbc;

	pbc = ((u64)PBC_IHCRC_NONE << PBC_INSERT_HCRC_SHIFT)
		| PBC_INSERT_BYPASS_ICRC | PBC_CREDIT_RETURN
		| PBC_PACKET_BYPASS
		| ((vl & PBC_VL_MASK) << PBC_VL_SHIFT)
		| (dw_len & PBC_LENGTH_DWS_MASK) << PBC_LENGTH_DWS_SHIFT;

	return pbc;
}

/* hfi1_vnic_maybe_stop_tx - stop tx queue if required */
static void hfi1_vnic_maybe_stop_tx(struct hfi1_vnic_vport_info *vinfo,
				    u8 q_idx)
{
	netif_stop_subqueue(vinfo->netdev, q_idx);
}

static netdev_tx_t hfi1_netdev_start_xmit(struct sk_buff *skb,
					  struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	u8 pad_len, q_idx = skb->queue_mapping;
	struct hfi1_devdata *dd = vinfo->dd;
	struct opa_vnic_skb_mdata *mdata;
	u32 pkt_len, total_len;
	int err = -EINVAL;
	u64 pbc;

	v_dbg("xmit: queue %d skb len %d\n", q_idx, skb->len);
	if (unlikely(!netif_oper_up(netdev))) {
		vinfo->stats[q_idx].tx_drop_state++;
		goto tx_finish;
	}

	/* take out meta data */
	mdata = (struct opa_vnic_skb_mdata *)skb->data;
	skb_pull(skb, sizeof(*mdata));
	if (unlikely(mdata->flags & OPA_VNIC_SKB_MDATA_ENCAP_ERR)) {
		vinfo->stats[q_idx].tx_dlid_zero++;
		goto tx_finish;
	}

	/* add tail padding (for 8 bytes size alignment) and icrc */
	pad_len = -(skb->len + OPA_VNIC_ICRC_TAIL_LEN) & 0x7;
	pad_len += OPA_VNIC_ICRC_TAIL_LEN;

	/*
	 * pkt_len is how much data we have to write (header plus data), in
	 * dwords.  total_len is the length of the packet in dwords plus the
	 * PBC, and should not include the CRC.
	 */
	pkt_len = (skb->len + pad_len) >> 2;
	total_len = pkt_len + 2; /* PBC + packet */

	pbc = create_bypass_pbc(mdata->vl, total_len);

	skb_get(skb);
	v_dbg("pbc 0x%016llX len %d pad_len %d\n", pbc, skb->len, pad_len);
	err = dd->process_vnic_dma_send(dd, q_idx, vinfo, skb, pbc, pad_len);
	if (unlikely(err)) {
		if (err == -ENOMEM)
			vinfo->stats[q_idx].netstats.tx_fifo_errors++;
		else if (err != -EBUSY)
			vinfo->stats[q_idx].netstats.tx_carrier_errors++;
	}
	/* remove the header before updating tx counters */
	skb_pull(skb, OPA_VNIC_HDR_LEN);

	if (unlikely(err == -EBUSY)) {
		hfi1_vnic_maybe_stop_tx(vinfo, q_idx);
		dev_kfree_skb_any(skb);
		return NETDEV_TX_BUSY;
	}

tx_finish:
	/* update tx counters */
	hfi1_vnic_update_tx_counters(vinfo, q_idx, skb, err);
	dev_kfree_skb_any(skb);
	return NETDEV_TX_OK;
}

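/* ndo_select_queue handler; all traffic is currently mapped to queue 0 */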
static u16 hfi1_vnic_select_queue(struct net_device *netdev,
				  struct sk_buff *skb,
				  void *accel_priv,
				  select_queue_fallback_t fallback)
{
	return 0;
}

/* hfi1_vnic_decap_skb - strip OPA header from the skb (ethernet) packet */
static inline int hfi1_vnic_decap_skb(struct hfi1_vnic_rx_queue *rxq,
				      struct sk_buff *skb)
{
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	int max_len = vinfo->netdev->mtu + VLAN_ETH_HLEN;
	int rc = -EFAULT;

	skb_pull(skb, OPA_VNIC_HDR_LEN);

	/* Validate Packet length */
	if (unlikely(skb->len > max_len))
		vinfo->stats[rxq->idx].rx_oversize++;
	else if (unlikely(skb->len < ETH_ZLEN))
		vinfo->stats[rxq->idx].rx_runt++;
	else
		rc = 0;
	return rc;
}

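/*
 * hfi1_vnic_get_skb - dequeue the next received skb and trim off the ICRC
 * and tail padding; the pad length is carried in the low three bits of the
 * packet's last byte.
 */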
static inline struct sk_buff *hfi1_vnic_get_skb(struct hfi1_vnic_rx_queue *rxq)
{
	unsigned char *pad_info;
	struct sk_buff *skb;

	skb = skb_dequeue(&rxq->skbq);
	if (unlikely(!skb))
		return NULL;

	/* remove tail padding and icrc */
	pad_info = skb->data + skb->len - 1;
	skb_trim(skb, (skb->len - OPA_VNIC_ICRC_TAIL_LEN -
		       ((*pad_info) & 0x7)));

	return skb;
}

/* hfi1_vnic_handle_rx - handle skb receive */
static void hfi1_vnic_handle_rx(struct hfi1_vnic_rx_queue *rxq,
				int *work_done, int work_to_do)
{
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	struct sk_buff *skb;
	int rc;

	while (1) {
		if (*work_done >= work_to_do)
			break;

		skb = hfi1_vnic_get_skb(rxq);
		if (unlikely(!skb))
			break;

		rc = hfi1_vnic_decap_skb(rxq, skb);
		/* update rx counters */
		hfi1_vnic_update_rx_counters(vinfo, rxq->idx, skb, rc);
		if (unlikely(rc)) {
			dev_kfree_skb_any(skb);
			continue;
		}

		skb_checksum_none_assert(skb);
		skb->protocol = eth_type_trans(skb, rxq->netdev);

		napi_gro_receive(&rxq->napi, skb);
		(*work_done)++;
	}
}

/* hfi1_vnic_napi - napi receive polling callback function */
static int hfi1_vnic_napi(struct napi_struct *napi, int budget)
{
	struct hfi1_vnic_rx_queue *rxq = container_of(napi,
					      struct hfi1_vnic_rx_queue, napi);
	struct hfi1_vnic_vport_info *vinfo = rxq->vinfo;
	int work_done = 0;

	v_dbg("napi %d budget %d\n", rxq->idx, budget);
	hfi1_vnic_handle_rx(rxq, &work_done, budget);

	v_dbg("napi %d work_done %d\n", rxq->idx, work_done);
	if (work_done < budget)
		napi_complete(napi);

	return work_done;
}

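/*
 * hfi1_vnic_bypass_rcv - receive handler for VNIC bypass packets.  Looks up
 * the vport by its VESW id, copies the packet into an skb on the matching
 * receive queue and schedules NAPI to process it.
 */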
void hfi1_vnic_bypass_rcv(struct hfi1_packet *packet)
{
	struct hfi1_devdata *dd = packet->rcd->dd;
	struct hfi1_vnic_vport_info *vinfo = NULL;
	struct hfi1_vnic_rx_queue *rxq;
	struct sk_buff *skb;
	int l4_type, vesw_id = -1;
	u8 q_idx;

	l4_type = HFI1_GET_L4_TYPE(packet->ebuf);
	if (likely(l4_type == OPA_VNIC_L4_ETHR)) {
		vesw_id = HFI1_VNIC_GET_VESWID(packet->ebuf);
		vinfo = idr_find(&dd->vnic.vesw_idr, vesw_id);

		/*
		 * In case of invalid vesw id, count the error on
		 * the first available vport.
		 */
		if (unlikely(!vinfo)) {
			struct hfi1_vnic_vport_info *vinfo_tmp;
			int id_tmp = 0;

			vinfo_tmp = idr_get_next(&dd->vnic.vesw_idr, &id_tmp);
			if (vinfo_tmp) {
				spin_lock(&vport_cntr_lock);
				vinfo_tmp->stats[0].netstats.rx_nohandler++;
				spin_unlock(&vport_cntr_lock);
			}
		}
	}

	if (unlikely(!vinfo)) {
		dd_dev_warn(dd, "vnic rcv err: l4 %d vesw id %d ctx %d\n",
			    l4_type, vesw_id, packet->rcd->ctxt);
		return;
	}

	q_idx = packet->rcd->vnic_q_idx;
	rxq = &vinfo->rxq[q_idx];
	if (unlikely(!netif_oper_up(vinfo->netdev))) {
		vinfo->stats[q_idx].rx_drop_state++;
		skb_queue_purge(&rxq->skbq);
		return;
	}

	if (unlikely(skb_queue_len(&rxq->skbq) > HFI1_VNIC_RCV_Q_SIZE)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	skb = netdev_alloc_skb(vinfo->netdev, packet->tlen);
	if (unlikely(!skb)) {
		vinfo->stats[q_idx].netstats.rx_fifo_errors++;
		return;
	}

	memcpy(skb->data, packet->ebuf, packet->tlen);
	skb_put(skb, packet->tlen);
	skb_queue_tail(&rxq->skbq, skb);

	if (napi_schedule_prep(&rxq->napi)) {
		v_dbg("napi %d scheduling\n", q_idx);
		__napi_schedule(&rxq->napi);
	}
}

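/* hfi1_vnic_up - publish the vport via its VESW id and start the rx/tx queues */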
static int hfi1_vnic_up(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	struct net_device *netdev = vinfo->netdev;
	int i, rc;

	/* ensure virtual eth switch id is valid */
	if (!vinfo->vesw_id)
		return -EINVAL;

	rc = idr_alloc(&dd->vnic.vesw_idr, vinfo, vinfo->vesw_id,
		       vinfo->vesw_id + 1, GFP_NOWAIT);
	if (rc < 0)
		return rc;

	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		skb_queue_head_init(&rxq->skbq);
		napi_enable(&rxq->napi);
	}

	netif_carrier_on(netdev);
	netif_tx_start_all_queues(netdev);
	set_bit(HFI1_VNIC_UP, &vinfo->flags);

	return 0;
}

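/* hfi1_vnic_down - stop the queues, unpublish the vport and drop queued skbs */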
static void hfi1_vnic_down(struct hfi1_vnic_vport_info *vinfo)
{
	struct hfi1_devdata *dd = vinfo->dd;
	u8 i;

	clear_bit(HFI1_VNIC_UP, &vinfo->flags);
	netif_carrier_off(vinfo->netdev);
	netif_tx_disable(vinfo->netdev);
	idr_remove(&dd->vnic.vesw_idr, vinfo->vesw_id);

	/* remove unread skbs */
	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		napi_disable(&rxq->napi);
		skb_queue_purge(&rxq->skbq);
	}
}

static int hfi1_netdev_open(struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	int rc;

	mutex_lock(&vinfo->lock);
	rc = hfi1_vnic_up(vinfo);
	mutex_unlock(&vinfo->lock);
	return rc;
}

static int hfi1_netdev_close(struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

	mutex_lock(&vinfo->lock);
	if (test_bit(HFI1_VNIC_UP, &vinfo->flags))
		hfi1_vnic_down(vinfo);
	mutex_unlock(&vinfo->lock);
	return 0;
}

static void hfi1_vnic_set_vesw_id(struct net_device *netdev, int id)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);
	bool reopen = false;

	/*
	 * If vesw_id is being changed, and if the vnic port is up,
	 * reset the vnic port to ensure new vesw_id gets picked up
	 */
	if (id != vinfo->vesw_id) {
		mutex_lock(&vinfo->lock);
		if (test_bit(HFI1_VNIC_UP, &vinfo->flags)) {
			hfi1_vnic_down(vinfo);
			reopen = true;
		}

		vinfo->vesw_id = id;
		if (reopen)
			hfi1_vnic_up(vinfo);

		mutex_unlock(&vinfo->lock);
	}
}

/* netdev ops */
static const struct net_device_ops hfi1_netdev_ops = {
	.ndo_open = hfi1_netdev_open,
	.ndo_stop = hfi1_netdev_close,
	.ndo_start_xmit = hfi1_netdev_start_xmit,
	.ndo_select_queue = hfi1_vnic_select_queue,
	.ndo_get_stats64 = hfi1_vnic_get_stats64,
};

struct net_device *hfi1_vnic_alloc_rn(struct ib_device *device,
				      u8 port_num,
				      enum rdma_netdev_t type,
				      const char *name,
				      unsigned char name_assign_type,
				      void (*setup)(struct net_device *))
{
	struct hfi1_devdata *dd = dd_from_ibdev(device);
	struct hfi1_vnic_vport_info *vinfo;
	struct net_device *netdev;
	struct rdma_netdev *rn;
	int i, size;

	if (!port_num || (port_num > dd->num_pports))
		return ERR_PTR(-EINVAL);

	if (type != RDMA_NETDEV_OPA_VNIC)
		return ERR_PTR(-EOPNOTSUPP);

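	/* private area is sized for the rdma_netdev control block plus the vport info */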
	size = sizeof(struct opa_vnic_rdma_netdev) + sizeof(*vinfo);
	netdev = alloc_netdev_mqs(size, name, name_assign_type, setup,
				  dd->chip_sdma_engines, HFI1_NUM_VNIC_CTXT);
	if (!netdev)
		return ERR_PTR(-ENOMEM);

	rn = netdev_priv(netdev);
	vinfo = opa_vnic_dev_priv(netdev);
	vinfo->dd = dd;
	vinfo->num_tx_q = dd->chip_sdma_engines;
	vinfo->num_rx_q = HFI1_NUM_VNIC_CTXT;
	vinfo->netdev = netdev;
	rn->set_id = hfi1_vnic_set_vesw_id;

	netdev->features = NETIF_F_HIGHDMA | NETIF_F_SG;
	netdev->hw_features = netdev->features;
	netdev->vlan_features = netdev->features;
	netdev->watchdog_timeo = msecs_to_jiffies(HFI_TX_TIMEOUT_MS);
	netdev->netdev_ops = &hfi1_netdev_ops;
	mutex_init(&vinfo->lock);

	for (i = 0; i < vinfo->num_rx_q; i++) {
		struct hfi1_vnic_rx_queue *rxq = &vinfo->rxq[i];

		rxq->idx = i;
		rxq->vinfo = vinfo;
		rxq->netdev = netdev;
		netif_napi_add(netdev, &rxq->napi, hfi1_vnic_napi, 64);
	}

	return netdev;
}

void hfi1_vnic_free_rn(struct net_device *netdev)
{
	struct hfi1_vnic_vport_info *vinfo = opa_vnic_dev_priv(netdev);

	mutex_destroy(&vinfo->lock);
	free_netdev(netdev);
}