/*******************************************************************************

  Intel 10 Gigabit PCI Express Linux driver
  Copyright(c) 1999 - 2011 Intel Corporation.

  This program is free software; you can redistribute it and/or modify it
  under the terms and conditions of the GNU General Public License,
  version 2, as published by the Free Software Foundation.

  This program is distributed in the hope it will be useful, but WITHOUT
  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
  more details.

  You should have received a copy of the GNU General Public License along with
  this program; if not, write to the Free Software Foundation, Inc.,
  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.

  The full GNU General Public License is included in this distribution in
  the file called "COPYING".

  Contact Information:
  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497

*******************************************************************************/

#include "ixgbe.h"
#ifdef CONFIG_IXGBE_DCB
#include "ixgbe_dcb_82599.h"
#endif /* CONFIG_IXGBE_DCB */
#include <linux/if_ether.h>
#include <linux/gfp.h>
#include <linux/if_vlan.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/scsi_device.h>
#include <scsi/fc/fc_fs.h>
#include <scsi/fc/fc_fcoe.h>
#include <scsi/libfc.h>
#include <scsi/libfcoe.h>

/**
 * ixgbe_rx_is_fcoe - check the rx desc for incoming pkt type
 * @rx_desc: advanced rx descriptor
 *
 * Returns : true if it is FCoE pkt
 */
static inline bool ixgbe_rx_is_fcoe(union ixgbe_adv_rx_desc *rx_desc)
{
	u16 p;

	p = le16_to_cpu(rx_desc->wb.lower.lo_dword.hs_rss.pkt_info);
	if (p & IXGBE_RXDADV_PKTTYPE_ETQF) {
		p &= IXGBE_RXDADV_PKTTYPE_ETQF_MASK;
		p >>= IXGBE_RXDADV_PKTTYPE_ETQF_SHIFT;
		return p == IXGBE_ETQF_FILTER_FCOE;
	}
	return false;
}
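
/*
 * Note: ixgbe_configure_fcoe() below programs EtherType filter entry
 * IXGBE_ETQF_FILTER_FCOE with ETH_P_FCOE, so the filter index reported in
 * pkt_info is what identifies FCoE frames on receive.
 */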

/**
 * ixgbe_fcoe_clear_ddp - clear the given ddp context
 * @ddp: ptr to the ixgbe_fcoe_ddp
 *
 * Returns : none
 *
 */
static inline void ixgbe_fcoe_clear_ddp(struct ixgbe_fcoe_ddp *ddp)
{
	ddp->len = 0;
	ddp->err = 1;
	ddp->udl = NULL;
	ddp->udp = 0UL;
	ddp->sgl = NULL;
	ddp->sgc = 0;
}

/**
 * ixgbe_fcoe_ddp_put - free the ddp context for a given xid
 * @netdev: the corresponding net_device
 * @xid: the xid whose corresponding ddp context will be freed
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_done
 * and it is expected to be called by the ULD, i.e., the FCP layer of libfc,
 * to release the corresponding ddp context when the I/O is done.
 *
 * Returns : data length already ddp-ed in bytes
 */
int ixgbe_fcoe_ddp_put(struct net_device *netdev, u16 xid)
{
	int len = 0;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_adapter *adapter;
	struct ixgbe_fcoe_ddp *ddp;
	u32 fcbuff;

	if (!netdev)
		goto out_ddp_put;

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto out_ddp_put;

	adapter = netdev_priv(netdev);
	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto out_ddp_put;

	len = ddp->len;
	/* if there is an error, force invalidation of the ddp context */
	if (ddp->err) {
		spin_lock_bh(&fcoe->lock);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLT, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCFLTRW,
				(xid | IXGBE_FCFLTRW_WE));
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCBUFF, 0);
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_WE));

		/* guaranteed to be invalidated after 100us */
		IXGBE_WRITE_REG(&adapter->hw, IXGBE_FCDMARW,
				(xid | IXGBE_FCDMARW_RE));
		fcbuff = IXGBE_READ_REG(&adapter->hw, IXGBE_FCBUFF);
		spin_unlock_bh(&fcoe->lock);
		if (fcbuff & IXGBE_FCBUFF_VALID)
			udelay(100);
	}
	if (ddp->sgl)
		pci_unmap_sg(adapter->pdev, ddp->sgl, ddp->sgc,
			     DMA_FROM_DEVICE);
	if (ddp->pool) {
		pci_pool_free(ddp->pool, ddp->udl, ddp->udp);
		ddp->pool = NULL;
	}

	ixgbe_fcoe_clear_ddp(ddp);

out_ddp_put:
	return len;
}

/**
 * ixgbe_fcoe_ddp_setup - called to set up ddp context
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 * @target_mode: 1 to set up DDP in target mode, 0 for initiator mode
 *
 * Returns : 1 for success and 0 for no ddp
 */
static int ixgbe_fcoe_ddp_setup(struct net_device *netdev, u16 xid,
				struct scatterlist *sgl, unsigned int sgc,
				int target_mode)
{
	struct ixgbe_adapter *adapter;
	struct ixgbe_hw *hw;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct scatterlist *sg;
	unsigned int i, j, dmacount;
	unsigned int len;
	static const unsigned int bufflen = IXGBE_FCBUFF_MIN;
	unsigned int firstoff = 0;
	unsigned int lastsize;
	unsigned int thisoff = 0;
	unsigned int thislen = 0;
	u32 fcbuff, fcdmarw, fcfltrw, fcrxctl;
	dma_addr_t addr = 0;
	struct pci_pool *pool;

	if (!netdev || !sgl)
		return 0;

	adapter = netdev_priv(netdev);
	if (xid >= IXGBE_FCOE_DDP_MAX) {
		e_warn(drv, "xid=0x%x out-of-range\n", xid);
		return 0;
	}

	/* no DDP if we are already down or resetting */
	if (test_bit(__IXGBE_DOWN, &adapter->state) ||
	    test_bit(__IXGBE_RESETTING, &adapter->state))
		return 0;

	fcoe = &adapter->fcoe;
	if (!fcoe->pool) {
		e_warn(drv, "xid=0x%x no ddp pool for fcoe\n", xid);
		return 0;
	}

	ddp = &fcoe->ddp[xid];
	if (ddp->sgl) {
		e_err(drv, "xid 0x%x w/ non-null sgl=%p nents=%d\n",
		      xid, ddp->sgl, ddp->sgc);
		return 0;
	}
	ixgbe_fcoe_clear_ddp(ddp);

	/* set up dma from the scsi command sgl */
	dmacount = pci_map_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	if (dmacount == 0) {
		e_err(drv, "xid 0x%x DMA map error\n", xid);
		return 0;
	}

	/* alloc the udl from the per cpu ddp pool */
	pool = *per_cpu_ptr(fcoe->pool, get_cpu());
	ddp->udl = pci_pool_alloc(pool, GFP_ATOMIC, &ddp->udp);
	if (!ddp->udl) {
		e_err(drv, "failed to allocate ddp context\n");
		goto out_noddp_unmap;
	}
	ddp->pool = pool;
	ddp->sgl = sgl;
	ddp->sgc = sgc;

	j = 0;
	for_each_sg(sgl, sg, dmacount, i) {
		addr = sg_dma_address(sg);
		len = sg_dma_len(sg);
		while (len) {
			/* max number of buffers allowed in one DDP context */
			if (j >= IXGBE_BUFFCNT_MAX) {
				e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
				      "not enough descriptors\n",
				      xid, i, j, dmacount, (u64)addr);
				goto out_noddp_free;
			}

			/* get the offset and length of the current buffer */
			thisoff = addr & ((dma_addr_t)bufflen - 1);
			thislen = min((bufflen - thisoff), len);
			/*
			 * all but the 1st buffer (j == 0)
			 * must be aligned on bufflen
			 */
			if ((j != 0) && (thisoff))
				goto out_noddp_free;
			/*
			 * all but the last buffer
			 * ((i == (dmacount - 1)) && (thislen == len))
			 * must end at bufflen
			 */
			if (((i != (dmacount - 1)) || (thislen != len))
			    && ((thislen + thisoff) != bufflen))
				goto out_noddp_free;

			ddp->udl[j] = (u64)(addr - thisoff);
			/* only the first buffer may have a non-zero offset */
			if (j == 0)
				firstoff = thisoff;
			len -= thislen;
			addr += thislen;
			j++;
		}
	}
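
	/*
	 * Illustrative example (hypothetical addresses, bufflen = 4KB): a
	 * single SG element with DMA address 0x1080 and length 0x2f00 is
	 * described by three bufflen-aligned user buffers at 0x1000, 0x2000
	 * and 0x3000, with firstoff = 0x80 and a final chunk of 0xf80 bytes,
	 * so lastsize below becomes 0xf80.
	 */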
	/* only the last buffer may have non-full bufflen */
	lastsize = thisoff + thislen;

	/*
	 * lastsize can not be bufflen.
	 * If it is, add another buffer with lastsize = 1.
	 */
	if (lastsize == bufflen) {
		if (j >= IXGBE_BUFFCNT_MAX) {
			e_err(drv, "xid=%x:%d,%d,%d:addr=%llx "
			      "not enough user buffers. We need an extra "
			      "buffer because lastsize is bufflen.\n",
			      xid, i, j, dmacount, (u64)addr);
			goto out_noddp_free;
		}

		ddp->udl[j] = (u64)(fcoe->extra_ddp_buffer_dma);
		j++;
		lastsize = 1;
	}
	put_cpu();

	fcbuff = (IXGBE_FCBUFF_4KB << IXGBE_FCBUFF_BUFFSIZE_SHIFT);
	fcbuff |= ((j & 0xff) << IXGBE_FCBUFF_BUFFCNT_SHIFT);
	fcbuff |= (firstoff << IXGBE_FCBUFF_OFFSET_SHIFT);
	/* Set WRCONTX bit to allow DDP for target */
	if (target_mode)
		fcbuff |= (IXGBE_FCBUFF_WRCONTX);
	fcbuff |= (IXGBE_FCBUFF_VALID);

	fcdmarw = xid;
	fcdmarw |= IXGBE_FCDMARW_WE;
	fcdmarw |= (lastsize << IXGBE_FCDMARW_LASTSIZE_SHIFT);

	fcfltrw = xid;
	fcfltrw |= IXGBE_FCFLTRW_WE;

	/* program DMA context */
	hw = &adapter->hw;
	spin_lock_bh(&fcoe->lock);

	/*
	 * turn on last frame indication for target mode, as the target is
	 * supposed to send FCP_RSP when it is done.
	 */
	if (target_mode && !test_bit(__IXGBE_FCOE_TARGET, &fcoe->mode)) {
		set_bit(__IXGBE_FCOE_TARGET, &fcoe->mode);
		fcrxctl = IXGBE_READ_REG(hw, IXGBE_FCRXCTRL);
		fcrxctl |= IXGBE_FCRXCTRL_LASTSEQH;
		IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL, fcrxctl);
	}

	IXGBE_WRITE_REG(hw, IXGBE_FCPTRL, ddp->udp & DMA_BIT_MASK(32));
	IXGBE_WRITE_REG(hw, IXGBE_FCPTRH, (u64)ddp->udp >> 32);
	IXGBE_WRITE_REG(hw, IXGBE_FCBUFF, fcbuff);
	IXGBE_WRITE_REG(hw, IXGBE_FCDMARW, fcdmarw);
	/* program filter context */
	IXGBE_WRITE_REG(hw, IXGBE_FCPARAM, 0);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLT, IXGBE_FCFLT_VALID);
	IXGBE_WRITE_REG(hw, IXGBE_FCFLTRW, fcfltrw);

	spin_unlock_bh(&fcoe->lock);

	return 1;

out_noddp_free:
	pci_pool_free(pool, ddp->udl, ddp->udp);
	ixgbe_fcoe_clear_ddp(ddp);

out_noddp_unmap:
	pci_unmap_sg(adapter->pdev, sgl, sgc, DMA_FROM_DEVICE);
	put_cpu();
	return 0;
}

/**
 * ixgbe_fcoe_ddp_get - called to set up ddp context in initiator mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_setup
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_get(struct net_device *netdev, u16 xid,
		       struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 0);
}

/**
 * ixgbe_fcoe_ddp_target - called to set up ddp context in target mode
 * @netdev: the corresponding net_device
 * @xid: the exchange id requesting ddp
 * @sgl: the scatter-gather list for this request
 * @sgc: the number of scatter-gather items
 *
 * This is the implementation of net_device_ops.ndo_fcoe_ddp_target
 * and is expected to be called from ULD, e.g., FCP layer of libfc
 * to set up ddp for the corresponding xid of the given sglist for
 * the corresponding I/O. The DDP in target mode is a write I/O request
 * from the initiator.
 *
 * Returns : 1 for success and 0 for no ddp
 */
int ixgbe_fcoe_ddp_target(struct net_device *netdev, u16 xid,
			  struct scatterlist *sgl, unsigned int sgc)
{
	return ixgbe_fcoe_ddp_setup(netdev, xid, sgl, sgc, 1);
}
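
/*
 * For illustration: a minimal sketch of how these DDP entry points are
 * typically wired into the driver's net_device_ops (member names follow
 * the ndo callbacks referenced in the kernel-doc above; the actual table
 * lives in the main driver file):
 *
 *	.ndo_fcoe_ddp_setup	= ixgbe_fcoe_ddp_get,
 *	.ndo_fcoe_ddp_done	= ixgbe_fcoe_ddp_put,
 *	.ndo_fcoe_ddp_target	= ixgbe_fcoe_ddp_target,
 */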

/**
 * ixgbe_fcoe_ddp - check ddp status and mark it done
 * @adapter: ixgbe adapter
 * @rx_desc: advanced rx descriptor
 * @skb: the skb holding the received data
 *
 * This checks ddp status.
 *
 * Returns : < 0 indicates an error or not a FCoE ddp, 0 indicates
 * not passing the skb to ULD, > 0 indicates the length of data
 * being ddped.
 */
int ixgbe_fcoe_ddp(struct ixgbe_adapter *adapter,
		   union ixgbe_adv_rx_desc *rx_desc,
		   struct sk_buff *skb)
{
	u16 xid;
	u32 fctl;
	u32 sterr, fceofe, fcerr, fcstat;
	int rc = -EINVAL;
	struct ixgbe_fcoe *fcoe;
	struct ixgbe_fcoe_ddp *ddp;
	struct fc_frame_header *fh;
	struct fcoe_crc_eof *crc;

	if (!ixgbe_rx_is_fcoe(rx_desc))
		goto ddp_out;

	sterr = le32_to_cpu(rx_desc->wb.upper.status_error);
	fcerr = (sterr & IXGBE_RXDADV_ERR_FCERR);
	fceofe = (sterr & IXGBE_RXDADV_ERR_FCEOFE);
	if (fcerr == IXGBE_FCERR_BADCRC)
		skb_checksum_none_assert(skb);
	else
		skb->ip_summed = CHECKSUM_UNNECESSARY;

	if (eth_hdr(skb)->h_proto == htons(ETH_P_8021Q))
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct vlan_hdr) + sizeof(struct fcoe_hdr));
	else
		fh = (struct fc_frame_header *)(skb->data +
			sizeof(struct fcoe_hdr));
	fctl = ntoh24(fh->fh_f_ctl);
	if (fctl & FC_FC_EX_CTX)
		xid = be16_to_cpu(fh->fh_ox_id);
	else
		xid = be16_to_cpu(fh->fh_rx_id);

	if (xid >= IXGBE_FCOE_DDP_MAX)
		goto ddp_out;

	fcoe = &adapter->fcoe;
	ddp = &fcoe->ddp[xid];
	if (!ddp->udl)
		goto ddp_out;

	if (fcerr | fceofe)
		goto ddp_out;

	fcstat = (sterr & IXGBE_RXDADV_STAT_FCSTAT);
	if (fcstat) {
		/* update length of DDPed data */
		ddp->len = le32_to_cpu(rx_desc->wb.lower.hi_dword.rss);
		/* unmap the sg list when FCP_RSP is received */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_FCPRSP) {
			pci_unmap_sg(adapter->pdev, ddp->sgl,
				     ddp->sgc, DMA_FROM_DEVICE);
			ddp->err = (fcerr | fceofe);
			ddp->sgl = NULL;
			ddp->sgc = 0;
		}
		/* return 0 to bypass going to ULD for DDPed data */
		if (fcstat == IXGBE_RXDADV_STAT_FCSTAT_DDP)
			rc = 0;
		else if (ddp->len)
			rc = ddp->len;
	}
	/*
	 * In target mode, check the last data frame of the sequence.
	 * For DDP in target mode, data is already DDPed but the header
	 * indication of the last data frame would allow us to tell if we
	 * got all the data and the ULP can send FCP_RSP back. As this is
	 * not a full fcoe frame, we fill in the trailer here so it won't be
	 * dropped by the ULP stack.
	 */
	if ((fh->fh_r_ctl == FC_RCTL_DD_SOL_DATA) &&
	    (fctl & FC_FC_END_SEQ)) {
		crc = (struct fcoe_crc_eof *)skb_put(skb, sizeof(*crc));
		crc->fcoe_eof = FC_EOF_T;
	}
ddp_out:
	return rc;
}
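
/*
 * For illustration, a minimal sketch of how an rx cleanup path might consume
 * the return value (caller-side names such as ddp_bytes and next_desc are
 * assumptions about the main driver file and may differ):
 *
 *	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED) {
 *		ddp_bytes = ixgbe_fcoe_ddp(adapter, rx_desc, skb);
 *		if (!ddp_bytes)
 *			goto next_desc;
 *	}
 *
 * A zero return means the payload was already placed by DDP and the skb is
 * not handed to the ULD; a positive return is the number of bytes DDPed.
 */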

/**
 * ixgbe_fso - ixgbe FCoE Sequence Offload (FSO)
 * @adapter: ixgbe adapter
 * @tx_ring: tx desc ring
 * @skb: associated skb
 * @tx_flags: tx flags
 * @hdr_len: hdr_len to be returned
 *
 * This sets up large send offload for FCoE
 *
 * Returns : 0 indicates no FSO, > 0 for FSO, < 0 for error
 */
int ixgbe_fso(struct ixgbe_adapter *adapter,
	      struct ixgbe_ring *tx_ring, struct sk_buff *skb,
	      u32 tx_flags, u8 *hdr_len)
{
	u8 sof, eof;
	u32 vlan_macip_lens;
	u32 fcoe_sof_eof;
	u32 type_tucmd;
	u32 mss_l4len_idx;
	int mss = 0;
	unsigned int i;
	struct ixgbe_tx_buffer *tx_buffer_info;
	struct ixgbe_adv_tx_context_desc *context_desc;
	struct fc_frame_header *fh;

	if (skb_is_gso(skb) && (skb_shinfo(skb)->gso_type != SKB_GSO_FCOE)) {
		e_err(drv, "Wrong gso type %d:expecting SKB_GSO_FCOE\n",
		      skb_shinfo(skb)->gso_type);
		return -EINVAL;
	}

	/* reset the headers to point at the fcoe/fc headers */
	skb_set_network_header(skb, skb->mac_len);
	skb_set_transport_header(skb, skb->mac_len +
				 sizeof(struct fcoe_hdr));

	/* sets up SOF and ORIS */
	fcoe_sof_eof = 0;
	sof = ((struct fcoe_hdr *)skb_network_header(skb))->fcoe_sof;
	switch (sof) {
	case FC_SOF_I2:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_I3:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIS;
		break;
	case FC_SOF_N2:
		break;
	case FC_SOF_N3:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_SOF;
		break;
	default:
		e_warn(drv, "unknown sof = 0x%x\n", sof);
		return -EINVAL;
	}

	/* the first byte of the last dword is EOF */
	skb_copy_bits(skb, skb->len - 4, &eof, 1);
	/* sets up EOF and ORIE */
	switch (eof) {
	case FC_EOF_N:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
		break;
	case FC_EOF_T:
		/* lso needs ORIE */
		if (skb_is_gso(skb)) {
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_N;
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_ORIE;
		} else {
			fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_T;
		}
		break;
	case FC_EOF_NI:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_NI;
		break;
	case FC_EOF_A:
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_EOF_A;
		break;
	default:
		e_warn(drv, "unknown eof = 0x%x\n", eof);
		return -EINVAL;
	}

	/* sets up PARINC indicating data offset */
	fh = (struct fc_frame_header *)skb_transport_header(skb);
	if (fh->fh_f_ctl[2] & FC_FC_REL_OFF)
		fcoe_sof_eof |= IXGBE_ADVTXD_FCOEF_PARINC;

	/* hdr_len includes fc_hdr if FCoE lso is enabled */
	*hdr_len = sizeof(struct fcoe_crc_eof);
	if (skb_is_gso(skb))
		*hdr_len += (skb_transport_offset(skb) +
			     sizeof(struct fc_frame_header));
	/* vlan_macip_lens: HEADLEN, MACLEN, VLAN tag */
	vlan_macip_lens = (skb_transport_offset(skb) +
			   sizeof(struct fc_frame_header));
	vlan_macip_lens |= ((skb_transport_offset(skb) - 4)
			    << IXGBE_ADVTXD_MACLEN_SHIFT);
	vlan_macip_lens |= (tx_flags & IXGBE_TX_FLAGS_VLAN_MASK);

	/* type_tucmd and mss: set TUCMD.FCoE to enable offload */
	type_tucmd = IXGBE_TXD_CMD_DEXT | IXGBE_ADVTXD_DTYP_CTXT |
		     IXGBE_ADVTXT_TUCMD_FCOE;
	if (skb_is_gso(skb))
		mss = skb_shinfo(skb)->gso_size;
	/* mss_l4len_idx: use 1 for FSO as TSO, no need for L4LEN */
	mss_l4len_idx = (mss << IXGBE_ADVTXD_MSS_SHIFT) |
			(1 << IXGBE_ADVTXD_IDX_SHIFT);

	/* write context desc */
	i = tx_ring->next_to_use;
	context_desc = IXGBE_TX_CTXTDESC_ADV(tx_ring, i);
	context_desc->vlan_macip_lens = cpu_to_le32(vlan_macip_lens);
	context_desc->seqnum_seed = cpu_to_le32(fcoe_sof_eof);
	context_desc->type_tucmd_mlhl = cpu_to_le32(type_tucmd);
	context_desc->mss_l4len_idx = cpu_to_le32(mss_l4len_idx);

	tx_buffer_info = &tx_ring->tx_buffer_info[i];
	tx_buffer_info->time_stamp = jiffies;
	tx_buffer_info->next_to_watch = i;

	i++;
	if (i == tx_ring->count)
		i = 0;
	tx_ring->next_to_use = i;

	return skb_is_gso(skb);
}
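
/*
 * For illustration, a rough sketch of how a transmit path might use
 * ixgbe_fso() (caller-side names such as tso and IXGBE_TX_FLAGS_FSO are
 * assumptions about the main driver file and may differ):
 *
 *	if (tx_flags & IXGBE_TX_FLAGS_FCOE) {
 *		tso = ixgbe_fso(adapter, tx_ring, skb, tx_flags, &hdr_len);
 *		if (tso < 0) {
 *			dev_kfree_skb_any(skb);
 *			return NETDEV_TX_OK;
 *		}
 *		if (tso)
 *			tx_flags |= IXGBE_TX_FLAGS_FSO;
 *	}
 */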

static void ixgbe_fcoe_ddp_pools_free(struct ixgbe_fcoe *fcoe)
{
	unsigned int cpu;
	struct pci_pool **pool;

	for_each_possible_cpu(cpu) {
		pool = per_cpu_ptr(fcoe->pool, cpu);
		if (*pool)
			pci_pool_destroy(*pool);
	}
	free_percpu(fcoe->pool);
	fcoe->pool = NULL;
}

static void ixgbe_fcoe_ddp_pools_alloc(struct ixgbe_adapter *adapter)
{
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	unsigned int cpu;
	struct pci_pool **pool;
	char pool_name[32];

	fcoe->pool = alloc_percpu(struct pci_pool *);
	if (!fcoe->pool)
		return;

	/* allocate pci pool for each cpu */
	for_each_possible_cpu(cpu) {
		snprintf(pool_name, 32, "ixgbe_fcoe_ddp_%d", cpu);
		pool = per_cpu_ptr(fcoe->pool, cpu);
		*pool = pci_pool_create(pool_name,
					adapter->pdev, IXGBE_FCPTR_MAX,
					IXGBE_FCPTR_ALIGN, PAGE_SIZE);
		if (!*pool) {
			e_err(drv, "failed to alloc DDP pool on cpu:%d\n", cpu);
			ixgbe_fcoe_ddp_pools_free(fcoe);
			return;
		}
	}
}
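
/*
 * Keeping one pci_pool per possible CPU lets ixgbe_fcoe_ddp_setup() allocate
 * the user descriptor list from the local CPU's pool under get_cpu()/
 * put_cpu(), so concurrent DDP setups on different CPUs do not contend on a
 * single shared pool.
 */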

/**
 * ixgbe_configure_fcoe - configures registers for fcoe at start
 * @adapter: ptr to ixgbe adapter
 *
 * This sets up FCoE related registers
 *
 * Returns : none
 */
void ixgbe_configure_fcoe(struct ixgbe_adapter *adapter)
{
	int i, fcoe_q, fcoe_i;
	struct ixgbe_hw *hw = &adapter->hw;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;
	struct ixgbe_ring_feature *f = &adapter->ring_feature[RING_F_FCOE];
#ifdef CONFIG_IXGBE_DCB
	u8 tc;
	u32 up2tc;
#endif

	if (!fcoe->pool) {
		spin_lock_init(&fcoe->lock);

		ixgbe_fcoe_ddp_pools_alloc(adapter);
		if (!fcoe->pool) {
			e_err(drv, "failed to alloc percpu fcoe DDP pools\n");
			return;
		}

		/* Extra buffer to be shared by all DDPs for HW work around */
		fcoe->extra_ddp_buffer = kmalloc(IXGBE_FCBUFF_MIN, GFP_ATOMIC);
		if (fcoe->extra_ddp_buffer == NULL) {
			e_err(drv, "failed to allocate extra DDP buffer\n");
			goto out_ddp_pools;
		}

		fcoe->extra_ddp_buffer_dma =
			dma_map_single(&adapter->pdev->dev,
				       fcoe->extra_ddp_buffer,
				       IXGBE_FCBUFF_MIN,
				       DMA_FROM_DEVICE);
		if (dma_mapping_error(&adapter->pdev->dev,
				      fcoe->extra_ddp_buffer_dma)) {
			e_err(drv, "failed to map extra DDP buffer\n");
			goto out_extra_ddp_buffer;
		}
	}

	/* Enable L2 eth type filter for FCoE */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FCOE),
			(ETH_P_FCOE | IXGBE_ETQF_FCOE | IXGBE_ETQF_FILTER_EN));
	/* Enable L2 eth type filter for FIP */
	IXGBE_WRITE_REG(hw, IXGBE_ETQF(IXGBE_ETQF_FILTER_FIP),
			(ETH_P_FIP | IXGBE_ETQF_FILTER_EN));
	if (adapter->ring_feature[RING_F_FCOE].indices) {
		/* Use multiple rx queues for FCoE by redirection table */
		for (i = 0; i < IXGBE_FCRETA_SIZE; i++) {
			fcoe_i = f->mask + i % f->indices;
			fcoe_i &= IXGBE_FCRETA_ENTRY_MASK;
			fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
			IXGBE_WRITE_REG(hw, IXGBE_FCRETA(i), fcoe_q);
		}
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, IXGBE_FCRECTL_ENA);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE), 0);
	} else {
		/* Use single rx queue for FCoE */
		fcoe_i = f->mask;
		fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
		IXGBE_WRITE_REG(hw, IXGBE_FCRECTL, 0);
		IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FCOE),
				IXGBE_ETQS_QUEUE_EN |
				(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));
	}
	/* send FIP frames to the first FCoE queue */
	fcoe_i = f->mask;
	fcoe_q = adapter->rx_ring[fcoe_i]->reg_idx;
	IXGBE_WRITE_REG(hw, IXGBE_ETQS(IXGBE_ETQF_FILTER_FIP),
			IXGBE_ETQS_QUEUE_EN |
			(fcoe_q << IXGBE_ETQS_RX_QUEUE_SHIFT));

	IXGBE_WRITE_REG(hw, IXGBE_FCRXCTRL,
			IXGBE_FCRXCTRL_FCOELLI |
			IXGBE_FCRXCTRL_FCCRCBO |
			(FC_FCOE_VER << IXGBE_FCRXCTRL_FCOEVER_SHIFT));
#ifdef CONFIG_IXGBE_DCB
	up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
	for (i = 0; i < MAX_USER_PRIORITY; i++) {
		tc = (u8)(up2tc >> (i * IXGBE_RTTUP2TC_UP_SHIFT));
		tc &= (MAX_TRAFFIC_CLASS - 1);
		if (fcoe->tc == tc) {
			fcoe->up = i;
			break;
		}
	}
#endif

	return;

out_extra_ddp_buffer:
	kfree(fcoe->extra_ddp_buffer);
out_ddp_pools:
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_cleanup_fcoe - release all fcoe ddp context resources
 * @adapter: ixgbe adapter
 *
 * Cleans up outstanding ddp context resources
 *
 * Returns : none
 */
void ixgbe_cleanup_fcoe(struct ixgbe_adapter *adapter)
{
	int i;
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!fcoe->pool)
		return;

	for (i = 0; i < IXGBE_FCOE_DDP_MAX; i++)
		ixgbe_fcoe_ddp_put(adapter->netdev, i);
	dma_unmap_single(&adapter->pdev->dev,
			 fcoe->extra_ddp_buffer_dma,
			 IXGBE_FCBUFF_MIN,
			 DMA_FROM_DEVICE);
	kfree(fcoe->extra_ddp_buffer);
	ixgbe_fcoe_ddp_pools_free(fcoe);
}

/**
 * ixgbe_fcoe_enable - turn on FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns on FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_enable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_enable;

	atomic_inc(&fcoe->refcnt);
	if (adapter->flags & IXGBE_FLAG_FCOE_ENABLED)
		goto out_enable;

	e_info(drv, "Enabling FCoE offload features.\n");
	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);

	adapter->flags |= IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = IXGBE_FCRETA_SIZE;
	netdev->features |= NETIF_F_FCOE_CRC;
	netdev->features |= NETIF_F_FSO;
	netdev->features |= NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = IXGBE_FCOE_DDP_MAX - 1;

	ixgbe_init_interrupt_scheme(adapter);
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_enable:
	return rc;
}

/**
 * ixgbe_fcoe_disable - turn off FCoE offload feature
 * @netdev: the corresponding netdev
 *
 * Turns off FCoE offload feature in 82599.
 *
 * Returns : 0 indicates success or -EINVAL on failure
 */
int ixgbe_fcoe_disable(struct net_device *netdev)
{
	int rc = -EINVAL;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_fcoe *fcoe = &adapter->fcoe;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_CAPABLE))
		goto out_disable;

	if (!(adapter->flags & IXGBE_FLAG_FCOE_ENABLED))
		goto out_disable;

	if (!atomic_dec_and_test(&fcoe->refcnt))
		goto out_disable;

	e_info(drv, "Disabling FCoE offload features.\n");
	netdev->features &= ~NETIF_F_FCOE_CRC;
	netdev->features &= ~NETIF_F_FSO;
	netdev->features &= ~NETIF_F_FCOE_MTU;
	netdev->fcoe_ddp_xid = 0;
	netdev_features_change(netdev);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_stop(netdev);

	ixgbe_clear_interrupt_scheme(adapter);
	adapter->flags &= ~IXGBE_FLAG_FCOE_ENABLED;
	adapter->ring_feature[RING_F_FCOE].indices = 0;
	ixgbe_cleanup_fcoe(adapter);
	ixgbe_init_interrupt_scheme(adapter);

	if (netif_running(netdev))
		netdev->netdev_ops->ndo_open(netdev);
	rc = 0;

out_disable:
	return rc;
}
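
/*
 * For illustration: like the DDP hooks above, these two entry points are
 * typically exposed through net_device_ops in the main driver file, e.g.
 * (a sketch, not the authoritative table):
 *
 *	.ndo_fcoe_enable	= ixgbe_fcoe_enable,
 *	.ndo_fcoe_disable	= ixgbe_fcoe_disable,
 */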

#ifdef CONFIG_IXGBE_DCB
/**
 * ixgbe_fcoe_setapp - sets the user priority bitmap for FCoE
 * @adapter: ixgbe adapter
 * @up: 802.1p user priority bitmap
 *
 * Finds out the traffic class from the input user priority
 * bitmap for FCoE.
 *
 * Returns : 0 on success otherwise returns 1 on error
 */
u8 ixgbe_fcoe_setapp(struct ixgbe_adapter *adapter, u8 up)
{
	int i;
	u32 up2tc;

	/* valid user priority bitmap must not be 0 */
	if (up) {
		/* from user priority to the corresponding traffic class */
		up2tc = IXGBE_READ_REG(&adapter->hw, IXGBE_RTTUP2TC);
		for (i = 0; i < MAX_USER_PRIORITY; i++) {
			if (up & (1 << i)) {
				up2tc >>= (i * IXGBE_RTTUP2TC_UP_SHIFT);
				up2tc &= (MAX_TRAFFIC_CLASS - 1);
				adapter->fcoe.tc = (u8)up2tc;
				adapter->fcoe.up = i;
				return 0;
			}
		}
	}

	return 1;
}
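
/*
 * For example (illustrative values): with up = 0x08, i.e. only 802.1p
 * priority 3 set, the loop above extracts the 3-bit field for priority 3
 * from RTTUP2TC; if that field maps priority 3 to traffic class 1, then
 * fcoe.tc becomes 1 and fcoe.up becomes 3.
 */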
#endif /* CONFIG_IXGBE_DCB */

/**
 * ixgbe_fcoe_get_wwn - get world wide name for the node or the port
 * @netdev: ixgbe adapter
 * @wwn: the world wide name
 * @type: the type of world wide name
 *
 * Returns the node or port world wide name if both the prefix and the san
 * mac address are valid. The wwn is then formed based on the NAA-2 format
 * for an IEEE Extended name identifier (ref. to T10 FC-LS Spec., Sec. 15.3).
 *
 * Returns : 0 on success
 */
int ixgbe_fcoe_get_wwn(struct net_device *netdev, u64 *wwn, int type)
{
	int rc = -EINVAL;
	u16 prefix = 0xffff;
	struct ixgbe_adapter *adapter = netdev_priv(netdev);
	struct ixgbe_mac_info *mac = &adapter->hw.mac;

	switch (type) {
	case NETDEV_FCOE_WWNN:
		prefix = mac->wwnn_prefix;
		break;
	case NETDEV_FCOE_WWPN:
		prefix = mac->wwpn_prefix;
		break;
	default:
		break;
	}

	if ((prefix != 0xffff) &&
	    is_valid_ether_addr(mac->san_addr)) {
		*wwn = ((u64) prefix << 48) |
		       ((u64) mac->san_addr[0] << 40) |
		       ((u64) mac->san_addr[1] << 32) |
		       ((u64) mac->san_addr[2] << 24) |
		       ((u64) mac->san_addr[3] << 16) |
		       ((u64) mac->san_addr[4] << 8) |
		       ((u64) mac->san_addr[5]);
		rc = 0;
	}
	return rc;
}
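
/*
 * For example (illustrative values): with wwnn_prefix = 0x1000 and a SAN MAC
 * address of 00:1b:21:aa:bb:cc, ixgbe_fcoe_get_wwn() composes the node WWN
 * 0x1000001b21aabbcc, i.e. the 16-bit prefix in the top two bytes followed
 * by the six MAC bytes.
 */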