/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DRV_NAME	"cxgbit"
#define DRV_VERSION	"1.0.0-ko"
#define pr_fmt(fmt)	DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);

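/*
 * kref release callback: frees the cxgbit_device once the last
 * reference has been dropped.
 */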
void _cxgbit_free_cdev(struct kref *kref)
{
	struct cxgbit_device *cdev;

	cdev = container_of(kref, struct cxgbit_device, kref);
	kfree(cdev);
}

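/*
 * Compute the max data segment length the device can handle: bounded
 * by the adapter's iSCSI I/O size, the ULP2 packet limit, an 8KB cap
 * and the page-frag capacity of a single skb.
 */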
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
	mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
		     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
	mdsl = min_t(u32, mdsl, 8192);
	mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

	cdev->mdsl = mdsl;
}

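/*
 * cxgb4 ULD "add" callback, invoked once per adapter: allocate and
 * initialize a cxgbit_device and link it onto the global cdev list.
 * T4 adapters are not supported and are rejected up front.
 */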
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbit_device *cdev;

	if (is_t4(lldi->adapter_type))
		return ERR_PTR(-ENODEV);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	kref_init(&cdev->kref);

	cdev->lldi = *lldi;

	cxgbit_set_mdsl(cdev);

	if (cxgbit_ddp_init(cdev) < 0) {
		kfree(cdev);
		return ERR_PTR(-EINVAL);
	}

	if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
		pr_info("cdev %s ddp init failed\n",
			pci_name(lldi->pdev));

	/* iSCSI segmentation offload needs newer firmware
	 * (0x10d2b00 decodes as 1.13.43.0)
	 */
	if (lldi->fw_vers >= 0x10d2b00)
		set_bit(CDEV_ISO_ENABLE, &cdev->flags);

	spin_lock_init(&cdev->cskq.lock);
	INIT_LIST_HEAD(&cdev->cskq.list);

	mutex_lock(&cdev_list_lock);
	list_add_tail(&cdev->list, &cdev_list_head);
	mutex_unlock(&cdev_list_lock);

	pr_info("cdev %s added for iSCSI target transport\n",
		pci_name(lldi->pdev));

	return cdev;
}

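/*
 * Wake the rx thread of every connection on this device by queueing
 * an empty skb on its rxq (a queue going from empty to one entry is
 * the wakeup condition), so the threads can notice that the device
 * state changed and tear their connections down.
 */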
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock_bh(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock_bh(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}

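/*
 * Detach the cdev from the adapter: if no connections remain, unlink
 * and release it immediately; otherwise signal the outstanding
 * connections to close first.
 */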
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
	bool free_cdev = false;

	spin_lock_bh(&cdev->cskq.lock);
	if (list_empty(&cdev->cskq.list))
		free_cdev = true;
	spin_unlock_bh(&cdev->cskq.lock);

	if (free_cdev) {
		mutex_lock(&cdev_list_lock);
		list_del(&cdev->list);
		mutex_unlock(&cdev_list_lock);

		cxgbit_put_cdev(cdev);
	} else {
		cxgbit_close_conn(cdev);
	}
}

static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbit_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		set_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_START_RECOVERY:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		cxgbit_close_conn(cdev);
		pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DETACH:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
		cxgbit_detach_cdev(cdev);
		break;
	default:
		pr_info("cdev %s unknown state %d.\n",
			pci_name(cdev->lldi.pdev), state);
		break;
	}
	return 0;
}

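/*
 * Decode the DDP status word (ddpvld) of a completed PDU and record
 * the outcome (digest/pad errors, whether the payload was placed
 * directly) in the per-PDU control block.
 */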
static void
cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
		       struct cxgbit_lro_pdu_cb *pdu_cb)
{
	unsigned int status = ntohl(cpl->ddpvld);

	pdu_cb->flags |= PDUCBF_RX_STATUS;
	pdu_cb->ddigest = ntohl(cpl->ulp_crc);
	pdu_cb->pdulen = ntohs(cpl->len);

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
	}

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
		pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
	}

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
		pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);

	if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
	    (!(pdu_cb->flags & PDUCBF_RX_DATA))) {
		pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
	}
}

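/*
 * A CPL_RX_ISCSI_DDP completion closes the current PDU: record its
 * DDP status, mark the PDU complete if its header was received, and
 * advance to the next PDU slot in the LRO skb.
 */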
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
							lro_cb->pdu_idx);
	struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

	cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);

	if (pdu_cb->flags & PDUCBF_RX_HDR)
		pdu_cb->complete = true;

	lro_cb->complete = true;
	lro_cb->pdu_totallen += pdu_cb->pdulen;
	lro_cb->pdu_idx++;
}

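/*
 * Append the pages of a gather list to the LRO skb as page frags,
 * skipping "offset" bytes of CPL header in the first frag.
 */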
static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
		  unsigned int offset)
{
	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
	u8 i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, skb_frag_idx + i,
				     gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	skb_shinfo(skb)->nr_frags += gl->nfrags;

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

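/*
 * Fold one CPL_ISCSI_HDR or CPL_ISCSI_DATA packet into the current
 * LRO skb: note the header/payload location and length in the per-PDU
 * control block, then attach the gather-list pages as frags.
 */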
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
							lro_cb->pdu_idx);
	u32 len, offset;

	if (op == CPL_ISCSI_HDR) {
		struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

		offset = sizeof(struct cpl_iscsi_hdr);
		pdu_cb->flags |= PDUCBF_RX_HDR;
		pdu_cb->seq = ntohl(cpl->seq);
		len = ntohs(cpl->len);
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		lro_cb->complete = false;
	} else {
		struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

		offset = sizeof(struct cpl_iscsi_data);
		pdu_cb->flags |= PDUCBF_RX_DATA;
		len = ntohs(cpl->len);
		pdu_cb->dlen = len;
		pdu_cb->doffset = lro_cb->offset;
		pdu_cb->nr_dfrags = gl->nfrags;
		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
	}

	cxgbit_copy_frags(skb, gl, offset);

	pdu_cb->frags += gl->nfrags;
	lro_cb->offset += len;
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}

static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
		    const __be64 *rsp, struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);

	if (unlikely(!skb))
		return NULL;

	memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

	cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

	lro_cb = cxgbit_skb_lro_cb(skb);

	cxgbit_get_csk(csk);

	lro_cb->csk = csk;

	return skb;
}

static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	bool wakeup_thread = false;

	spin_lock(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	if (skb_queue_len(&csk->rxq) == 1)
		wakeup_thread = true;
	spin_unlock(&csk->rxq.lock);

	if (wakeup_thread)
		wake_up(&csk->waitq);
}

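/*
 * Finish an LRO session: detach the aggregated skb from the session
 * queue, hand it to the connection's rx thread and drop the csk
 * reference taken when the session was opened.
 */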
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_sock *csk = lro_cb->csk;

	csk->lro_skb = NULL;

	__skb_unlink(skb, &lro_mgr->lroq);
	cxgbit_queue_lro_skb(csk, skb);

	cxgbit_put_csk(csk);

	lro_mgr->lro_pkts++;
	lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&lro_mgr->lroq)))
		cxgbit_lro_flush(lro_mgr, skb);
}

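/*
 * LRO receive path: aggregate iSCSI CPLs for a connection into a
 * single skb. A new session is opened on demand (flushing existing
 * sessions if MAX_LRO_SESSIONS is reached), and the current skb is
 * flushed early once it runs out of frag or PDU slots.
 */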
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
		   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
		   struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	if (!csk) {
		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
		goto out;
	}

	if (csk->lro_skb)
		goto add_packet;

start_lro:
	if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
		cxgbit_uld_lro_flush(lro_mgr);
		goto start_lro;
	}

	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
	if (unlikely(!skb))
		goto out;

	csk->lro_skb = skb;

	__skb_queue_tail(&lro_mgr->lroq, skb);
	lro_mgr->lro_session_cnt++;

add_packet:
	skb = csk->lro_skb;
	lro_cb = cxgbit_skb_lro_cb(skb);

	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
	    MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
	    (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
		cxgbit_lro_flush(lro_mgr, skb);
		goto start_lro;
	}

	if (gl)
		cxgbit_lro_add_packet_gl(skb, op, gl);
	else
		cxgbit_lro_add_packet_rsp(skb, op, rsp);

	lro_mgr->lro_merged++;

	return 0;

out:
	return -1;
}

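/*
 * Main ULD rx callback: iSCSI data-path CPLs are fed to the LRO
 * aggregator, everything else is copied into a fresh skb and
 * dispatched through the cxgbit_cplhandlers[] table.
 */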
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	struct cxgbit_device *cdev = hndl;
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct cpl_tx_data *rpl = NULL;
	struct cxgbit_sock *csk = NULL;
	unsigned int tid = 0;
	struct sk_buff *skb;
	unsigned int op = *(u8 *)rsp;
	bool lro_flush = true;

	switch (op) {
	case CPL_ISCSI_HDR:
	case CPL_ISCSI_DATA:
	case CPL_RX_ISCSI_DDP:
	case CPL_FW4_ACK:
		lro_flush = false;
		/* fall through */
	case CPL_ABORT_RPL_RSS:
	case CPL_PASS_ESTABLISH:
	case CPL_PEER_CLOSE:
	case CPL_CLOSE_CON_RPL:
	case CPL_ABORT_REQ_RSS:
	case CPL_SET_TCB_RPL:
	case CPL_RX_DATA:
		rpl = gl ? (struct cpl_tx_data *)gl->va :
			   (struct cpl_tx_data *)(rsp + 1);
		tid = GET_TID(rpl);
		csk = lookup_tid(lldi->tids, tid);
		break;
	default:
		break;
	}

	if (csk && csk->lro_skb && lro_flush)
		cxgbit_lro_flush(lro_mgr, csk->lro_skb);

	if (!gl) {
		unsigned int len;

		if (op == CPL_RX_ISCSI_DDP) {
			if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
						napi))
				return 0;
		}

		/* inline CPL in the 64-byte response descriptor: skip the
		 * leading 8-byte header word, drop the trailing rsp_ctrl
		 */
		len = 64 - sizeof(struct rsp_ctrl) - 8;
		skb = napi_alloc_skb(napi, len);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(op != *(u8 *)gl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				gl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)gl->va),
				gl->tot_len);
			return 0;
		}

		if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
			if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
						napi))
				return 0;
		}

#define RX_PULL_LEN 128
		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_tx_data *)skb->data;
	op = rpl->ot.opcode;
	cxgbit_skcb_rx_opcode(skb) = op;

	pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		 cdev, op, rpl->ot.opcode_tid,
		 ntohl(rpl->ot.opcode_tid), skb);

	if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
		cxgbit_cplhandlers[op](cdev, skb);
	} else {
		pr_err("No handler for opcode 0x%x.\n", op);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	pr_err("%s OOM bailing out.\n", __func__);
	return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
	struct dcb_app_type dcb_app;
	struct work_struct work;
};

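/*
 * A DCB priority change was reported for the iSCSI application on one
 * of our ports: queue an empty skb and wake the rx thread of every
 * matching connection whose priority differs, so it can react to the
 * update.
 */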
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
			   u8 dcb_priority, u16 port_num)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	u16 local_port;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		if (csk->port_id != port_id)
			continue;

		if (csk->com.local_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sock_in6;

			sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
			local_port = ntohs(sock_in6->sin6_port);
		} else {
			struct sockaddr_in *sock_in;

			sock_in = (struct sockaddr_in *)&csk->com.local_addr;
			local_port = ntohs(sock_in->sin_port);
		}

		if (local_port != port_num)
			continue;

		if (csk->dcb_priority == dcb_priority)
			continue;

		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}

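/*
 * Deferred DCB event handler: extract the new priority from an IEEE
 * or CEE app-table notification, map the ifindex back to a cxgbit
 * device and port, and push the change to the affected connections.
 */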
static void cxgbit_dcb_workfn(struct work_struct *work)
{
	struct cxgbit_dcb_work *dcb_work;
	struct net_device *ndev;
	struct cxgbit_device *cdev = NULL;
	struct dcb_app_type *iscsi_app;
	u8 priority, port_id = 0xff;

	dcb_work = container_of(work, struct cxgbit_dcb_work, work);
	iscsi_app = &dcb_work->dcb_app;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			goto out;

		priority = iscsi_app->app.priority;

	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			goto out;

		if (!iscsi_app->app.priority)
			goto out;

		/* CEE reports a priority bitmap; use the lowest set bit */
		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		goto out;
	}

	pr_debug("priority for ifid %d is %u\n",
		 iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);

	if (!ndev)
		goto out;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_device(ndev, &port_id);

	dev_put(ndev);

	if (!cdev) {
		mutex_unlock(&cdev_list_lock);
		goto out;
	}

	cxgbit_update_dcb_priority(cdev, port_id, priority,
				   iscsi_app->app.protocol);
	mutex_unlock(&cdev_list_lock);
out:
	kfree(dcb_work);
}

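/*
 * DCB notifier callback; it may run in atomic context (note the
 * GFP_ATOMIC allocation), so the real work is deferred to process
 * context via a workqueue item.
 */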
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct cxgbit_dcb_work *dcb_work;
	struct dcb_app_type *dcb_app = data;

	dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
	if (!dcb_work)
		return NOTIFY_DONE;

	dcb_work->dcb_app = *dcb_app;
	INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
	schedule_work(&dcb_work->work);
	return NOTIFY_OK;
}
#endif

static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

static struct iscsit_transport cxgbit_transport = {
	.name			= DRV_NAME,
	.transport_type		= ISCSI_HW_OFFLOAD,
	.rdma_shutdown		= false,
	.priv_size		= sizeof(struct cxgbit_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= cxgbit_setup_np,
	.iscsit_accept_np	= cxgbit_accept_np,
	.iscsit_free_np		= cxgbit_free_np,
	.iscsit_free_conn	= cxgbit_free_conn,
	.iscsit_get_login_rx	= cxgbit_get_login_rx,
	.iscsit_put_login_tx	= cxgbit_put_login_tx,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_xmit_pdu	= cxgbit_xmit_pdu,
	.iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
	.iscsit_validate_params	= cxgbit_validate_params,
	.iscsit_release_cmd	= cxgbit_release_cmd,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
	.name		= DRV_NAME,
	.add		= cxgbit_uld_add,
	.state_change	= cxgbit_uld_state_change,
	.lro_rx_handler	= cxgbit_uld_lro_rx_handler,
	.lro_flush	= cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
	.notifier_call = cxgbit_dcbevent_notify,
};
#endif

static int __init cxgbit_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
	iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_NAME);
	register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	/* the private rx control blocks must fit in skb->cb */
	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
		     sizeof(union cxgbit_skb_cb));
	return 0;
}

static void __exit cxgbit_exit(void)
{
	struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	mutex_lock(&cdev_list_lock);
	list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
		list_del(&cdev->list);
		cxgbit_put_cdev(cdev);
	}
	mutex_unlock(&cdev_list_lock);
	iscsit_unregister_transport(&cxgbit_transport);
	cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");