/*
 * Copyright (c) 2016 Chelsio Communications, Inc.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#define DRV_NAME "cxgbit"
#define DRV_VERSION "1.0.0-ko"
#define pr_fmt(fmt) DRV_NAME ": " fmt

#include "cxgbit.h"

#ifdef CONFIG_CHELSIO_T4_DCB
#include <net/dcbevent.h>
#include "cxgb4_dcb.h"
#endif

LIST_HEAD(cdev_list_head);
/* cdev list lock */
DEFINE_MUTEX(cdev_list_lock);

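/*
 * Final kref release for a cxgbit_device. This only runs via kref_put()
 * once the last reference is dropped (cxgbit_put_cdev() in cxgbit.h is
 * assumed to wrap kref_put(&cdev->kref, _cxgbit_free_cdev)), so the
 * page-pod manager can be released before the device itself is freed.
 */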
void _cxgbit_free_cdev(struct kref *kref)
{
	struct cxgbit_device *cdev;

	cdev = container_of(kref, struct cxgbit_device, kref);

	cxgbi_ppm_release(cdev2ppm(cdev));
	kfree(cdev);
}

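/*
 * Derive the maximum data segment length (MDSL) for this adapter: the
 * hardware iSCSI I/O length minus non-payload overhead, capped at the
 * ULP2 packet limit, at 8K, and at what fits in an skb's frag array.
 * A worked example with assumed values: for iscsi_iolen = 9000,
 * min(9000 - 312, 16224 - 312) = 8688, the 8K cap gives 8192, and with
 * 4K pages (MAX_SKB_FRAGS - 1) * PAGE_SIZE = 65536, so mdsl = 8192.
 */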
static void cxgbit_set_mdsl(struct cxgbit_device *cdev)
{
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	u32 mdsl;

#define ULP2_MAX_PKT_LEN 16224
#define ISCSI_PDU_NONPAYLOAD_LEN 312
	mdsl = min_t(u32, lldi->iscsi_iolen - ISCSI_PDU_NONPAYLOAD_LEN,
		     ULP2_MAX_PKT_LEN - ISCSI_PDU_NONPAYLOAD_LEN);
	mdsl = min_t(u32, mdsl, 8192);
	mdsl = min_t(u32, mdsl, (MAX_SKB_FRAGS - 1) * PAGE_SIZE);

	cdev->mdsl = mdsl;
}

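/*
 * ULD "add" callback, invoked by cxgb4 once per adapter. T4 ASICs are
 * rejected; this offload needs a later chip. A DDP (direct data
 * placement) init failure is non-fatal: the cdev remains usable, just
 * without zero-copy receive. The firmware version check gates iSCSI
 * segmentation offload (0x10d2b00 appears to decode as FW 1.13.43.0
 * under the usual major/minor/micro/build byte packing).
 */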
static void *cxgbit_uld_add(const struct cxgb4_lld_info *lldi)
{
	struct cxgbit_device *cdev;

	if (is_t4(lldi->adapter_type))
		return ERR_PTR(-ENODEV);

	cdev = kzalloc(sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return ERR_PTR(-ENOMEM);

	kref_init(&cdev->kref);

	cdev->lldi = *lldi;

	cxgbit_set_mdsl(cdev);

	if (cxgbit_ddp_init(cdev) < 0) {
		kfree(cdev);
		return ERR_PTR(-EINVAL);
	}

	if (!test_bit(CDEV_DDP_ENABLE, &cdev->flags))
		pr_info("cdev %s ddp init failed\n",
			pci_name(lldi->pdev));

	if (lldi->fw_vers >= 0x10d2b00)
		set_bit(CDEV_ISO_ENABLE, &cdev->flags);

	spin_lock_init(&cdev->cskq.lock);
	INIT_LIST_HEAD(&cdev->cskq.list);

	mutex_lock(&cdev_list_lock);
	list_add_tail(&cdev->list, &cdev_list_head);
	mutex_unlock(&cdev_list_lock);

	pr_info("cdev %s added for iSCSI target transport\n",
		pci_name(lldi->pdev));

	return cdev;
}

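/*
 * Nudge every connection on this cdev to close: queue a zero-length skb
 * on each socket's rxq and wake its rx thread, which is assumed to treat
 * an empty skb as a shutdown signal. The wakeup is only needed when the
 * queue transitions from empty to non-empty; a thread that already has
 * work queued will see the new entry on its own.
 */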
static void cxgbit_close_conn(struct cxgbit_device *cdev)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock_bh(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock_bh(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}

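/*
 * Called on CXGB4_STATE_DETACH. If no connections remain, the cdev can
 * be unlinked and its reference dropped right away; otherwise ask the
 * live connections to close, leaving the final cxgbit_put_cdev() to
 * connection teardown (each csk is assumed to hold a cdev reference).
 */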
static void cxgbit_detach_cdev(struct cxgbit_device *cdev)
{
	bool free_cdev = false;

	spin_lock_bh(&cdev->cskq.lock);
	if (list_empty(&cdev->cskq.list))
		free_cdev = true;
	spin_unlock_bh(&cdev->cskq.lock);

	if (free_cdev) {
		mutex_lock(&cdev_list_lock);
		list_del(&cdev->list);
		mutex_unlock(&cdev_list_lock);

		cxgbit_put_cdev(cdev);
	} else {
		cxgbit_close_conn(cdev);
	}
}

static int cxgbit_uld_state_change(void *handle, enum cxgb4_state state)
{
	struct cxgbit_device *cdev = handle;

	switch (state) {
	case CXGB4_STATE_UP:
		set_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state UP.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_START_RECOVERY:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		cxgbit_close_conn(cdev);
		pr_info("cdev %s state RECOVERY.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DOWN:
		pr_info("cdev %s state DOWN.\n", pci_name(cdev->lldi.pdev));
		break;
	case CXGB4_STATE_DETACH:
		clear_bit(CDEV_STATE_UP, &cdev->flags);
		pr_info("cdev %s state DETACH.\n", pci_name(cdev->lldi.pdev));
		cxgbit_detach_cdev(cdev);
		break;
	default:
		pr_info("cdev %s unknown state %d.\n",
			pci_name(cdev->lldi.pdev), state);
		break;
	}
	return 0;
}

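/*
 * Decode the ddpvld status word of a CPL_RX_ISCSI_DDP completion: latch
 * header/data CRC and padding errors into the per-PDU control block and
 * record the data digest and PDU length. PDUCBF_RX_DATA_DDPD marks a
 * PDU whose payload was placed directly into the registered buffers,
 * i.e. the DDP bit is set and no CPL_ISCSI_DATA carried it inline.
 */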
static void
cxgbit_proc_ddp_status(unsigned int tid, struct cpl_rx_data_ddp *cpl,
		       struct cxgbit_lro_pdu_cb *pdu_cb)
{
	unsigned int status = ntohl(cpl->ddpvld);

	pdu_cb->flags |= PDUCBF_RX_STATUS;
	pdu_cb->ddigest = ntohl(cpl->ulp_crc);
	pdu_cb->pdulen = ntohs(cpl->len);

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_HCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, hcrc bad.\n", tid, status);
		pdu_cb->flags |= PDUCBF_RX_HCRC_ERR;
	}

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_DCRC_SHIFT)) {
		pr_info("tid 0x%x, status 0x%x, dcrc bad.\n", tid, status);
		pdu_cb->flags |= PDUCBF_RX_DCRC_ERR;
	}

	if (status & (1 << CPL_RX_ISCSI_DDP_STATUS_PAD_SHIFT))
		pr_info("tid 0x%x, status 0x%x, pad bad.\n", tid, status);

	if ((status & (1 << CPL_RX_ISCSI_DDP_STATUS_DDP_SHIFT)) &&
	    (!(pdu_cb->flags & PDUCBF_RX_DATA)))
		pdu_cb->flags |= PDUCBF_RX_DATA_DDPD;
}

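/*
 * Fold a CPL_RX_ISCSI_DDP completion (delivered on the response queue,
 * with no payload gather list) into the current LRO skb. The completion
 * ends the PDU being assembled, so advance pdu_idx and account its
 * length toward the session total.
 */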
static void
cxgbit_lro_add_packet_rsp(struct sk_buff *skb, u8 op, const __be64 *rsp)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
							lro_cb->pdu_idx);
	struct cpl_rx_iscsi_ddp *cpl = (struct cpl_rx_iscsi_ddp *)(rsp + 1);

	cxgbit_proc_ddp_status(lro_cb->csk->tid, cpl, pdu_cb);

	if (pdu_cb->flags & PDUCBF_RX_HDR)
		pdu_cb->complete = true;

	lro_cb->complete = true;
	lro_cb->pdu_totallen += pdu_cb->pdulen;
	lro_cb->pdu_idx++;
}

static void
cxgbit_copy_frags(struct sk_buff *skb, const struct pkt_gl *gl,
		  unsigned int offset)
{
	u8 skb_frag_idx = skb_shinfo(skb)->nr_frags;
	u8 i;

	/* usually there's just one frag */
	__skb_fill_page_desc(skb, skb_frag_idx, gl->frags[0].page,
			     gl->frags[0].offset + offset,
			     gl->frags[0].size - offset);
	for (i = 1; i < gl->nfrags; i++)
		__skb_fill_page_desc(skb, skb_frag_idx + i,
				     gl->frags[i].page,
				     gl->frags[i].offset,
				     gl->frags[i].size);

	skb_shinfo(skb)->nr_frags += gl->nfrags;

	/* get a reference to the last page, we don't own it */
	get_page(gl->frags[gl->nfrags - 1].page);
}

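/*
 * Append a payload-bearing CPL to the LRO skb. CPL_ISCSI_HDR starts a
 * new PDU: the BHS sits right after the CPL in the first gather-list
 * fragment, so record where the header frag lands. CPL_ISCSI_DATA
 * extends the current PDU with data fragments. Either way the
 * gather-list pages are attached to the skb as frags and the skb's
 * length accounting is updated.
 */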
static void
cxgbit_lro_add_packet_gl(struct sk_buff *skb, u8 op, const struct pkt_gl *gl)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_lro_pdu_cb *pdu_cb = cxgbit_skb_lro_pdu_cb(skb,
							lro_cb->pdu_idx);
	u32 len, offset;

	if (op == CPL_ISCSI_HDR) {
		struct cpl_iscsi_hdr *cpl = (struct cpl_iscsi_hdr *)gl->va;

		offset = sizeof(struct cpl_iscsi_hdr);
		pdu_cb->flags |= PDUCBF_RX_HDR;
		pdu_cb->seq = ntohl(cpl->seq);
		len = ntohs(cpl->len);
		pdu_cb->hdr = gl->va + offset;
		pdu_cb->hlen = len;
		pdu_cb->hfrag_idx = skb_shinfo(skb)->nr_frags;

		if (unlikely(gl->nfrags > 1))
			cxgbit_skcb_flags(skb) = 0;

		lro_cb->complete = false;
	} else {
		struct cpl_iscsi_data *cpl = (struct cpl_iscsi_data *)gl->va;

		offset = sizeof(struct cpl_iscsi_data);
		pdu_cb->flags |= PDUCBF_RX_DATA;
		len = ntohs(cpl->len);
		pdu_cb->dlen = len;
		pdu_cb->doffset = lro_cb->offset;
		pdu_cb->nr_dfrags = gl->nfrags;
		pdu_cb->dfrag_idx = skb_shinfo(skb)->nr_frags;
	}

	cxgbit_copy_frags(skb, gl, offset);

	pdu_cb->frags += gl->nfrags;
	lro_cb->offset += len;
	skb->len += len;
	skb->data_len += len;
	skb->truesize += len;
}

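/*
 * Start a new LRO "session" skb for this connection. The headroom is
 * zeroed because it appears to serve as scratch space for the
 * cxgbit_lro_cb and the per-PDU cxgbit_lro_pdu_cb array (see
 * cxgbit_skb_lro_cb() and cxgbit_skb_lro_pdu_cb() in cxgbit.h). The skb
 * also takes a reference on the csk, dropped again in
 * cxgbit_lro_flush().
 */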
static struct sk_buff *
cxgbit_lro_init_skb(struct cxgbit_sock *csk, u8 op, const struct pkt_gl *gl,
		    const __be64 *rsp, struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	skb = napi_alloc_skb(napi, LRO_SKB_MAX_HEADROOM);

	if (unlikely(!skb))
		return NULL;

	memset(skb->data, 0, LRO_SKB_MAX_HEADROOM);

	cxgbit_skcb_flags(skb) |= SKCBF_RX_LRO;

	lro_cb = cxgbit_skb_lro_cb(skb);

	cxgbit_get_csk(csk);

	lro_cb->csk = csk;

	return skb;
}

static void cxgbit_queue_lro_skb(struct cxgbit_sock *csk, struct sk_buff *skb)
{
	bool wakeup_thread = false;

	spin_lock(&csk->rxq.lock);
	__skb_queue_tail(&csk->rxq, skb);
	if (skb_queue_len(&csk->rxq) == 1)
		wakeup_thread = true;
	spin_unlock(&csk->rxq.lock);

	if (wakeup_thread)
		wake_up(&csk->waitq);
}

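/*
 * Close out an LRO session: detach the skb from the connection and the
 * manager's lroq, hand it to the connection's rx thread, and drop the
 * csk reference taken in cxgbit_lro_init_skb().
 */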
static void cxgbit_lro_flush(struct t4_lro_mgr *lro_mgr, struct sk_buff *skb)
{
	struct cxgbit_lro_cb *lro_cb = cxgbit_skb_lro_cb(skb);
	struct cxgbit_sock *csk = lro_cb->csk;

	csk->lro_skb = NULL;

	__skb_unlink(skb, &lro_mgr->lroq);
	cxgbit_queue_lro_skb(csk, skb);

	cxgbit_put_csk(csk);

	lro_mgr->lro_pkts++;
	lro_mgr->lro_session_cnt--;
}

static void cxgbit_uld_lro_flush(struct t4_lro_mgr *lro_mgr)
{
	struct sk_buff *skb;

	while ((skb = skb_peek(&lro_mgr->lroq)))
		cxgbit_lro_flush(lro_mgr, skb);
}

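/*
 * Merge an incoming CPL into the connection's LRO skb, starting a new
 * session if none is open. A session is flushed (and restarted) when
 * the new gather list would overflow MAX_SKB_FRAGS, the merged length
 * reaches LRO_FLUSH_LEN_MAX, or the per-skb PDU control-block slots run
 * out. The start_lro loop executes at most twice: a full session table
 * is flushed completely on the first pass.
 */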
static int
cxgbit_lro_receive(struct cxgbit_sock *csk, u8 op, const __be64 *rsp,
		   const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
		   struct napi_struct *napi)
{
	struct sk_buff *skb;
	struct cxgbit_lro_cb *lro_cb;

	if (!csk) {
		pr_err("%s: csk NULL, op 0x%x.\n", __func__, op);
		goto out;
	}

	if (csk->lro_skb)
		goto add_packet;

start_lro:
	if (lro_mgr->lro_session_cnt >= MAX_LRO_SESSIONS) {
		cxgbit_uld_lro_flush(lro_mgr);
		goto start_lro;
	}

	skb = cxgbit_lro_init_skb(csk, op, gl, rsp, napi);
	if (unlikely(!skb))
		goto out;

	csk->lro_skb = skb;

	__skb_queue_tail(&lro_mgr->lroq, skb);
	lro_mgr->lro_session_cnt++;

add_packet:
	skb = csk->lro_skb;
	lro_cb = cxgbit_skb_lro_cb(skb);

	if ((gl && (((skb_shinfo(skb)->nr_frags + gl->nfrags) >
	    MAX_SKB_FRAGS) || (lro_cb->pdu_totallen >= LRO_FLUSH_LEN_MAX))) ||
	    (lro_cb->pdu_idx >= MAX_SKB_FRAGS)) {
		cxgbit_lro_flush(lro_mgr, skb);
		goto start_lro;
	}

	if (gl)
		cxgbit_lro_add_packet_gl(skb, op, gl);
	else
		cxgbit_lro_add_packet_rsp(skb, op, rsp);

	lro_mgr->lro_merged++;

	return 0;

out:
	return -1;
}

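/*
 * ULD rx handler. Connection-bound CPLs are looked up by TID; anything
 * that cannot be merged flushes the connection's pending LRO skb first,
 * so PDUs reach the rx thread in order. iSCSI CPLs go through the LRO
 * path; everything else is copied into a fresh skb (from the response
 * descriptor when there is no gather list) and dispatched through the
 * cxgbit_cplhandlers[] table by opcode.
 */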
static int
cxgbit_uld_lro_rx_handler(void *hndl, const __be64 *rsp,
			  const struct pkt_gl *gl, struct t4_lro_mgr *lro_mgr,
			  struct napi_struct *napi)
{
	struct cxgbit_device *cdev = hndl;
	struct cxgb4_lld_info *lldi = &cdev->lldi;
	struct cpl_tx_data *rpl = NULL;
	struct cxgbit_sock *csk = NULL;
	unsigned int tid = 0;
	struct sk_buff *skb;
	unsigned int op = *(u8 *)rsp;
	bool lro_flush = true;

	switch (op) {
	case CPL_ISCSI_HDR:
	case CPL_ISCSI_DATA:
	case CPL_RX_ISCSI_DDP:
	case CPL_FW4_ACK:
		lro_flush = false;
		/* fall through */
	case CPL_ABORT_RPL_RSS:
	case CPL_PASS_ESTABLISH:
	case CPL_PEER_CLOSE:
	case CPL_CLOSE_CON_RPL:
	case CPL_ABORT_REQ_RSS:
	case CPL_SET_TCB_RPL:
	case CPL_RX_DATA:
		rpl = gl ? (struct cpl_tx_data *)gl->va :
			   (struct cpl_tx_data *)(rsp + 1);
		tid = GET_TID(rpl);
		csk = lookup_tid(lldi->tids, tid);
		break;
	default:
		break;
	}

	if (csk && csk->lro_skb && lro_flush)
		cxgbit_lro_flush(lro_mgr, csk->lro_skb);

	if (!gl) {
		unsigned int len;

		if (op == CPL_RX_ISCSI_DDP) {
			if (!cxgbit_lro_receive(csk, op, rsp, NULL, lro_mgr,
						napi))
				return 0;
		}

		len = 64 - sizeof(struct rsp_ctrl) - 8;
		skb = napi_alloc_skb(napi, len);
		if (!skb)
			goto nomem;
		__skb_put(skb, len);
		skb_copy_to_linear_data(skb, &rsp[1], len);
	} else {
		if (unlikely(op != *(u8 *)gl->va)) {
			pr_info("? FL 0x%p,RSS%#llx,FL %#llx,len %u.\n",
				gl->va, be64_to_cpu(*rsp),
				be64_to_cpu(*(u64 *)gl->va),
				gl->tot_len);
			return 0;
		}

		if (op == CPL_ISCSI_HDR || op == CPL_ISCSI_DATA) {
			if (!cxgbit_lro_receive(csk, op, rsp, gl, lro_mgr,
						napi))
				return 0;
		}

#define RX_PULL_LEN 128
		skb = cxgb4_pktgl_to_skb(gl, RX_PULL_LEN, RX_PULL_LEN);
		if (unlikely(!skb))
			goto nomem;
	}

	rpl = (struct cpl_tx_data *)skb->data;
	op = rpl->ot.opcode;
	cxgbit_skcb_rx_opcode(skb) = op;

	pr_debug("cdev %p, opcode 0x%x(0x%x,0x%x), skb %p.\n",
		 cdev, op, rpl->ot.opcode_tid,
		 ntohl(rpl->ot.opcode_tid), skb);

	if (op < NUM_CPL_CMDS && cxgbit_cplhandlers[op]) {
		cxgbit_cplhandlers[op](cdev, skb);
	} else {
		pr_err("No handler for opcode 0x%x.\n", op);
		__kfree_skb(skb);
	}
	return 0;
nomem:
	pr_err("%s OOM bailing out.\n", __func__);
	return 1;
}

#ifdef CONFIG_CHELSIO_T4_DCB
struct cxgbit_dcb_work {
	struct dcb_app_type dcb_app;
	struct work_struct work;
};

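/*
 * A DCB priority change cannot be applied to an established offloaded
 * connection, so every connection on the affected port whose local port
 * matches the DCB app protocol number (and whose priority actually
 * changed) is told to close, using the same empty-skb nudge as
 * cxgbit_close_conn(); reconnecting presumably picks up the new
 * priority.
 */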
static void
cxgbit_update_dcb_priority(struct cxgbit_device *cdev, u8 port_id,
			   u8 dcb_priority, u16 port_num)
{
	struct cxgbit_sock *csk;
	struct sk_buff *skb;
	u16 local_port;
	bool wakeup_thread = false;

	spin_lock_bh(&cdev->cskq.lock);
	list_for_each_entry(csk, &cdev->cskq.list, list) {
		if (csk->port_id != port_id)
			continue;

		if (csk->com.local_addr.ss_family == AF_INET6) {
			struct sockaddr_in6 *sock_in6;

			sock_in6 = (struct sockaddr_in6 *)&csk->com.local_addr;
			local_port = ntohs(sock_in6->sin6_port);
		} else {
			struct sockaddr_in *sock_in;

			sock_in = (struct sockaddr_in *)&csk->com.local_addr;
			local_port = ntohs(sock_in->sin_port);
		}

		if (local_port != port_num)
			continue;

		if (csk->dcb_priority == dcb_priority)
			continue;

		skb = alloc_skb(0, GFP_ATOMIC);
		if (!skb)
			continue;

		spin_lock(&csk->rxq.lock);
		__skb_queue_tail(&csk->rxq, skb);
		if (skb_queue_len(&csk->rxq) == 1)
			wakeup_thread = true;
		spin_unlock(&csk->rxq.lock);

		if (wakeup_thread) {
			wake_up(&csk->waitq);
			wakeup_thread = false;
		}
	}
	spin_unlock_bh(&cdev->cskq.lock);
}

static void cxgbit_dcb_workfn(struct work_struct *work)
{
	struct cxgbit_dcb_work *dcb_work;
	struct net_device *ndev;
	struct cxgbit_device *cdev = NULL;
	struct dcb_app_type *iscsi_app;
	u8 priority, port_id = 0xff;

	dcb_work = container_of(work, struct cxgbit_dcb_work, work);
	iscsi_app = &dcb_work->dcb_app;

	if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_IEEE) {
		if (iscsi_app->app.selector != IEEE_8021QAZ_APP_SEL_ANY)
			goto out;

		priority = iscsi_app->app.priority;

	} else if (iscsi_app->dcbx & DCB_CAP_DCBX_VER_CEE) {
		if (iscsi_app->app.selector != DCB_APP_IDTYPE_PORTNUM)
			goto out;

		if (!iscsi_app->app.priority)
			goto out;

		/* CEE reports a bitmap of priorities; use the lowest set bit */
		priority = ffs(iscsi_app->app.priority) - 1;
	} else {
		goto out;
	}

	pr_debug("priority for ifid %d is %u\n",
		 iscsi_app->ifindex, priority);

	ndev = dev_get_by_index(&init_net, iscsi_app->ifindex);

	if (!ndev)
		goto out;

	mutex_lock(&cdev_list_lock);
	cdev = cxgbit_find_device(ndev, &port_id);

	dev_put(ndev);

	if (!cdev) {
		mutex_unlock(&cdev_list_lock);
		goto out;
	}

	cxgbit_update_dcb_priority(cdev, port_id, priority,
				   iscsi_app->app.protocol);
	mutex_unlock(&cdev_list_lock);
out:
	kfree(dcb_work);
}

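/*
 * DCB event notifier. Notifier chains may be invoked in atomic context,
 * hence the GFP_ATOMIC allocation and the deferral of the actual
 * processing to a workqueue; the event payload is copied because it is
 * only valid for the duration of the notifier call.
 */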
static int
cxgbit_dcbevent_notify(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	struct cxgbit_dcb_work *dcb_work;
	struct dcb_app_type *dcb_app = data;

	dcb_work = kzalloc(sizeof(*dcb_work), GFP_ATOMIC);
	if (!dcb_work)
		return NOTIFY_DONE;

	dcb_work->dcb_app = *dcb_app;
	INIT_WORK(&dcb_work->work, cxgbit_dcb_workfn);
	schedule_work(&dcb_work->work);
	return NOTIFY_OK;
}
#endif

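/* No T10-PI offload support; report only normal (unprotected) I/O. */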
static enum target_prot_op cxgbit_get_sup_prot_ops(struct iscsi_conn *conn)
{
	return TARGET_PROT_NORMAL;
}

static struct iscsit_transport cxgbit_transport = {
	.name			= DRV_NAME,
	.transport_type		= ISCSI_CXGBIT,
	.rdma_shutdown		= false,
	.priv_size		= sizeof(struct cxgbit_cmd),
	.owner			= THIS_MODULE,
	.iscsit_setup_np	= cxgbit_setup_np,
	.iscsit_accept_np	= cxgbit_accept_np,
	.iscsit_free_np		= cxgbit_free_np,
	.iscsit_free_conn	= cxgbit_free_conn,
	.iscsit_get_login_rx	= cxgbit_get_login_rx,
	.iscsit_put_login_tx	= cxgbit_put_login_tx,
	.iscsit_immediate_queue	= iscsit_immediate_queue,
	.iscsit_response_queue	= iscsit_response_queue,
	.iscsit_get_dataout	= iscsit_build_r2ts_for_cmd,
	.iscsit_queue_data_in	= iscsit_queue_rsp,
	.iscsit_queue_status	= iscsit_queue_rsp,
	.iscsit_xmit_pdu	= cxgbit_xmit_pdu,
	.iscsit_get_r2t_ttt	= cxgbit_get_r2t_ttt,
	.iscsit_get_rx_pdu	= cxgbit_get_rx_pdu,
	.iscsit_validate_params	= cxgbit_validate_params,
	.iscsit_release_cmd	= cxgbit_release_cmd,
	.iscsit_aborted_task	= iscsit_aborted_task,
	.iscsit_get_sup_prot_ops = cxgbit_get_sup_prot_ops,
};

static struct cxgb4_uld_info cxgbit_uld_info = {
	.name			= DRV_NAME,
	.add			= cxgbit_uld_add,
	.state_change		= cxgbit_uld_state_change,
	.lro_rx_handler		= cxgbit_uld_lro_rx_handler,
	.lro_flush		= cxgbit_uld_lro_flush,
};

#ifdef CONFIG_CHELSIO_T4_DCB
static struct notifier_block cxgbit_dcbevent_nb = {
	.notifier_call = cxgbit_dcbevent_notify,
};
#endif

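/*
 * Register with cxgb4 first so existing adapters are probed and cdevs
 * created, then expose the transport to the iSCSI target core. The
 * BUILD_BUG_ON() guards the driver's private use of skb->cb: all of
 * union cxgbit_skb_cb must fit in the 48-byte control buffer.
 */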
static int __init cxgbit_init(void)
{
	cxgb4_register_uld(CXGB4_ULD_ISCSIT, &cxgbit_uld_info);
	iscsit_register_transport(&cxgbit_transport);

#ifdef CONFIG_CHELSIO_T4_DCB
	pr_info("%s dcb enabled.\n", DRV_NAME);
	register_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	BUILD_BUG_ON(FIELD_SIZEOF(struct sk_buff, cb) <
		     sizeof(union cxgbit_skb_cb));
	return 0;
}

static void __exit cxgbit_exit(void)
{
	struct cxgbit_device *cdev, *tmp;

#ifdef CONFIG_CHELSIO_T4_DCB
	unregister_dcbevent_notifier(&cxgbit_dcbevent_nb);
#endif
	mutex_lock(&cdev_list_lock);
	list_for_each_entry_safe(cdev, tmp, &cdev_list_head, list) {
		list_del(&cdev->list);
		cxgbit_put_cdev(cdev);
	}
	mutex_unlock(&cdev_list_lock);
	iscsit_unregister_transport(&cxgbit_transport);
	cxgb4_unregister_uld(CXGB4_ULD_ISCSIT);
}

module_init(cxgbit_init);
module_exit(cxgbit_exit);

MODULE_DESCRIPTION("Chelsio iSCSI target offload driver");
MODULE_AUTHOR("Chelsio Communications");
MODULE_VERSION(DRV_VERSION);
MODULE_LICENSE("GPL");