 1/* bnx2fc_hwi.c: Broadcom NetXtreme II Linux FCoE offload driver.
 2 * This file contains the low level functions that interact
3 * with 57712 FCoE firmware.
4 *
5 * Copyright (c) 2008 - 2010 Broadcom Corporation
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation.
10 *
11 * Written by: Bhanu Prakash Gollapudi (bprakash@broadcom.com)
12 */
13
14#include "bnx2fc.h"
15
16DECLARE_PER_CPU(struct bnx2fc_percpu_s, bnx2fc_percpu);
17
18static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
19 struct fcoe_kcqe *new_cqe_kcqe);
20static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
21 struct fcoe_kcqe *ofld_kcqe);
22static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
23 struct fcoe_kcqe *ofld_kcqe);
24static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code);
25static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
26 struct fcoe_kcqe *conn_destroy);
27
28int bnx2fc_send_stat_req(struct bnx2fc_hba *hba)
29{
30 struct fcoe_kwqe_stat stat_req;
31 struct kwqe *kwqe_arr[2];
32 int num_kwqes = 1;
33 int rc = 0;
34
35 memset(&stat_req, 0x00, sizeof(struct fcoe_kwqe_stat));
36 stat_req.hdr.op_code = FCOE_KWQE_OPCODE_STAT;
37 stat_req.hdr.flags =
38 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
39
40 stat_req.stat_params_addr_lo = (u32) hba->stats_buf_dma;
41 stat_req.stat_params_addr_hi = (u32) ((u64)hba->stats_buf_dma >> 32);
42
43 kwqe_arr[0] = (struct kwqe *) &stat_req;
44
45 if (hba->cnic && hba->cnic->submit_kwqes)
46 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
47
48 return rc;
49}
50
51/**
52 * bnx2fc_send_fw_fcoe_init_msg - initiates initial handshake with FCoE f/w
53 *
54 * @hba: adapter structure pointer
55 *
 56 * Send down FCoE firmware init KWQEs which initiate the initial handshake
57 * with the f/w.
58 *
59 */
60int bnx2fc_send_fw_fcoe_init_msg(struct bnx2fc_hba *hba)
61{
62 struct fcoe_kwqe_init1 fcoe_init1;
63 struct fcoe_kwqe_init2 fcoe_init2;
64 struct fcoe_kwqe_init3 fcoe_init3;
65 struct kwqe *kwqe_arr[3];
66 int num_kwqes = 3;
67 int rc = 0;
68
69 if (!hba->cnic) {
70 printk(KERN_ALERT PFX "hba->cnic NULL during fcoe fw init\n");
71 return -ENODEV;
72 }
73
74 /* fill init1 KWQE */
75 memset(&fcoe_init1, 0x00, sizeof(struct fcoe_kwqe_init1));
76 fcoe_init1.hdr.op_code = FCOE_KWQE_OPCODE_INIT1;
77 fcoe_init1.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
78 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
79
80 fcoe_init1.num_tasks = BNX2FC_MAX_TASKS;
81 fcoe_init1.sq_num_wqes = BNX2FC_SQ_WQES_MAX;
82 fcoe_init1.rq_num_wqes = BNX2FC_RQ_WQES_MAX;
83 fcoe_init1.rq_buffer_log_size = BNX2FC_RQ_BUF_LOG_SZ;
84 fcoe_init1.cq_num_wqes = BNX2FC_CQ_WQES_MAX;
85 fcoe_init1.dummy_buffer_addr_lo = (u32) hba->dummy_buf_dma;
86 fcoe_init1.dummy_buffer_addr_hi = (u32) ((u64)hba->dummy_buf_dma >> 32);
87 fcoe_init1.task_list_pbl_addr_lo = (u32) hba->task_ctx_bd_dma;
88 fcoe_init1.task_list_pbl_addr_hi =
89 (u32) ((u64) hba->task_ctx_bd_dma >> 32);
 90 fcoe_init1.mtu = BNX2FC_MINI_JUMBO_MTU;
 91
92 fcoe_init1.flags = (PAGE_SHIFT <<
93 FCOE_KWQE_INIT1_LOG_PAGE_SIZE_SHIFT);
94
95 fcoe_init1.num_sessions_log = BNX2FC_NUM_MAX_SESS_LOG;
96
97 /* fill init2 KWQE */
98 memset(&fcoe_init2, 0x00, sizeof(struct fcoe_kwqe_init2));
99 fcoe_init2.hdr.op_code = FCOE_KWQE_OPCODE_INIT2;
100 fcoe_init2.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
101 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
102
103 fcoe_init2.hash_tbl_pbl_addr_lo = (u32) hba->hash_tbl_pbl_dma;
104 fcoe_init2.hash_tbl_pbl_addr_hi = (u32)
105 ((u64) hba->hash_tbl_pbl_dma >> 32);
106
107 fcoe_init2.t2_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_dma;
108 fcoe_init2.t2_hash_tbl_addr_hi = (u32)
109 ((u64) hba->t2_hash_tbl_dma >> 32);
110
111 fcoe_init2.t2_ptr_hash_tbl_addr_lo = (u32) hba->t2_hash_tbl_ptr_dma;
112 fcoe_init2.t2_ptr_hash_tbl_addr_hi = (u32)
113 ((u64) hba->t2_hash_tbl_ptr_dma >> 32);
114
115 fcoe_init2.free_list_count = BNX2FC_NUM_MAX_SESS;
116
117 /* fill init3 KWQE */
118 memset(&fcoe_init3, 0x00, sizeof(struct fcoe_kwqe_init3));
119 fcoe_init3.hdr.op_code = FCOE_KWQE_OPCODE_INIT3;
120 fcoe_init3.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
121 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
122 fcoe_init3.error_bit_map_lo = 0xffffffff;
123 fcoe_init3.error_bit_map_hi = 0xffffffff;
124
125
126 kwqe_arr[0] = (struct kwqe *) &fcoe_init1;
127 kwqe_arr[1] = (struct kwqe *) &fcoe_init2;
128 kwqe_arr[2] = (struct kwqe *) &fcoe_init3;
129
130 if (hba->cnic && hba->cnic->submit_kwqes)
131 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
132
133 return rc;
134}
135int bnx2fc_send_fw_fcoe_destroy_msg(struct bnx2fc_hba *hba)
136{
137 struct fcoe_kwqe_destroy fcoe_destroy;
138 struct kwqe *kwqe_arr[2];
139 int num_kwqes = 1;
140 int rc = -1;
141
142 /* fill destroy KWQE */
143 memset(&fcoe_destroy, 0x00, sizeof(struct fcoe_kwqe_destroy));
144 fcoe_destroy.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY;
145 fcoe_destroy.hdr.flags = (FCOE_KWQE_LAYER_CODE <<
146 FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
147 kwqe_arr[0] = (struct kwqe *) &fcoe_destroy;
148
149 if (hba->cnic && hba->cnic->submit_kwqes)
150 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
151 return rc;
152}
153
154/**
155 * bnx2fc_send_session_ofld_req - initiates FCoE Session offload process
156 *
157 * @port: port structure pointer
158 * @tgt: bnx2fc_rport structure pointer
159 */
160int bnx2fc_send_session_ofld_req(struct fcoe_port *port,
161 struct bnx2fc_rport *tgt)
162{
163 struct fc_lport *lport = port->lport;
164 struct bnx2fc_hba *hba = port->priv;
165 struct kwqe *kwqe_arr[4];
166 struct fcoe_kwqe_conn_offload1 ofld_req1;
167 struct fcoe_kwqe_conn_offload2 ofld_req2;
168 struct fcoe_kwqe_conn_offload3 ofld_req3;
169 struct fcoe_kwqe_conn_offload4 ofld_req4;
170 struct fc_rport_priv *rdata = tgt->rdata;
171 struct fc_rport *rport = tgt->rport;
172 int num_kwqes = 4;
173 u32 port_id;
174 int rc = 0;
175 u16 conn_id;
176
177 /* Initialize offload request 1 structure */
178 memset(&ofld_req1, 0x00, sizeof(struct fcoe_kwqe_conn_offload1));
179
180 ofld_req1.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN1;
181 ofld_req1.hdr.flags =
182 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
183
184
185 conn_id = (u16)tgt->fcoe_conn_id;
186 ofld_req1.fcoe_conn_id = conn_id;
187
188
189 ofld_req1.sq_addr_lo = (u32) tgt->sq_dma;
190 ofld_req1.sq_addr_hi = (u32)((u64) tgt->sq_dma >> 32);
191
192 ofld_req1.rq_pbl_addr_lo = (u32) tgt->rq_pbl_dma;
193 ofld_req1.rq_pbl_addr_hi = (u32)((u64) tgt->rq_pbl_dma >> 32);
194
195 ofld_req1.rq_first_pbe_addr_lo = (u32) tgt->rq_dma;
196 ofld_req1.rq_first_pbe_addr_hi =
197 (u32)((u64) tgt->rq_dma >> 32);
198
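	/*
	 * The RQ producer value carries a wrap/toggle flag in bit 15;
	 * 0x8000 appears to seed that flag, and bnx2fc_return_rqe()
	 * preserves it (note the 0x7fff mask there) as the producer
	 * index wraps.
	 */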
199 ofld_req1.rq_prod = 0x8000;
200
201 /* Initialize offload request 2 structure */
202 memset(&ofld_req2, 0x00, sizeof(struct fcoe_kwqe_conn_offload2));
203
204 ofld_req2.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN2;
205 ofld_req2.hdr.flags =
206 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
207
208 ofld_req2.tx_max_fc_pay_len = rdata->maxframe_size;
209
210 ofld_req2.cq_addr_lo = (u32) tgt->cq_dma;
211 ofld_req2.cq_addr_hi = (u32)((u64)tgt->cq_dma >> 32);
212
213 ofld_req2.xferq_addr_lo = (u32) tgt->xferq_dma;
214 ofld_req2.xferq_addr_hi = (u32)((u64)tgt->xferq_dma >> 32);
215
216 ofld_req2.conn_db_addr_lo = (u32)tgt->conn_db_dma;
217 ofld_req2.conn_db_addr_hi = (u32)((u64)tgt->conn_db_dma >> 32);
218
219 /* Initialize offload request 3 structure */
220 memset(&ofld_req3, 0x00, sizeof(struct fcoe_kwqe_conn_offload3));
221
222 ofld_req3.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN3;
223 ofld_req3.hdr.flags =
224 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
225
226 ofld_req3.vlan_tag = hba->vlan_id <<
227 FCOE_KWQE_CONN_OFFLOAD3_VLAN_ID_SHIFT;
228 ofld_req3.vlan_tag |= 3 << FCOE_KWQE_CONN_OFFLOAD3_PRIORITY_SHIFT;
229
230 port_id = fc_host_port_id(lport->host);
231 if (port_id == 0) {
232 BNX2FC_HBA_DBG(lport, "ofld_req: port_id = 0, link down?\n");
233 return -EINVAL;
234 }
235
236 /*
237 * Store s_id of the initiator for further reference. This will
 238 * be used during disable/destroy and linkdown processing, since
 239 * the port_id is also reset to 0 when the lport is reset
240 */
241 tgt->sid = port_id;
242 ofld_req3.s_id[0] = (port_id & 0x000000FF);
243 ofld_req3.s_id[1] = (port_id & 0x0000FF00) >> 8;
244 ofld_req3.s_id[2] = (port_id & 0x00FF0000) >> 16;
245
246 port_id = rport->port_id;
247 ofld_req3.d_id[0] = (port_id & 0x000000FF);
248 ofld_req3.d_id[1] = (port_id & 0x0000FF00) >> 8;
249 ofld_req3.d_id[2] = (port_id & 0x00FF0000) >> 16;
250
251 ofld_req3.tx_total_conc_seqs = rdata->max_seq;
252
253 ofld_req3.tx_max_conc_seqs_c3 = rdata->max_seq;
254 ofld_req3.rx_max_fc_pay_len = lport->mfs;
255
256 ofld_req3.rx_total_conc_seqs = BNX2FC_MAX_SEQS;
257 ofld_req3.rx_max_conc_seqs_c3 = BNX2FC_MAX_SEQS;
258 ofld_req3.rx_open_seqs_exch_c3 = 1;
259
260 ofld_req3.confq_first_pbe_addr_lo = tgt->confq_dma;
261 ofld_req3.confq_first_pbe_addr_hi = (u32)((u64) tgt->confq_dma >> 32);
262
263 /* set mul_n_port_ids supported flag to 0, until it is supported */
264 ofld_req3.flags = 0;
265 /*
266 ofld_req3.flags |= (((lport->send_sp_features & FC_SP_FT_MNA) ? 1:0) <<
267 FCOE_KWQE_CONN_OFFLOAD3_B_MUL_N_PORT_IDS_SHIFT);
268 */
269 /* Info from PLOGI response */
270 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_EDTR) ? 1 : 0) <<
271 FCOE_KWQE_CONN_OFFLOAD3_B_E_D_TOV_RES_SHIFT);
272
273 ofld_req3.flags |= (((rdata->sp_features & FC_SP_FT_SEQC) ? 1 : 0) <<
274 FCOE_KWQE_CONN_OFFLOAD3_B_CONT_INCR_SEQ_CNT_SHIFT);
275
276 /* vlan flag */
277 ofld_req3.flags |= (hba->vlan_enabled <<
278 FCOE_KWQE_CONN_OFFLOAD3_B_VLAN_FLAG_SHIFT);
279
 280 /* C2_VALID and ACK flags are not set as they are not supported */
281
282
283 /* Initialize offload request 4 structure */
284 memset(&ofld_req4, 0x00, sizeof(struct fcoe_kwqe_conn_offload4));
285 ofld_req4.hdr.op_code = FCOE_KWQE_OPCODE_OFFLOAD_CONN4;
286 ofld_req4.hdr.flags =
287 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
288
289 ofld_req4.e_d_tov_timer_val = lport->e_d_tov / 20;
290
291
292 ofld_req4.src_mac_addr_lo32[0] = port->data_src_addr[5];
293 /* local mac */
294 ofld_req4.src_mac_addr_lo32[1] = port->data_src_addr[4];
295 ofld_req4.src_mac_addr_lo32[2] = port->data_src_addr[3];
296 ofld_req4.src_mac_addr_lo32[3] = port->data_src_addr[2];
297 ofld_req4.src_mac_addr_hi16[0] = port->data_src_addr[1];
298 ofld_req4.src_mac_addr_hi16[1] = port->data_src_addr[0];
299 ofld_req4.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
300 ofld_req4.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
301 ofld_req4.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
302 ofld_req4.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
303 ofld_req4.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
304 ofld_req4.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
305
306 ofld_req4.lcq_addr_lo = (u32) tgt->lcq_dma;
307 ofld_req4.lcq_addr_hi = (u32)((u64) tgt->lcq_dma >> 32);
308
309 ofld_req4.confq_pbl_base_addr_lo = (u32) tgt->confq_pbl_dma;
310 ofld_req4.confq_pbl_base_addr_hi =
311 (u32)((u64) tgt->confq_pbl_dma >> 32);
312
313 kwqe_arr[0] = (struct kwqe *) &ofld_req1;
314 kwqe_arr[1] = (struct kwqe *) &ofld_req2;
315 kwqe_arr[2] = (struct kwqe *) &ofld_req3;
316 kwqe_arr[3] = (struct kwqe *) &ofld_req4;
317
318 if (hba->cnic && hba->cnic->submit_kwqes)
319 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
320
321 return rc;
322}
323
324/**
325 * bnx2fc_send_session_enable_req - initiates FCoE Session enablement
326 *
327 * @port: port structure pointer
328 * @tgt: bnx2fc_rport structure pointer
329 */
330static int bnx2fc_send_session_enable_req(struct fcoe_port *port,
331 struct bnx2fc_rport *tgt)
332{
333 struct kwqe *kwqe_arr[2];
334 struct bnx2fc_hba *hba = port->priv;
335 struct fcoe_kwqe_conn_enable_disable enbl_req;
336 struct fc_lport *lport = port->lport;
337 struct fc_rport *rport = tgt->rport;
338 int num_kwqes = 1;
339 int rc = 0;
340 u32 port_id;
341
342 memset(&enbl_req, 0x00,
343 sizeof(struct fcoe_kwqe_conn_enable_disable));
344 enbl_req.hdr.op_code = FCOE_KWQE_OPCODE_ENABLE_CONN;
345 enbl_req.hdr.flags =
346 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
347
348 enbl_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
349 /* local mac */
350 enbl_req.src_mac_addr_lo32[1] = port->data_src_addr[4];
351 enbl_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
352 enbl_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
353 enbl_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
354 enbl_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
355
356 enbl_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
357 enbl_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
358 enbl_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
359 enbl_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
360 enbl_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
361 enbl_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
362
363 port_id = fc_host_port_id(lport->host);
364 if (port_id != tgt->sid) {
365 printk(KERN_ERR PFX "WARN: enable_req port_id = 0x%x,"
 366 " sid = 0x%x\n", port_id, tgt->sid);
367 port_id = tgt->sid;
368 }
369 enbl_req.s_id[0] = (port_id & 0x000000FF);
370 enbl_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
371 enbl_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
372
373 port_id = rport->port_id;
374 enbl_req.d_id[0] = (port_id & 0x000000FF);
375 enbl_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
376 enbl_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
377 enbl_req.vlan_tag = hba->vlan_id <<
378 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
379 enbl_req.vlan_tag |= 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
380 enbl_req.vlan_flag = hba->vlan_enabled;
381 enbl_req.context_id = tgt->context_id;
382 enbl_req.conn_id = tgt->fcoe_conn_id;
383
384 kwqe_arr[0] = (struct kwqe *) &enbl_req;
385
386 if (hba->cnic && hba->cnic->submit_kwqes)
387 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
388 return rc;
389}
390
391/**
392 * bnx2fc_send_session_disable_req - initiates FCoE Session disable
393 *
394 * @port: port structure pointer
395 * @tgt: bnx2fc_rport structure pointer
396 */
397int bnx2fc_send_session_disable_req(struct fcoe_port *port,
398 struct bnx2fc_rport *tgt)
399{
400 struct bnx2fc_hba *hba = port->priv;
401 struct fcoe_kwqe_conn_enable_disable disable_req;
402 struct kwqe *kwqe_arr[2];
403 struct fc_rport *rport = tgt->rport;
404 int num_kwqes = 1;
405 int rc = 0;
406 u32 port_id;
407
408 memset(&disable_req, 0x00,
409 sizeof(struct fcoe_kwqe_conn_enable_disable));
410 disable_req.hdr.op_code = FCOE_KWQE_OPCODE_DISABLE_CONN;
411 disable_req.hdr.flags =
412 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
413
414 disable_req.src_mac_addr_lo32[0] = port->data_src_addr[5];
415 disable_req.src_mac_addr_lo32[2] = port->data_src_addr[3];
416 disable_req.src_mac_addr_lo32[3] = port->data_src_addr[2];
417 disable_req.src_mac_addr_hi16[0] = port->data_src_addr[1];
418 disable_req.src_mac_addr_hi16[1] = port->data_src_addr[0];
419
420 disable_req.dst_mac_addr_lo32[0] = hba->ctlr.dest_addr[5];/* fcf mac */
421 disable_req.dst_mac_addr_lo32[1] = hba->ctlr.dest_addr[4];
422 disable_req.dst_mac_addr_lo32[2] = hba->ctlr.dest_addr[3];
423 disable_req.dst_mac_addr_lo32[3] = hba->ctlr.dest_addr[2];
424 disable_req.dst_mac_addr_hi16[0] = hba->ctlr.dest_addr[1];
425 disable_req.dst_mac_addr_hi16[1] = hba->ctlr.dest_addr[0];
426
427 port_id = tgt->sid;
428 disable_req.s_id[0] = (port_id & 0x000000FF);
429 disable_req.s_id[1] = (port_id & 0x0000FF00) >> 8;
430 disable_req.s_id[2] = (port_id & 0x00FF0000) >> 16;
431
432
433 port_id = rport->port_id;
434 disable_req.d_id[0] = (port_id & 0x000000FF);
435 disable_req.d_id[1] = (port_id & 0x0000FF00) >> 8;
436 disable_req.d_id[2] = (port_id & 0x00FF0000) >> 16;
437 disable_req.context_id = tgt->context_id;
438 disable_req.conn_id = tgt->fcoe_conn_id;
439 disable_req.vlan_tag = hba->vlan_id <<
440 FCOE_KWQE_CONN_ENABLE_DISABLE_VLAN_ID_SHIFT;
441 disable_req.vlan_tag |=
442 3 << FCOE_KWQE_CONN_ENABLE_DISABLE_PRIORITY_SHIFT;
443 disable_req.vlan_flag = hba->vlan_enabled;
444
445 kwqe_arr[0] = (struct kwqe *) &disable_req;
446
447 if (hba->cnic && hba->cnic->submit_kwqes)
448 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
449
450 return rc;
451}
452
453/**
454 * bnx2fc_send_session_destroy_req - initiates FCoE Session destroy
455 *
 456 * @hba: adapter structure pointer
457 * @tgt: bnx2fc_rport structure pointer
458 */
459int bnx2fc_send_session_destroy_req(struct bnx2fc_hba *hba,
460 struct bnx2fc_rport *tgt)
461{
462 struct fcoe_kwqe_conn_destroy destroy_req;
463 struct kwqe *kwqe_arr[2];
464 int num_kwqes = 1;
465 int rc = 0;
466
467 memset(&destroy_req, 0x00, sizeof(struct fcoe_kwqe_conn_destroy));
468 destroy_req.hdr.op_code = FCOE_KWQE_OPCODE_DESTROY_CONN;
469 destroy_req.hdr.flags =
470 (FCOE_KWQE_LAYER_CODE << FCOE_KWQE_HEADER_LAYER_CODE_SHIFT);
471
472 destroy_req.context_id = tgt->context_id;
473 destroy_req.conn_id = tgt->fcoe_conn_id;
474
475 kwqe_arr[0] = (struct kwqe *) &destroy_req;
476
477 if (hba->cnic && hba->cnic->submit_kwqes)
478 rc = hba->cnic->submit_kwqes(hba->cnic, kwqe_arr, num_kwqes);
479
480 return rc;
481}
482
483static void bnx2fc_unsol_els_work(struct work_struct *work)
484{
485 struct bnx2fc_unsol_els *unsol_els;
486 struct fc_lport *lport;
487 struct fc_frame *fp;
488
489 unsol_els = container_of(work, struct bnx2fc_unsol_els, unsol_els_work);
490 lport = unsol_els->lport;
491 fp = unsol_els->fp;
492 fc_exch_recv(lport, fp);
493 kfree(unsol_els);
494}
495
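/*
 * bnx2fc_process_l2_frame_compl - handle an unsolicited FC frame that
 * arrived via the RQ. The frame is rebuilt into an fc_frame and handed
 * to libfc through bnx2fc_unsol_els_work, so fc_exch_recv() runs from
 * workqueue (process) context instead of the CQ processing path.
 */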
496void bnx2fc_process_l2_frame_compl(struct bnx2fc_rport *tgt,
497 unsigned char *buf,
498 u32 frame_len, u16 l2_oxid)
499{
500 struct fcoe_port *port = tgt->port;
501 struct fc_lport *lport = port->lport;
502 struct bnx2fc_unsol_els *unsol_els;
503 struct fc_frame_header *fh;
504 struct fc_frame *fp;
505 struct sk_buff *skb;
506 u32 payload_len;
507 u32 crc;
508 u8 op;
509
510
511 unsol_els = kzalloc(sizeof(*unsol_els), GFP_ATOMIC);
512 if (!unsol_els) {
513 BNX2FC_TGT_DBG(tgt, "Unable to allocate unsol_work\n");
514 return;
515 }
516
517 BNX2FC_TGT_DBG(tgt, "l2_frame_compl l2_oxid = 0x%x, frame_len = %d\n",
518 l2_oxid, frame_len);
519
520 payload_len = frame_len - sizeof(struct fc_frame_header);
521
522 fp = fc_frame_alloc(lport, payload_len);
523 if (!fp) {
524 printk(KERN_ERR PFX "fc_frame_alloc failure\n");
 525 kfree(unsol_els);
 526 return;
527 }
528
529 fh = (struct fc_frame_header *) fc_frame_header_get(fp);
530 /* Copy FC Frame header and payload into the frame */
531 memcpy(fh, buf, frame_len);
532
533 if (l2_oxid != FC_XID_UNKNOWN)
534 fh->fh_ox_id = htons(l2_oxid);
535
536 skb = fp_skb(fp);
537
538 if ((fh->fh_r_ctl == FC_RCTL_ELS_REQ) ||
539 (fh->fh_r_ctl == FC_RCTL_ELS_REP)) {
540
541 if (fh->fh_type == FC_TYPE_ELS) {
542 op = fc_frame_payload_op(fp);
543 if ((op == ELS_TEST) || (op == ELS_ESTC) ||
544 (op == ELS_FAN) || (op == ELS_CSU)) {
545 /*
546 * No need to reply for these
547 * ELS requests
548 */
549 printk(KERN_ERR PFX "dropping ELS 0x%x\n", op);
550 kfree_skb(skb);
 551 kfree(unsol_els);
 552 return;
553 }
554 }
555 crc = fcoe_fc_crc(fp);
556 fc_frame_init(fp);
557 fr_dev(fp) = lport;
558 fr_sof(fp) = FC_SOF_I3;
559 fr_eof(fp) = FC_EOF_T;
560 fr_crc(fp) = cpu_to_le32(~crc);
561 unsol_els->lport = lport;
562 unsol_els->fp = fp;
563 INIT_WORK(&unsol_els->unsol_els_work, bnx2fc_unsol_els_work);
564 queue_work(bnx2fc_wq, &unsol_els->unsol_els_work);
565 } else {
566 BNX2FC_HBA_DBG(lport, "fh_r_ctl = 0x%x\n", fh->fh_r_ctl);
567 kfree_skb(skb);
 568 kfree(unsol_els);
 569 }
570}
571
572static void bnx2fc_process_unsol_compl(struct bnx2fc_rport *tgt, u16 wqe)
573{
574 u8 num_rq;
575 struct fcoe_err_report_entry *err_entry;
576 unsigned char *rq_data;
577 unsigned char *buf = NULL, *buf1;
578 int i;
579 u16 xid;
580 u32 frame_len, len;
581 struct bnx2fc_cmd *io_req = NULL;
582 struct fcoe_task_ctx_entry *task, *task_page;
583 struct bnx2fc_hba *hba = tgt->port->priv;
584 int task_idx, index;
585 int rc = 0;
586
587
588 BNX2FC_TGT_DBG(tgt, "Entered UNSOL COMPLETION wqe = 0x%x\n", wqe);
589 switch (wqe & FCOE_UNSOLICITED_CQE_SUBTYPE) {
590 case FCOE_UNSOLICITED_FRAME_CQE_TYPE:
591 frame_len = (wqe & FCOE_UNSOLICITED_CQE_PKT_LEN) >>
592 FCOE_UNSOLICITED_CQE_PKT_LEN_SHIFT;
593
594 num_rq = (frame_len + BNX2FC_RQ_BUF_SZ - 1) / BNX2FC_RQ_BUF_SZ;
595
 596 spin_lock_bh(&tgt->tgt_lock);
 597 rq_data = (unsigned char *)bnx2fc_get_next_rqe(tgt, num_rq);
 598 spin_unlock_bh(&tgt->tgt_lock);
599
 600 if (rq_data) {
601 buf = rq_data;
602 } else {
603 buf1 = buf = kmalloc((num_rq * BNX2FC_RQ_BUF_SZ),
604 GFP_ATOMIC);
605
606 if (!buf1) {
607 BNX2FC_TGT_DBG(tgt, "Memory alloc failure\n");
608 break;
609 }
610
611 for (i = 0; i < num_rq; i++) {
 612 spin_lock_bh(&tgt->tgt_lock);
 613 rq_data = (unsigned char *)
 614 bnx2fc_get_next_rqe(tgt, 1);
 615 spin_unlock_bh(&tgt->tgt_lock);
 616 len = BNX2FC_RQ_BUF_SZ;
617 memcpy(buf1, rq_data, len);
618 buf1 += len;
619 }
620 }
621 bnx2fc_process_l2_frame_compl(tgt, buf, frame_len,
622 FC_XID_UNKNOWN);
623
624 if (buf != rq_data)
625 kfree(buf);
 626 spin_lock_bh(&tgt->tgt_lock);
 627 bnx2fc_return_rqe(tgt, num_rq);
 628 spin_unlock_bh(&tgt->tgt_lock);
 629 break;
630
631 case FCOE_ERROR_DETECTION_CQE_TYPE:
632 /*
 633 * In case of error reporting CQE a single RQ entry
 634 * is consumed.
 635 */
636 spin_lock_bh(&tgt->tgt_lock);
637 num_rq = 1;
638 err_entry = (struct fcoe_err_report_entry *)
639 bnx2fc_get_next_rqe(tgt, 1);
640 xid = err_entry->fc_hdr.ox_id;
641 BNX2FC_TGT_DBG(tgt, "Unsol Error Frame OX_ID = 0x%x\n", xid);
642 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x\n",
643 err_entry->err_warn_bitmap_hi,
644 err_entry->err_warn_bitmap_lo);
645 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x\n",
646 err_entry->tx_buf_off, err_entry->rx_buf_off);
647
648 bnx2fc_return_rqe(tgt, 1);
649
650 if (xid > BNX2FC_MAX_XID) {
651 BNX2FC_TGT_DBG(tgt, "xid(0x%x) out of FW range\n",
652 xid);
653 spin_unlock_bh(&tgt->tgt_lock);
654 break;
655 }
656
657 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
658 index = xid % BNX2FC_TASKS_PER_PAGE;
659 task_page = (struct fcoe_task_ctx_entry *)
660 hba->task_ctx[task_idx];
661 task = &(task_page[index]);
662
663 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
664 if (!io_req) {
665 spin_unlock_bh(&tgt->tgt_lock);
666 break;
667 }
668
669 if (io_req->cmd_type != BNX2FC_SCSI_CMD) {
670 printk(KERN_ERR PFX "err_warn: Not a SCSI cmd\n");
671 spin_unlock_bh(&tgt->tgt_lock);
672 break;
673 }
674
675 if (test_and_clear_bit(BNX2FC_FLAG_IO_CLEANUP,
676 &io_req->req_flags)) {
677 BNX2FC_IO_DBG(io_req, "unsol_err: cleanup in "
678 "progress.. ignore unsol err\n");
679 spin_unlock_bh(&tgt->tgt_lock);
680 break;
681 }
682
683 /*
684 * If ABTS is already in progress, and FW error is
685 * received after that, do not cancel the timeout_work
686 * and let the error recovery continue by explicitly
687 * logging out the target, when the ABTS eventually
688 * times out.
689 */
690 if (!test_and_set_bit(BNX2FC_FLAG_ISSUE_ABTS,
691 &io_req->req_flags)) {
692 /*
693 * Cancel the timeout_work, as we received IO
694 * completion with FW error.
695 */
696 if (cancel_delayed_work(&io_req->timeout_work))
697 kref_put(&io_req->refcount,
698 bnx2fc_cmd_release); /* timer hold */
699
700 rc = bnx2fc_initiate_abts(io_req);
701 if (rc != SUCCESS) {
702 BNX2FC_IO_DBG(io_req, "err_warn: initiate_abts "
703 "failed. issue cleanup\n");
704 rc = bnx2fc_initiate_cleanup(io_req);
705 BUG_ON(rc);
706 }
707 } else
708 printk(KERN_ERR PFX "err_warn: io_req (0x%x) already "
709 "in ABTS processing\n", xid);
710 spin_unlock_bh(&tgt->tgt_lock);
711 break;
712
713 case FCOE_WARNING_DETECTION_CQE_TYPE:
714 /*
 715 * In case of warning reporting CQE a single RQ entry
 716 * is consumed.
717 */
 718 spin_lock_bh(&tgt->tgt_lock);
 719 num_rq = 1;
720 err_entry = (struct fcoe_err_report_entry *)
721 bnx2fc_get_next_rqe(tgt, 1);
722 xid = cpu_to_be16(err_entry->fc_hdr.ox_id);
723 BNX2FC_TGT_DBG(tgt, "Unsol Warning Frame OX_ID = 0x%x\n", xid);
724 BNX2FC_TGT_DBG(tgt, "err_warn_bitmap = %08x:%08x",
725 err_entry->err_warn_bitmap_hi,
726 err_entry->err_warn_bitmap_lo);
727 BNX2FC_TGT_DBG(tgt, "buf_offsets - tx = 0x%x, rx = 0x%x",
728 err_entry->tx_buf_off, err_entry->rx_buf_off);
729
730 bnx2fc_return_rqe(tgt, 1);
 731 spin_unlock_bh(&tgt->tgt_lock);
 732 break;
733
734 default:
735 printk(KERN_ERR PFX "Unsol Compl: Invalid CQE Subtype\n");
736 break;
737 }
738}
739
740void bnx2fc_process_cq_compl(struct bnx2fc_rport *tgt, u16 wqe)
741{
742 struct fcoe_task_ctx_entry *task;
743 struct fcoe_task_ctx_entry *task_page;
744 struct fcoe_port *port = tgt->port;
745 struct bnx2fc_hba *hba = port->priv;
746 struct bnx2fc_cmd *io_req;
747 int task_idx, index;
748 u16 xid;
749 u8 cmd_type;
750 u8 rx_state = 0;
751 u8 num_rq;
752
753 spin_lock_bh(&tgt->tgt_lock);
754 xid = wqe & FCOE_PEND_WQ_CQE_TASK_ID;
755 if (xid >= BNX2FC_MAX_TASKS) {
756 printk(KERN_ALERT PFX "ERROR:xid out of range\n");
757 spin_unlock_bh(&tgt->tgt_lock);
758 return;
759 }
760 task_idx = xid / BNX2FC_TASKS_PER_PAGE;
761 index = xid % BNX2FC_TASKS_PER_PAGE;
762 task_page = (struct fcoe_task_ctx_entry *)hba->task_ctx[task_idx];
763 task = &(task_page[index]);
764
765 num_rq = ((task->rx_wr_tx_rd.rx_flags &
766 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE) >>
767 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_NUM_RQ_WQE_SHIFT);
768
769 io_req = (struct bnx2fc_cmd *)hba->cmd_mgr->cmds[xid];
770
771 if (io_req == NULL) {
772 printk(KERN_ERR PFX "ERROR? cq_compl - io_req is NULL\n");
773 spin_unlock_bh(&tgt->tgt_lock);
774 return;
775 }
776
777 /* Timestamp IO completion time */
778 cmd_type = io_req->cmd_type;
779
780 /* optimized completion path */
781 if (cmd_type == BNX2FC_SCSI_CMD) {
782 rx_state = ((task->rx_wr_tx_rd.rx_flags &
783 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE) >>
784 FCOE_TASK_CTX_ENTRY_RXWR_TXRD_RX_STATE_SHIFT);
785
786 if (rx_state == FCOE_TASK_RX_STATE_COMPLETED) {
787 bnx2fc_process_scsi_cmd_compl(io_req, task, num_rq);
788 spin_unlock_bh(&tgt->tgt_lock);
789 return;
790 }
791 }
792
793 /* Process other IO completion types */
794 switch (cmd_type) {
795 case BNX2FC_SCSI_CMD:
796 if (rx_state == FCOE_TASK_RX_STATE_ABTS_COMPLETED)
797 bnx2fc_process_abts_compl(io_req, task, num_rq);
798 else if (rx_state ==
799 FCOE_TASK_RX_STATE_EXCHANGE_CLEANUP_COMPLETED)
800 bnx2fc_process_cleanup_compl(io_req, task, num_rq);
801 else
802 printk(KERN_ERR PFX "Invalid rx state - %d\n",
803 rx_state);
804 break;
805
806 case BNX2FC_TASK_MGMT_CMD:
807 BNX2FC_IO_DBG(io_req, "Processing TM complete\n");
808 bnx2fc_process_tm_compl(io_req, task, num_rq);
809 break;
810
811 case BNX2FC_ABTS:
812 /*
813 * ABTS request received by firmware. ABTS response
814 * will be delivered to the task belonging to the IO
815 * that was aborted
816 */
817 BNX2FC_IO_DBG(io_req, "cq_compl- ABTS sent out by fw\n");
818 kref_put(&io_req->refcount, bnx2fc_cmd_release);
819 break;
820
821 case BNX2FC_ELS:
822 BNX2FC_IO_DBG(io_req, "cq_compl - call process_els_compl\n");
823 bnx2fc_process_els_compl(io_req, task, num_rq);
824 break;
825
826 case BNX2FC_CLEANUP:
827 BNX2FC_IO_DBG(io_req, "cq_compl- cleanup resp rcvd\n");
828 kref_put(&io_req->refcount, bnx2fc_cmd_release);
829 break;
830
831 default:
832 printk(KERN_ERR PFX "Invalid cmd_type %d\n", cmd_type);
833 break;
834 }
835 spin_unlock_bh(&tgt->tgt_lock);
836}
837
838struct bnx2fc_work *bnx2fc_alloc_work(struct bnx2fc_rport *tgt, u16 wqe)
839{
840 struct bnx2fc_work *work;
841 work = kzalloc(sizeof(struct bnx2fc_work), GFP_ATOMIC);
842 if (!work)
843 return NULL;
844
845 INIT_LIST_HEAD(&work->list);
846 work->tgt = tgt;
847 work->wqe = wqe;
848 return work;
849}
850
851int bnx2fc_process_new_cqes(struct bnx2fc_rport *tgt)
852{
853 struct fcoe_cqe *cq;
854 u32 cq_cons;
855 struct fcoe_cqe *cqe;
856 u16 wqe;
857 bool more_cqes_found = false;
858
859 /*
860 * cq_lock is a low contention lock used to protect
861 * the CQ data structure from being freed up during
862 * the upload operation
863 */
864 spin_lock_bh(&tgt->cq_lock);
865
866 if (!tgt->cq) {
867 printk(KERN_ERR PFX "process_new_cqes: cq is NULL\n");
868 spin_unlock_bh(&tgt->cq_lock);
869 return 0;
870 }
871 cq = tgt->cq;
872 cq_cons = tgt->cq_cons_idx;
873 cqe = &cq[cq_cons];
874
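	/*
	 * A CQE is new as long as its toggle bit matches
	 * tgt->cq_curr_toggle_bit; the bit is flipped every time the
	 * consumer index wraps, so entries left over from the previous
	 * pass around the ring are ignored.
	 */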
875 do {
876 more_cqes_found ^= true;
877
878 while (((wqe = cqe->wqe) & FCOE_CQE_TOGGLE_BIT) ==
879 (tgt->cq_curr_toggle_bit <<
880 FCOE_CQE_TOGGLE_BIT_SHIFT)) {
881
882 /* new entry on the cq */
883 if (wqe & FCOE_CQE_CQE_TYPE) {
884 /* Unsolicited event notification */
885 bnx2fc_process_unsol_compl(tgt, wqe);
886 } else {
887 struct bnx2fc_work *work = NULL;
888 struct bnx2fc_percpu_s *fps = NULL;
889 unsigned int cpu = wqe % num_possible_cpus();
890
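				/*
				 * Spread completion handling across the
				 * per-CPU bnx2fc threads: the task id in the
				 * wqe selects the CPU. If that thread is not
				 * running, the completion is processed
				 * inline below.
				 */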
891 fps = &per_cpu(bnx2fc_percpu, cpu);
892 spin_lock_bh(&fps->fp_work_lock);
893 if (unlikely(!fps->iothread))
894 goto unlock;
895
896 work = bnx2fc_alloc_work(tgt, wqe);
897 if (work)
898 list_add_tail(&work->list,
899 &fps->work_list);
900unlock:
901 spin_unlock_bh(&fps->fp_work_lock);
902
903 /* Pending work request completion */
904 if (fps->iothread && work)
905 wake_up_process(fps->iothread);
906 else
907 bnx2fc_process_cq_compl(tgt, wqe);
908 }
909 cqe++;
910 tgt->cq_cons_idx++;
911
912 if (tgt->cq_cons_idx == BNX2FC_CQ_WQES_MAX) {
913 tgt->cq_cons_idx = 0;
914 cqe = cq;
915 tgt->cq_curr_toggle_bit =
916 1 - tgt->cq_curr_toggle_bit;
917 }
918 }
919 /* Re-arm CQ */
920 if (more_cqes_found) {
921 tgt->conn_db->cq_arm.lo = -1;
922 wmb();
923 }
924 } while (more_cqes_found);
925
926 /*
927 * Commit tgt->cq_cons_idx change to the memory
928 * spin_lock implies full memory barrier, no need to smp_wmb
929 */
930
931 spin_unlock_bh(&tgt->cq_lock);
932 return 0;
933}
934
935/**
936 * bnx2fc_fastpath_notification - process global event queue (KCQ)
937 *
938 * @hba: adapter structure pointer
939 * @new_cqe_kcqe: pointer to newly DMA'd KCQ entry
940 *
941 * Fast path event notification handler
942 */
943static void bnx2fc_fastpath_notification(struct bnx2fc_hba *hba,
944 struct fcoe_kcqe *new_cqe_kcqe)
945{
946 u32 conn_id = new_cqe_kcqe->fcoe_conn_id;
947 struct bnx2fc_rport *tgt = hba->tgt_ofld_list[conn_id];
948
949 if (!tgt) {
950 printk(KERN_ALERT PFX "conn_id 0x%x not valid\n", conn_id);
951 return;
952 }
953
954 bnx2fc_process_new_cqes(tgt);
955}
956
957/**
958 * bnx2fc_process_ofld_cmpl - process FCoE session offload completion
959 *
960 * @hba: adapter structure pointer
961 * @ofld_kcqe: connection offload kcqe pointer
962 *
963 * handle session offload completion, enable the session if offload is
964 * successful.
965 */
966static void bnx2fc_process_ofld_cmpl(struct bnx2fc_hba *hba,
967 struct fcoe_kcqe *ofld_kcqe)
968{
969 struct bnx2fc_rport *tgt;
970 struct fcoe_port *port;
971 u32 conn_id;
972 u32 context_id;
973 int rc;
974
975 conn_id = ofld_kcqe->fcoe_conn_id;
976 context_id = ofld_kcqe->fcoe_conn_context_id;
977 tgt = hba->tgt_ofld_list[conn_id];
978 if (!tgt) {
979 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: No pending ofld req\n");
980 return;
981 }
982 BNX2FC_TGT_DBG(tgt, "Entered ofld compl - context_id = 0x%x\n",
983 ofld_kcqe->fcoe_conn_context_id);
984 port = tgt->port;
985 if (hba != tgt->port->priv) {
986 printk(KERN_ALERT PFX "ERROR:ofld_cmpl: HBA mis-match\n");
987 goto ofld_cmpl_err;
988 }
989 /*
990 * cnic has allocated a context_id for this session; use this
991 * while enabling the session.
992 */
993 tgt->context_id = context_id;
994 if (ofld_kcqe->completion_status) {
995 if (ofld_kcqe->completion_status ==
996 FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE) {
997 printk(KERN_ERR PFX "unable to allocate FCoE context "
998 "resources\n");
999 set_bit(BNX2FC_FLAG_CTX_ALLOC_FAILURE, &tgt->flags);
1000 }
1001 goto ofld_cmpl_err;
1002 } else {
1003
1004 /* now enable the session */
1005 rc = bnx2fc_send_session_enable_req(port, tgt);
1006 if (rc) {
1007 printk(KERN_ALERT PFX "enable session failed\n");
1008 goto ofld_cmpl_err;
1009 }
1010 }
1011 return;
1012ofld_cmpl_err:
1013 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1014 wake_up_interruptible(&tgt->ofld_wait);
1015}
1016
1017/**
1018 * bnx2fc_process_enable_conn_cmpl - process FCoE session enable completion
1019 *
1020 * @hba: adapter structure pointer
1021 * @ofld_kcqe: connection offload kcqe pointer
1022 *
1023 * handle session enable completion, mark the rport as ready
1024 */
1025
1026static void bnx2fc_process_enable_conn_cmpl(struct bnx2fc_hba *hba,
1027 struct fcoe_kcqe *ofld_kcqe)
1028{
1029 struct bnx2fc_rport *tgt;
1030 u32 conn_id;
1031 u32 context_id;
1032
1033 context_id = ofld_kcqe->fcoe_conn_context_id;
1034 conn_id = ofld_kcqe->fcoe_conn_id;
1035 tgt = hba->tgt_ofld_list[conn_id];
1036 if (!tgt) {
1037 printk(KERN_ALERT PFX "ERROR:enbl_cmpl: No pending ofld req\n");
1038 return;
1039 }
1040
1041 BNX2FC_TGT_DBG(tgt, "Enable compl - context_id = 0x%x\n",
1042 ofld_kcqe->fcoe_conn_context_id);
1043
1044 /*
1045 * context_id should be the same for this target during offload
1046 * and enable
1047 */
1048 if (tgt->context_id != context_id) {
1049 printk(KERN_ALERT PFX "context id mis-match\n");
1050 return;
1051 }
1052 if (hba != tgt->port->priv) {
1053 printk(KERN_ALERT PFX "bnx2fc-enbl_cmpl: HBA mis-match\n");
1054 goto enbl_cmpl_err;
1055 }
1056 if (ofld_kcqe->completion_status) {
1057 goto enbl_cmpl_err;
1058 } else {
1059 /* enable successful - rport ready for issuing IOs */
1060 set_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1061 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1062 wake_up_interruptible(&tgt->ofld_wait);
1063 }
1064 return;
1065
1066enbl_cmpl_err:
1067 set_bit(BNX2FC_FLAG_OFLD_REQ_CMPL, &tgt->flags);
1068 wake_up_interruptible(&tgt->ofld_wait);
1069}
1070
1071static void bnx2fc_process_conn_disable_cmpl(struct bnx2fc_hba *hba,
1072 struct fcoe_kcqe *disable_kcqe)
1073{
1074
1075 struct bnx2fc_rport *tgt;
1076 u32 conn_id;
1077
1078 conn_id = disable_kcqe->fcoe_conn_id;
1079 tgt = hba->tgt_ofld_list[conn_id];
1080 if (!tgt) {
1081 printk(KERN_ALERT PFX "ERROR: disable_cmpl: No disable req\n");
1082 return;
1083 }
1084
1085 BNX2FC_TGT_DBG(tgt, PFX "disable_cmpl: conn_id %d\n", conn_id);
1086
1087 if (disable_kcqe->completion_status) {
1088 printk(KERN_ALERT PFX "ERROR: Disable failed with cmpl status %d\n",
1089 disable_kcqe->completion_status);
1090 return;
1091 } else {
1092 /* disable successful */
1093 BNX2FC_TGT_DBG(tgt, "disable successful\n");
1094 clear_bit(BNX2FC_FLAG_OFFLOADED, &tgt->flags);
1095 set_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1096 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1097 wake_up_interruptible(&tgt->upld_wait);
1098 }
1099}
1100
1101static void bnx2fc_process_conn_destroy_cmpl(struct bnx2fc_hba *hba,
1102 struct fcoe_kcqe *destroy_kcqe)
1103{
1104 struct bnx2fc_rport *tgt;
1105 u32 conn_id;
1106
1107 conn_id = destroy_kcqe->fcoe_conn_id;
1108 tgt = hba->tgt_ofld_list[conn_id];
1109 if (!tgt) {
1110 printk(KERN_ALERT PFX "destroy_cmpl: No destroy req\n");
1111 return;
1112 }
1113
1114 BNX2FC_TGT_DBG(tgt, "destroy_cmpl: conn_id %d\n", conn_id);
1115
1116 if (destroy_kcqe->completion_status) {
1117 printk(KERN_ALERT PFX "Destroy conn failed, cmpl status %d\n",
1118 destroy_kcqe->completion_status);
1119 return;
1120 } else {
1121 /* destroy successful */
1122 BNX2FC_TGT_DBG(tgt, "upload successful\n");
1123 clear_bit(BNX2FC_FLAG_DISABLED, &tgt->flags);
1124 set_bit(BNX2FC_FLAG_DESTROYED, &tgt->flags);
1125 set_bit(BNX2FC_FLAG_UPLD_REQ_COMPL, &tgt->flags);
1126 wake_up_interruptible(&tgt->upld_wait);
1127 }
1128}
1129
1130static void bnx2fc_init_failure(struct bnx2fc_hba *hba, u32 err_code)
1131{
1132 switch (err_code) {
1133 case FCOE_KCQE_COMPLETION_STATUS_INVALID_OPCODE:
1134 printk(KERN_ERR PFX "init_failure due to invalid opcode\n");
1135 break;
1136
1137 case FCOE_KCQE_COMPLETION_STATUS_CTX_ALLOC_FAILURE:
1138 printk(KERN_ERR PFX "init failed due to ctx alloc failure\n");
1139 break;
1140
1141 case FCOE_KCQE_COMPLETION_STATUS_NIC_ERROR:
1142 printk(KERN_ERR PFX "init_failure due to NIC error\n");
1143 break;
1144
1145 default:
1146 printk(KERN_ERR PFX "Unknown Error code %d\n", err_code);
1147 }
1148}
1149
1150/**
 1151 * bnx2fc_indicate_kcqe - process KCQE
1152 *
1153 * @hba: adapter structure pointer
1154 * @kcqe: kcqe pointer
1155 * @num_cqe: Number of completion queue elements
1156 *
1157 * Generic KCQ event handler
1158 */
1159void bnx2fc_indicate_kcqe(void *context, struct kcqe *kcq[],
1160 u32 num_cqe)
1161{
1162 struct bnx2fc_hba *hba = (struct bnx2fc_hba *)context;
1163 int i = 0;
1164 struct fcoe_kcqe *kcqe = NULL;
1165
1166 while (i < num_cqe) {
1167 kcqe = (struct fcoe_kcqe *) kcq[i++];
1168
1169 switch (kcqe->op_code) {
1170 case FCOE_KCQE_OPCODE_CQ_EVENT_NOTIFICATION:
1171 bnx2fc_fastpath_notification(hba, kcqe);
1172 break;
1173
1174 case FCOE_KCQE_OPCODE_OFFLOAD_CONN:
1175 bnx2fc_process_ofld_cmpl(hba, kcqe);
1176 break;
1177
1178 case FCOE_KCQE_OPCODE_ENABLE_CONN:
1179 bnx2fc_process_enable_conn_cmpl(hba, kcqe);
1180 break;
1181
1182 case FCOE_KCQE_OPCODE_INIT_FUNC:
1183 if (kcqe->completion_status !=
1184 FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1185 bnx2fc_init_failure(hba,
1186 kcqe->completion_status);
1187 } else {
1188 set_bit(ADAPTER_STATE_UP, &hba->adapter_state);
1189 bnx2fc_get_link_state(hba);
1190 printk(KERN_INFO PFX "[%.2x]: FCOE_INIT passed\n",
1191 (u8)hba->pcidev->bus->number);
1192 }
1193 break;
1194
1195 case FCOE_KCQE_OPCODE_DESTROY_FUNC:
1196 if (kcqe->completion_status !=
1197 FCOE_KCQE_COMPLETION_STATUS_SUCCESS) {
1198
1199 printk(KERN_ERR PFX "DESTROY failed\n");
1200 } else {
1201 printk(KERN_ERR PFX "DESTROY success\n");
1202 }
1203 hba->flags |= BNX2FC_FLAG_DESTROY_CMPL;
1204 wake_up_interruptible(&hba->destroy_wait);
1205 break;
1206
1207 case FCOE_KCQE_OPCODE_DISABLE_CONN:
1208 bnx2fc_process_conn_disable_cmpl(hba, kcqe);
1209 break;
1210
1211 case FCOE_KCQE_OPCODE_DESTROY_CONN:
1212 bnx2fc_process_conn_destroy_cmpl(hba, kcqe);
1213 break;
1214
1215 case FCOE_KCQE_OPCODE_STAT_FUNC:
1216 if (kcqe->completion_status !=
1217 FCOE_KCQE_COMPLETION_STATUS_SUCCESS)
1218 printk(KERN_ERR PFX "STAT failed\n");
1219 complete(&hba->stat_req_done);
1220 break;
1221
1222 case FCOE_KCQE_OPCODE_FCOE_ERROR:
1223 /* fall thru */
1224 default:
1225 printk(KERN_ALERT PFX "unknown opcode 0x%x\n",
1226 kcqe->op_code);
1227 }
1228 }
1229}
1230
1231void bnx2fc_add_2_sq(struct bnx2fc_rport *tgt, u16 xid)
1232{
1233 struct fcoe_sqe *sqe;
1234
1235 sqe = &tgt->sq[tgt->sq_prod_idx];
1236
1237 /* Fill SQ WQE */
1238 sqe->wqe = xid << FCOE_SQE_TASK_ID_SHIFT;
1239 sqe->wqe |= tgt->sq_curr_toggle_bit << FCOE_SQE_TOGGLE_BIT_SHIFT;
1240
1241 /* Advance SQ Prod Idx */
1242 if (++tgt->sq_prod_idx == BNX2FC_SQ_WQES_MAX) {
1243 tgt->sq_prod_idx = 0;
1244 tgt->sq_curr_toggle_bit = 1 - tgt->sq_curr_toggle_bit;
1245 }
1246}
1247
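/*
 * bnx2fc_ring_doorbell - notify the chip of new SQ entries. The producer
 * value packs the SQ producer index with the current toggle bit (bit 15),
 * matching the encoding used in bnx2fc_add_2_sq().
 */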
1248void bnx2fc_ring_doorbell(struct bnx2fc_rport *tgt)
1249{
1250 struct b577xx_doorbell_set_prod ev_doorbell;
1251 u32 msg;
1252
1253 wmb();
1254
1255 memset(&ev_doorbell, 0, sizeof(struct b577xx_doorbell_set_prod));
1256 ev_doorbell.header.header = B577XX_DOORBELL_HDR_DB_TYPE;
1257
1258 ev_doorbell.prod = tgt->sq_prod_idx |
1259 (tgt->sq_curr_toggle_bit << 15);
1260 ev_doorbell.header.header |= B577XX_FCOE_CONNECTION_TYPE <<
1261 B577XX_DOORBELL_HDR_CONN_TYPE_SHIFT;
1262 msg = *((u32 *)&ev_doorbell);
1263 writel(cpu_to_le32(msg), tgt->ctx_base);
1264
1265 mmiowb();
1266
1267}
1268
1269int bnx2fc_map_doorbell(struct bnx2fc_rport *tgt)
1270{
1271 u32 context_id = tgt->context_id;
1272 struct fcoe_port *port = tgt->port;
1273 u32 reg_off;
1274 resource_size_t reg_base;
1275 struct bnx2fc_hba *hba = port->priv;
1276
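	/*
	 * Each offloaded connection has its own doorbell page within the
	 * 577xx doorbell BAR; only the 4 bytes written by
	 * bnx2fc_ring_doorbell() are mapped here.
	 */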
1277 reg_base = pci_resource_start(hba->pcidev,
1278 BNX2X_DOORBELL_PCI_BAR);
1279 reg_off = BNX2FC_5771X_DB_PAGE_SIZE *
1280 (context_id & 0x1FFFF) + DPM_TRIGER_TYPE;
1281 tgt->ctx_base = ioremap_nocache(reg_base + reg_off, 4);
1282 if (!tgt->ctx_base)
1283 return -ENOMEM;
1284 return 0;
1285}
1286
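/*
 * bnx2fc_get_next_rqe - return a pointer to 'num_items' contiguous RQ
 * buffers and advance the consumer index. NULL is returned when the
 * request would straddle the end of the ring; callers such as
 * bnx2fc_process_unsol_compl() then copy the buffers one at a time.
 */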
1287char *bnx2fc_get_next_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1288{
1289 char *buf = (char *)tgt->rq + (tgt->rq_cons_idx * BNX2FC_RQ_BUF_SZ);
1290
1291 if (tgt->rq_cons_idx + num_items > BNX2FC_RQ_WQES_MAX)
1292 return NULL;
1293
1294 tgt->rq_cons_idx += num_items;
1295
1296 if (tgt->rq_cons_idx >= BNX2FC_RQ_WQES_MAX)
1297 tgt->rq_cons_idx -= BNX2FC_RQ_WQES_MAX;
1298
1299 return buf;
1300}
1301
1302void bnx2fc_return_rqe(struct bnx2fc_rport *tgt, u8 num_items)
1303{
1304 /* return the rq buffer */
1305 u32 next_prod_idx = tgt->rq_prod_idx + num_items;
1306 if ((next_prod_idx & 0x7fff) == BNX2FC_RQ_WQES_MAX) {
1307 /* Wrap around RQ */
1308 next_prod_idx += 0x8000 - BNX2FC_RQ_WQES_MAX;
1309 }
1310 tgt->rq_prod_idx = next_prod_idx;
1311 tgt->conn_db->rq_prod = tgt->rq_prod_idx;
1312}
1313
1314void bnx2fc_init_cleanup_task(struct bnx2fc_cmd *io_req,
1315 struct fcoe_task_ctx_entry *task,
1316 u16 orig_xid)
1317{
1318 u8 task_type = FCOE_TASK_TYPE_EXCHANGE_CLEANUP;
1319 struct bnx2fc_rport *tgt = io_req->tgt;
1320 u32 context_id = tgt->context_id;
1321
1322 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1323
1324 /* Tx Write Rx Read */
1325 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_EXCHANGE_CLEANUP <<
1326 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1327 task->tx_wr_rx_rd.init_flags = task_type <<
1328 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1329 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1330 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1331 /* Common */
1332 task->cmn.common_flags = context_id <<
1333 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1334 task->cmn.general.cleanup_info.task_id = orig_xid;
1335
1336
1337}
1338
1339void bnx2fc_init_mp_task(struct bnx2fc_cmd *io_req,
1340 struct fcoe_task_ctx_entry *task)
1341{
1342 struct bnx2fc_mp_req *mp_req = &(io_req->mp_req);
1343 struct bnx2fc_rport *tgt = io_req->tgt;
1344 struct fc_frame_header *fc_hdr;
1345 u8 task_type = 0;
1346 u64 *hdr;
1347 u64 temp_hdr[3];
1348 u32 context_id;
1349
1350
1351 /* Obtain task_type */
1352 if ((io_req->cmd_type == BNX2FC_TASK_MGMT_CMD) ||
1353 (io_req->cmd_type == BNX2FC_ELS)) {
1354 task_type = FCOE_TASK_TYPE_MIDPATH;
1355 } else if (io_req->cmd_type == BNX2FC_ABTS) {
1356 task_type = FCOE_TASK_TYPE_ABTS;
1357 }
1358
1359 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1360
1361 /* Setup the task from io_req for easy reference */
1362 io_req->task = task;
1363
1364 BNX2FC_IO_DBG(io_req, "Init MP task for cmd_type = %d task_type = %d\n",
1365 io_req->cmd_type, task_type);
1366
1367 /* Tx only */
1368 if ((task_type == FCOE_TASK_TYPE_MIDPATH) ||
1369 (task_type == FCOE_TASK_TYPE_UNSOLICITED)) {
1370 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1371 (u32)mp_req->mp_req_bd_dma;
1372 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1373 (u32)((u64)mp_req->mp_req_bd_dma >> 32);
1374 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
1375 BNX2FC_IO_DBG(io_req, "init_mp_task - bd_dma = 0x%llx\n",
1376 (unsigned long long)mp_req->mp_req_bd_dma);
1377 }
1378
1379 /* Tx Write Rx Read */
1380 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_INIT <<
1381 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1382 task->tx_wr_rx_rd.init_flags = task_type <<
1383 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1384 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
1385 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
1386 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1387 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1388
1389 /* Common */
1390 task->cmn.data_2_trns = io_req->data_xfer_len;
1391 context_id = tgt->context_id;
1392 task->cmn.common_flags = context_id <<
1393 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1394 task->cmn.common_flags |= 1 <<
1395 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1396 task->cmn.common_flags |= 1 <<
1397 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1398
1399 /* Rx Write Tx Read */
1400 fc_hdr = &(mp_req->req_fc_hdr);
1401 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1402 fc_hdr->fh_ox_id = cpu_to_be16(io_req->xid);
1403 fc_hdr->fh_rx_id = htons(0xffff);
1404 task->rx_wr_tx_rd.rx_id = 0xffff;
1405 } else if (task_type == FCOE_TASK_TYPE_UNSOLICITED) {
1406 fc_hdr->fh_rx_id = cpu_to_be16(io_req->xid);
1407 }
1408
1409 /* Fill FC Header into middle path buffer */
1410 hdr = (u64 *) &task->cmn.general.cmd_info.mp_fc_frame.fc_hdr;
1411 memcpy(temp_hdr, fc_hdr, sizeof(temp_hdr));
1412 hdr[0] = cpu_to_be64(temp_hdr[0]);
1413 hdr[1] = cpu_to_be64(temp_hdr[1]);
1414 hdr[2] = cpu_to_be64(temp_hdr[2]);
1415
1416 /* Rx Only */
1417 if (task_type == FCOE_TASK_TYPE_MIDPATH) {
1418
1419 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1420 (u32)mp_req->mp_resp_bd_dma;
1421 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1422 (u32)((u64)mp_req->mp_resp_bd_dma >> 32);
1423 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size = 1;
1424 }
1425}
1426
1427void bnx2fc_init_task(struct bnx2fc_cmd *io_req,
1428 struct fcoe_task_ctx_entry *task)
1429{
1430 u8 task_type;
1431 struct scsi_cmnd *sc_cmd = io_req->sc_cmd;
1432 struct io_bdt *bd_tbl = io_req->bd_tbl;
1433 struct bnx2fc_rport *tgt = io_req->tgt;
1434 u64 *fcp_cmnd;
1435 u64 tmp_fcp_cmnd[4];
1436 u32 context_id;
1437 int cnt, i;
1438 int bd_count;
1439
1440 memset(task, 0, sizeof(struct fcoe_task_ctx_entry));
1441
1442 /* Setup the task from io_req for easy reference */
1443 io_req->task = task;
1444
1445 if (sc_cmd->sc_data_direction == DMA_TO_DEVICE)
1446 task_type = FCOE_TASK_TYPE_WRITE;
1447 else
1448 task_type = FCOE_TASK_TYPE_READ;
1449
1450 /* Tx only */
1451 if (task_type == FCOE_TASK_TYPE_WRITE) {
1452 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1453 (u32)bd_tbl->bd_tbl_dma;
1454 task->tx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1455 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1456 task->tx_wr_only.sgl_ctx.mul_sges.sgl_size =
1457 bd_tbl->bd_valid;
1458 }
1459
 1460 /* Tx Write Rx Read */
1461 /* Init state to NORMAL */
1462 task->tx_wr_rx_rd.tx_flags = FCOE_TASK_TX_STATE_NORMAL <<
1463 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TX_STATE_SHIFT;
1464 task->tx_wr_rx_rd.init_flags = task_type <<
1465 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_TASK_TYPE_SHIFT;
1466 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_DEV_TYPE_DISK <<
1467 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_DEV_TYPE_SHIFT;
1468 task->tx_wr_rx_rd.init_flags |= FCOE_TASK_CLASS_TYPE_3 <<
1469 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_CLASS_TYPE_SHIFT;
1470
1471 /* Common */
1472 task->cmn.data_2_trns = io_req->data_xfer_len;
1473 context_id = tgt->context_id;
1474 task->cmn.common_flags = context_id <<
1475 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_CID_SHIFT;
1476 task->cmn.common_flags |= 1 <<
1477 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_VALID_SHIFT;
1478 task->cmn.common_flags |= 1 <<
1479 FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME_SHIFT;
1480
1481 /* Set initiative ownership */
1482 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_SEQ_INIT;
1483
1484 /* Set initial seq counter */
1485 task->cmn.tx_low_seq_cnt = 1;
1486
1487 /* Set state to "waiting for the first packet" */
1488 task->cmn.common_flags |= FCOE_TASK_CTX_ENTRY_TX_RX_CMN_EXP_FIRST_FRAME;
1489
1490 /* Fill FCP_CMND IU */
1491 fcp_cmnd = (u64 *)
1492 task->cmn.general.cmd_info.fcp_cmd_payload.opaque;
1493 bnx2fc_build_fcp_cmnd(io_req, (struct fcp_cmnd *)&tmp_fcp_cmnd);
1494
1495 /* swap fcp_cmnd */
1496 cnt = sizeof(struct fcp_cmnd) / sizeof(u64);
1497
1498 for (i = 0; i < cnt; i++) {
1499 *fcp_cmnd = cpu_to_be64(tmp_fcp_cmnd[i]);
1500 fcp_cmnd++;
1501 }
1502
1503 /* Rx Write Tx Read */
1504 task->rx_wr_tx_rd.rx_id = 0xffff;
1505
1506 /* Rx Only */
1507 if (task_type == FCOE_TASK_TYPE_READ) {
1508
1509 bd_count = bd_tbl->bd_valid;
1510 if (bd_count == 1) {
1511
1512 struct fcoe_bd_ctx *fcoe_bd_tbl = bd_tbl->bd_tbl;
1513
1514 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.lo =
1515 fcoe_bd_tbl->buf_addr_lo;
1516 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_addr.hi =
1517 fcoe_bd_tbl->buf_addr_hi;
1518 task->rx_wr_only.sgl_ctx.single_sge.cur_buf_rem =
1519 fcoe_bd_tbl->buf_len;
1520 task->tx_wr_rx_rd.init_flags |= 1 <<
1521 FCOE_TASK_CTX_ENTRY_TXWR_RXRD_SINGLE_SGE_SHIFT;
1522 } else {
1523
1524 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.lo =
1525 (u32)bd_tbl->bd_tbl_dma;
1526 task->rx_wr_only.sgl_ctx.mul_sges.cur_sge_addr.hi =
1527 (u32)((u64)bd_tbl->bd_tbl_dma >> 32);
1528 task->rx_wr_only.sgl_ctx.mul_sges.sgl_size =
1529 bd_tbl->bd_valid;
1530 }
1531 }
1532}
1533
1534/**
1535 * bnx2fc_setup_task_ctx - allocate and map task context
1536 *
1537 * @hba: pointer to adapter structure
1538 *
1539 * allocate memory for task context, and associated BD table to be used
1540 * by firmware
1541 *
1542 */
1543int bnx2fc_setup_task_ctx(struct bnx2fc_hba *hba)
1544{
1545 int rc = 0;
1546 struct regpair *task_ctx_bdt;
1547 dma_addr_t addr;
1548 int i;
1549
1550 /*
1551 * Allocate task context bd table. A page size of bd table
1552 * can map 256 buffers. Each buffer contains 32 task context
1553 * entries. Hence the limit with one page is 8192 task context
1554 * entries.
1555 */
1556 hba->task_ctx_bd_tbl = dma_alloc_coherent(&hba->pcidev->dev,
1557 PAGE_SIZE,
1558 &hba->task_ctx_bd_dma,
1559 GFP_KERNEL);
1560 if (!hba->task_ctx_bd_tbl) {
1561 printk(KERN_ERR PFX "unable to allocate task context BDT\n");
1562 rc = -1;
1563 goto out;
1564 }
1565 memset(hba->task_ctx_bd_tbl, 0, PAGE_SIZE);
1566
1567 /*
1568 * Allocate task_ctx which is an array of pointers pointing to
1569 * a page containing 32 task contexts
1570 */
1571 hba->task_ctx = kzalloc((BNX2FC_TASK_CTX_ARR_SZ * sizeof(void *)),
1572 GFP_KERNEL);
1573 if (!hba->task_ctx) {
1574 printk(KERN_ERR PFX "unable to allocate task context array\n");
1575 rc = -1;
1576 goto out1;
1577 }
1578
1579 /*
1580 * Allocate task_ctx_dma which is an array of dma addresses
1581 */
1582 hba->task_ctx_dma = kmalloc((BNX2FC_TASK_CTX_ARR_SZ *
1583 sizeof(dma_addr_t)), GFP_KERNEL);
1584 if (!hba->task_ctx_dma) {
1585 printk(KERN_ERR PFX "unable to alloc context mapping array\n");
1586 rc = -1;
1587 goto out2;
1588 }
1589
1590 task_ctx_bdt = (struct regpair *)hba->task_ctx_bd_tbl;
1591 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1592
1593 hba->task_ctx[i] = dma_alloc_coherent(&hba->pcidev->dev,
1594 PAGE_SIZE,
1595 &hba->task_ctx_dma[i],
1596 GFP_KERNEL);
1597 if (!hba->task_ctx[i]) {
1598 printk(KERN_ERR PFX "unable to alloc task context\n");
1599 rc = -1;
1600 goto out3;
1601 }
1602 memset(hba->task_ctx[i], 0, PAGE_SIZE);
1603 addr = (u64)hba->task_ctx_dma[i];
1604 task_ctx_bdt->hi = cpu_to_le32((u64)addr >> 32);
1605 task_ctx_bdt->lo = cpu_to_le32((u32)addr);
1606 task_ctx_bdt++;
1607 }
1608 return 0;
1609
1610out3:
1611 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1612 if (hba->task_ctx[i]) {
1613
1614 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1615 hba->task_ctx[i], hba->task_ctx_dma[i]);
1616 hba->task_ctx[i] = NULL;
1617 }
1618 }
1619
1620 kfree(hba->task_ctx_dma);
1621 hba->task_ctx_dma = NULL;
1622out2:
1623 kfree(hba->task_ctx);
1624 hba->task_ctx = NULL;
1625out1:
1626 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1627 hba->task_ctx_bd_tbl, hba->task_ctx_bd_dma);
1628 hba->task_ctx_bd_tbl = NULL;
1629out:
1630 return rc;
1631}
1632
1633void bnx2fc_free_task_ctx(struct bnx2fc_hba *hba)
1634{
1635 int i;
1636
1637 if (hba->task_ctx_bd_tbl) {
1638 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1639 hba->task_ctx_bd_tbl,
1640 hba->task_ctx_bd_dma);
1641 hba->task_ctx_bd_tbl = NULL;
1642 }
1643
1644 if (hba->task_ctx) {
1645 for (i = 0; i < BNX2FC_TASK_CTX_ARR_SZ; i++) {
1646 if (hba->task_ctx[i]) {
1647 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1648 hba->task_ctx[i],
1649 hba->task_ctx_dma[i]);
1650 hba->task_ctx[i] = NULL;
1651 }
1652 }
1653 kfree(hba->task_ctx);
1654 hba->task_ctx = NULL;
1655 }
1656
1657 kfree(hba->task_ctx_dma);
1658 hba->task_ctx_dma = NULL;
1659}
1660
1661static void bnx2fc_free_hash_table(struct bnx2fc_hba *hba)
1662{
1663 int i;
1664 int segment_count;
1665 int hash_table_size;
1666 u32 *pbl;
1667
1668 segment_count = hba->hash_tbl_segment_count;
1669 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
1670 sizeof(struct fcoe_hash_table_entry);
1671
1672 pbl = hba->hash_tbl_pbl;
1673 for (i = 0; i < segment_count; ++i) {
1674 dma_addr_t dma_address;
1675
1676 dma_address = le32_to_cpu(*pbl);
1677 ++pbl;
1678 dma_address += ((u64)le32_to_cpu(*pbl)) << 32;
1679 ++pbl;
1680 dma_free_coherent(&hba->pcidev->dev,
1681 BNX2FC_HASH_TBL_CHUNK_SIZE,
1682 hba->hash_tbl_segments[i],
1683 dma_address);
1684
1685 }
1686
1687 if (hba->hash_tbl_pbl) {
1688 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1689 hba->hash_tbl_pbl,
1690 hba->hash_tbl_pbl_dma);
1691 hba->hash_tbl_pbl = NULL;
1692 }
1693}
1694
1695static int bnx2fc_allocate_hash_table(struct bnx2fc_hba *hba)
1696{
1697 int i;
1698 int hash_table_size;
1699 int segment_count;
1700 int segment_array_size;
1701 int dma_segment_array_size;
1702 dma_addr_t *dma_segment_array;
1703 u32 *pbl;
1704
1705 hash_table_size = BNX2FC_NUM_MAX_SESS * BNX2FC_MAX_ROWS_IN_HASH_TBL *
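	/*
	 * The connection hash table is carved into DMA segments of
	 * BNX2FC_HASH_TBL_CHUNK_SIZE bytes each; segment_count below is a
	 * round-up division of the total table size.
	 */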
1706 sizeof(struct fcoe_hash_table_entry);
1707
1708 segment_count = hash_table_size + BNX2FC_HASH_TBL_CHUNK_SIZE - 1;
1709 segment_count /= BNX2FC_HASH_TBL_CHUNK_SIZE;
1710 hba->hash_tbl_segment_count = segment_count;
1711
1712 segment_array_size = segment_count * sizeof(*hba->hash_tbl_segments);
1713 hba->hash_tbl_segments = kzalloc(segment_array_size, GFP_KERNEL);
1714 if (!hba->hash_tbl_segments) {
1715 printk(KERN_ERR PFX "hash table pointers alloc failed\n");
1716 return -ENOMEM;
1717 }
1718 dma_segment_array_size = segment_count * sizeof(*dma_segment_array);
1719 dma_segment_array = kzalloc(dma_segment_array_size, GFP_KERNEL);
1720 if (!dma_segment_array) {
1721 printk(KERN_ERR PFX "hash table pointers (dma) alloc failed\n");
1722 return -ENOMEM;
1723 }
1724
1725 for (i = 0; i < segment_count; ++i) {
1726 hba->hash_tbl_segments[i] =
1727 dma_alloc_coherent(&hba->pcidev->dev,
1728 BNX2FC_HASH_TBL_CHUNK_SIZE,
1729 &dma_segment_array[i],
1730 GFP_KERNEL);
1731 if (!hba->hash_tbl_segments[i]) {
1732 printk(KERN_ERR PFX "hash segment alloc failed\n");
1733 while (--i >= 0) {
1734 dma_free_coherent(&hba->pcidev->dev,
1735 BNX2FC_HASH_TBL_CHUNK_SIZE,
1736 hba->hash_tbl_segments[i],
1737 dma_segment_array[i]);
1738 hba->hash_tbl_segments[i] = NULL;
1739 }
1740 kfree(dma_segment_array);
1741 return -ENOMEM;
1742 }
1743 memset(hba->hash_tbl_segments[i], 0,
1744 BNX2FC_HASH_TBL_CHUNK_SIZE);
1745 }
1746
1747 hba->hash_tbl_pbl = dma_alloc_coherent(&hba->pcidev->dev,
1748 PAGE_SIZE,
1749 &hba->hash_tbl_pbl_dma,
1750 GFP_KERNEL);
1751 if (!hba->hash_tbl_pbl) {
1752 printk(KERN_ERR PFX "hash table pbl alloc failed\n");
1753 kfree(dma_segment_array);
1754 return -ENOMEM;
1755 }
1756 memset(hba->hash_tbl_pbl, 0, PAGE_SIZE);
1757
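	/*
	 * Build the page base list (PBL): each hash table segment is
	 * described by a low/high 32-bit DMA address pair, low word first.
	 */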
1758 pbl = hba->hash_tbl_pbl;
1759 for (i = 0; i < segment_count; ++i) {
1760 u64 paddr = dma_segment_array[i];
1761 *pbl = cpu_to_le32((u32) paddr);
1762 ++pbl;
1763 *pbl = cpu_to_le32((u32) (paddr >> 32));
1764 ++pbl;
1765 }
1766 pbl = hba->hash_tbl_pbl;
1767 i = 0;
1768 while (*pbl && *(pbl + 1)) {
1769 u32 lo;
1770 u32 hi;
1771 lo = *pbl;
1772 ++pbl;
1773 hi = *pbl;
1774 ++pbl;
1775 ++i;
1776 }
1777 kfree(dma_segment_array);
1778 return 0;
1779}
1780
1781/**
1782 * bnx2fc_setup_fw_resc - Allocate and map hash table and dummy buffer
1783 *
1784 * @hba: Pointer to adapter structure
1785 *
1786 */
1787int bnx2fc_setup_fw_resc(struct bnx2fc_hba *hba)
1788{
1789 u64 addr;
1790 u32 mem_size;
1791 int i;
1792
1793 if (bnx2fc_allocate_hash_table(hba))
1794 return -ENOMEM;
1795
1796 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1797 hba->t2_hash_tbl_ptr = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1798 &hba->t2_hash_tbl_ptr_dma,
1799 GFP_KERNEL);
1800 if (!hba->t2_hash_tbl_ptr) {
1801 printk(KERN_ERR PFX "unable to allocate t2 hash table ptr\n");
1802 bnx2fc_free_fw_resc(hba);
1803 return -ENOMEM;
1804 }
1805 memset(hba->t2_hash_tbl_ptr, 0x00, mem_size);
1806
1807 mem_size = BNX2FC_NUM_MAX_SESS *
1808 sizeof(struct fcoe_t2_hash_table_entry);
1809 hba->t2_hash_tbl = dma_alloc_coherent(&hba->pcidev->dev, mem_size,
1810 &hba->t2_hash_tbl_dma,
1811 GFP_KERNEL);
1812 if (!hba->t2_hash_tbl) {
1813 printk(KERN_ERR PFX "unable to allocate t2 hash table\n");
1814 bnx2fc_free_fw_resc(hba);
1815 return -ENOMEM;
1816 }
1817 memset(hba->t2_hash_tbl, 0x00, mem_size);
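	/*
	 * Chain the T2 hash table entries: each entry's 'next' pointer is
	 * set to the DMA address of the entry that follows it.
	 */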
1818 for (i = 0; i < BNX2FC_NUM_MAX_SESS; i++) {
1819 addr = (unsigned long) hba->t2_hash_tbl_dma +
1820 ((i+1) * sizeof(struct fcoe_t2_hash_table_entry));
1821 hba->t2_hash_tbl[i].next.lo = addr & 0xffffffff;
1822 hba->t2_hash_tbl[i].next.hi = addr >> 32;
1823 }
1824
1825 hba->dummy_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1826 PAGE_SIZE, &hba->dummy_buf_dma,
1827 GFP_KERNEL);
1828 if (!hba->dummy_buffer) {
1829 printk(KERN_ERR PFX "unable to alloc MP Dummy Buffer\n");
1830 bnx2fc_free_fw_resc(hba);
1831 return -ENOMEM;
1832 }
1833
1834 hba->stats_buffer = dma_alloc_coherent(&hba->pcidev->dev,
1835 PAGE_SIZE,
1836 &hba->stats_buf_dma,
1837 GFP_KERNEL);
1838 if (!hba->stats_buffer) {
1839 printk(KERN_ERR PFX "unable to alloc Stats Buffer\n");
1840 bnx2fc_free_fw_resc(hba);
1841 return -ENOMEM;
1842 }
1843 memset(hba->stats_buffer, 0x00, PAGE_SIZE);
1844
1845 return 0;
1846}
1847
1848void bnx2fc_free_fw_resc(struct bnx2fc_hba *hba)
1849{
1850 u32 mem_size;
1851
1852 if (hba->stats_buffer) {
1853 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1854 hba->stats_buffer, hba->stats_buf_dma);
1855 hba->stats_buffer = NULL;
1856 }
1857
1858 if (hba->dummy_buffer) {
1859 dma_free_coherent(&hba->pcidev->dev, PAGE_SIZE,
1860 hba->dummy_buffer, hba->dummy_buf_dma);
1861 hba->dummy_buffer = NULL;
1862 }
1863
1864 if (hba->t2_hash_tbl_ptr) {
1865 mem_size = BNX2FC_NUM_MAX_SESS * sizeof(struct regpair);
1866 dma_free_coherent(&hba->pcidev->dev, mem_size,
1867 hba->t2_hash_tbl_ptr,
1868 hba->t2_hash_tbl_ptr_dma);
1869 hba->t2_hash_tbl_ptr = NULL;
1870 }
1871
1872 if (hba->t2_hash_tbl) {
1873 mem_size = BNX2FC_NUM_MAX_SESS *
1874 sizeof(struct fcoe_t2_hash_table_entry);
1875 dma_free_coherent(&hba->pcidev->dev, mem_size,
1876 hba->t2_hash_tbl, hba->t2_hash_tbl_dma);
1877 hba->t2_hash_tbl = NULL;
1878 }
1879 bnx2fc_free_hash_table(hba);
1880}