/*
 * QLogic Fibre Channel HBA Driver
 * Copyright (c) 2003-2013 QLogic Corporation
 *
 * See LICENSE.qla2xxx for copyright and licensing details.
 */

/**
 * qla24xx_calc_iocbs() - Determine number of Command Type 3 and
 * Continuation Type 1 IOCBs to allocate.
 *
 * @vha: SCSI host adapter pointer (unused in the calculation)
 * @dsds: number of data segment descriptors needed
 *
 * Returns the number of IOCB entries needed to store @dsds.
 */
static inline uint16_t
qla24xx_calc_iocbs(scsi_qla_host_t *vha, uint16_t dsds)
{
	uint16_t iocbs;

	iocbs = 1;
	if (dsds > 1) {
		iocbs += (dsds - 1) / 5;
		if ((dsds - 1) % 5)
			iocbs++;
	}
	return iocbs;
}
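
/*
 * Worked example (illustration only, not driver documentation): the
 * formula above corresponds to one data segment descriptor carried in
 * the command IOCB and five in each Continuation Type 1 IOCB, so for
 * dsds = 12 the result is 1 + (11 / 5) + 1 = 4 IOCB entries
 * (integer division).
 */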

/*
 * qla2x00_debounce_register
 *      Debounce register.
 *
 * Input:
 *      addr = register address.
 *
 * Returns:
 *      register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = RD_REG_WORD(addr);
		barrier();
		cpu_relax();
		second = RD_REG_WORD(addr);
	} while (first != second);

	return (first);
}
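
/*
 * Usage sketch (hypothetical call site, for illustration only; the
 * macro and locals below are assumptions drawn from typical qla2xxx
 * request-queue handling, not from this file):
 *
 *	cnt = qla2x00_debounce_register(ISP_REQ_Q_OUT(ha, reg));
 *
 * The loop above re-reads the register until two consecutive reads
 * agree, filtering out a value caught while the ISP is updating it.
 */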
55
Andrew Vasquezfa2a1ce2005-07-06 10:32:07 -070056static inline void
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080057qla2x00_poll(struct rsp_que *rsp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070058{
Andrew Vasquezd2ba5672008-05-12 22:21:14 -070059 unsigned long flags;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080060 struct qla_hw_data *ha = rsp->hw;
Andrew Vasquezd2ba5672008-05-12 22:21:14 -070061 local_irq_save(flags);
Giridhar Malavalia9083012010-04-12 17:59:55 -070062 if (IS_QLA82XX(ha))
63 qla82xx_poll(0, rsp);
64 else
65 ha->isp_ops->intr_handler(0, rsp);
Andrew Vasquezd2ba5672008-05-12 22:21:14 -070066 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070067}

static inline uint8_t *
host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
{
	uint32_t *ifcp = (uint32_t *) fcp;
	uint32_t *ofcp = (uint32_t *) fcp;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*ofcp++ = swab32(*ifcp++);

	return fcp;
}

static inline void
host_to_adap(uint8_t *src, uint8_t *dst, uint32_t bsize)
{
	uint32_t *isrc = (uint32_t *) src;
	uint32_t *odest = (uint32_t *) dst;
	uint32_t iter = bsize >> 2;

	for (; iter ; iter--)
		*odest++ = cpu_to_le32(*isrc++);
}

/*
 * Mark the reserved loop IDs as in use in ha->loop_id_map; no bits are
 * reserved for FWI2-capable adapters.
 */
static inline void
qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
{
	int i;

	if (IS_FWI2_CAPABLE(ha))
		return;

	for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
		set_bit(i, ha->loop_id_map);
	set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
	set_bit(BROADCAST, ha->loop_id_map);
}

static inline int
qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
{
	struct qla_hw_data *ha = vha->hw;
	if (IS_FWI2_CAPABLE(ha))
		return (loop_id > NPH_LAST_HANDLE);

	return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
	    loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
}

static inline void
qla2x00_clear_loop_id(fc_port_t *fcport)
{
	struct qla_hw_data *ha = fcport->vha->hw;

	if (fcport->loop_id == FC_NO_LOOP_ID ||
	    qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
		return;

	clear_bit(fcport->loop_id, ha->loop_id_map);
	fcport->loop_id = FC_NO_LOOP_ID;
}

static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

	/* Free any previously allocated DSD list entries back to the pool. */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
	    &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}

static inline void
qla2x00_set_fcport_state(fc_port_t *fcport, int state)
{
	int old_state;

	old_state = atomic_read(&fcport->state);
	atomic_set(&fcport->state, state);

	/* Don't print state transitions during initial allocation of fcport */
	if (old_state && old_state != state) {
		ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
		    "FCPort state transitioned from %s to %s - "
		    "portid=%02x%02x%02x.\n",
		    port_state_str[old_state], port_state_str[state],
		    fcport->d_id.b.domain, fcport->d_id.b.area,
		    fcport->d_id.b.al_pa);
	}
}

/*
 * Return 1 if HBA protection-information checking should be enabled for
 * this command's protection operation, based on the ql2xenablehba_err_chk
 * module parameter; return 0 otherwise.
 */
static inline int
qla2x00_hba_err_chk_enabled(srb_t *sp)
{
	/*
	 * Uncomment when corresponding SCSI changes are done.
	 *
	if (!sp->cmd->prot_chk)
		return 0;
	 *
	 */
	switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
	case SCSI_PROT_READ_STRIP:
	case SCSI_PROT_WRITE_INSERT:
		if (ql2xenablehba_err_chk >= 1)
			return 1;
		break;
	case SCSI_PROT_READ_PASS:
	case SCSI_PROT_WRITE_PASS:
		if (ql2xenablehba_err_chk >= 2)
			return 1;
		break;
	case SCSI_PROT_READ_INSERT:
	case SCSI_PROT_WRITE_STRIP:
		return 1;
	}
	return 0;
}

static inline int
qla2x00_reset_active(scsi_qla_host_t *vha)
{
	scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);

	/* Test appropriate base-vha and vha flags. */
	return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
	    test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
	    test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
}

/*
 * Allocate an srb from the host's mempool. The vha busy reference taken
 * here is dropped again if the allocation fails.
 */
static inline srb_t *
qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
{
	srb_t *sp = NULL;
	struct qla_hw_data *ha = vha->hw;
	uint8_t bail;

	QLA_VHA_MARK_BUSY(vha, bail);
	if (unlikely(bail))
		return NULL;

	sp = mempool_alloc(ha->srb_mempool, flag);
	if (!sp)
		goto done;

	memset(sp, 0, sizeof(*sp));
	sp->fcport = fcport;
	sp->iocbs = 1;
done:
	if (!sp)
		QLA_VHA_MARK_NOT_BUSY(vha);
	return sp;
}

static inline void
qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
{
	mempool_free(sp, vha->hw->srb_mempool);
	QLA_VHA_MARK_NOT_BUSY(vha);
}

static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
	init_timer(&sp->u.iocb_cmd.timer);
	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
	add_timer(&sp->u.iocb_cmd.timer);
	sp->free = qla2x00_sp_free;
	if ((IS_QLAFX00(sp->fcport->vha->hw)) &&
	    (sp->type == SRB_FXIOCB_DCMD))
		init_completion(&sp->u.iocb_cmd.u.fxiocb.fxiocb_comp);
}

static inline int
qla2x00_gid_list_size(struct qla_hw_data *ha)
{
	if (IS_QLAFX00(ha))
		return sizeof(uint32_t) * 32;
	else
		return sizeof(struct gid_list_info) * ha->max_fibre_devices;
}

/*
 * Request a queue-depth ramp-up if the configured LUN queue depth is
 * still below ql2xmaxqdepth and both hold-off intervals have elapsed.
 */
static inline void
qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
{
	if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
		return;

	/* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
	if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
	    HOST_QUEUE_RAMPDOWN_INTERVAL)))
		return;

	/* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
	if (time_before(jiffies, (vha->hw->host_last_rampup_time +
	    HOST_QUEUE_RAMPUP_INTERVAL)))
		return;

	set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
}