blob: 68e2c4afc134488c6d9e9b97bbea176c36e3826d [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
Andrew Vasquezfa90c542005-10-27 11:10:08 -07002 * QLogic Fibre Channel HBA Driver
Saurav Kashyap1e633952013-02-08 01:57:54 -05003 * Copyright (c) 2003-2013 QLogic Corporation
Linus Torvalds1da177e2005-04-16 15:20:36 -07004 *
Andrew Vasquezfa90c542005-10-27 11:10:08 -07005 * See LICENSE.qla2xxx for copyright and licensing details.
Linus Torvalds1da177e2005-04-16 15:20:36 -07006 */
7
/*
 * qla2x00_debounce_register
 *	Debounce register.
 *
 *	Reads a 16-bit hardware register repeatedly until two consecutive
 *	reads return the same value, filtering out transient/unstable reads.
 *
 * Input:
 *	port = register address.
 *
 * Returns:
 *	register value.
 */
static __inline__ uint16_t
qla2x00_debounce_register(volatile uint16_t __iomem *addr)
{
	volatile uint16_t first;
	volatile uint16_t second;

	do {
		first = RD_REG_WORD(addr);
		/* Keep the compiler from collapsing/reordering the two reads;
		 * cpu_relax() gives the hardware a moment between samples. */
		barrier();
		cpu_relax();
		second = RD_REG_WORD(addr);
	} while (first != second);

	return (first);
}
33
Andrew Vasquezfa2a1ce2005-07-06 10:32:07 -070034static inline void
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080035qla2x00_poll(struct rsp_que *rsp)
Linus Torvalds1da177e2005-04-16 15:20:36 -070036{
Andrew Vasquezd2ba5672008-05-12 22:21:14 -070037 unsigned long flags;
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080038 struct qla_hw_data *ha = rsp->hw;
Andrew Vasquezd2ba5672008-05-12 22:21:14 -070039 local_irq_save(flags);
Giridhar Malavalia9083012010-04-12 17:59:55 -070040 if (IS_QLA82XX(ha))
41 qla82xx_poll(0, rsp);
42 else
43 ha->isp_ops->intr_handler(0, rsp);
Andrew Vasquezd2ba5672008-05-12 22:21:14 -070044 local_irq_restore(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -070045}
46
Andrew Vasquez2b6c0ce2005-07-06 10:31:17 -070047static inline uint8_t *
48host_to_fcp_swap(uint8_t *fcp, uint32_t bsize)
49{
50 uint32_t *ifcp = (uint32_t *) fcp;
51 uint32_t *ofcp = (uint32_t *) fcp;
52 uint32_t iter = bsize >> 2;
53
54 for (; iter ; iter--)
55 *ofcp++ = swab32(*ifcp++);
56
57 return fcp;
58}
Andrew Vasquez3d716442005-07-06 10:30:26 -070059
Chad Dupuis5f16b332012-08-22 14:21:00 -040060static inline void
61qla2x00_set_reserved_loop_ids(struct qla_hw_data *ha)
62{
63 int i;
64
65 if (IS_FWI2_CAPABLE(ha))
66 return;
67
68 for (i = 0; i < SNS_FIRST_LOOP_ID; i++)
69 set_bit(i, ha->loop_id_map);
70 set_bit(MANAGEMENT_SERVER, ha->loop_id_map);
71 set_bit(BROADCAST, ha->loop_id_map);
72}
73
Andrew Vasquez3d716442005-07-06 10:30:26 -070074static inline int
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080075qla2x00_is_reserved_id(scsi_qla_host_t *vha, uint16_t loop_id)
Andrew Vasquez3d716442005-07-06 10:30:26 -070076{
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080077 struct qla_hw_data *ha = vha->hw;
Andrew Vasqueze4289242007-07-19 15:05:56 -070078 if (IS_FWI2_CAPABLE(ha))
Andrew Vasquez3d716442005-07-06 10:30:26 -070079 return (loop_id > NPH_LAST_HANDLE);
80
Anirban Chakrabortye315cd22008-11-06 10:40:51 -080081 return ((loop_id > ha->max_loop_id && loop_id < SNS_FIRST_LOOP_ID) ||
Andrew Vasquez3d716442005-07-06 10:30:26 -070082 loop_id == MANAGEMENT_SERVER || loop_id == BROADCAST);
Anirban Chakraborty17d98632008-12-18 10:06:15 -080083}
Arun Easibad75002010-05-04 15:01:30 -070084
85static inline void
Chad Dupuis5f16b332012-08-22 14:21:00 -040086qla2x00_clear_loop_id(fc_port_t *fcport) {
87 struct qla_hw_data *ha = fcport->vha->hw;
88
89 if (fcport->loop_id == FC_NO_LOOP_ID ||
90 qla2x00_is_reserved_id(fcport->vha, fcport->loop_id))
91 return;
92
93 clear_bit(fcport->loop_id, ha->loop_id_map);
94 fcport->loop_id = FC_NO_LOOP_ID;
95}
96
/*
 * qla2x00_clean_dsd_pool
 *	Free every DSD descriptor chained on the command's CRC context:
 *	each entry's DMA buffer goes back to the dl_dma_pool and the
 *	tracking struct itself is kfree()d.  The list head is then
 *	re-initialized so the context is ready for reuse.
 */
static inline void
qla2x00_clean_dsd_pool(struct qla_hw_data *ha, srb_t *sp)
{
	struct dsd_dma *dsd_ptr, *tdsd_ptr;
	struct crc_context *ctx;

	ctx = (struct crc_context *)GET_CMD_CTX_SP(sp);

	/* clean up allocated prev pool */
	/* _safe variant required: each node is deleted and freed while
	 * iterating, so the next pointer must be cached in tdsd_ptr. */
	list_for_each_entry_safe(dsd_ptr, tdsd_ptr,
	    &ctx->dsd_list, list) {
		dma_pool_free(ha->dl_dma_pool, dsd_ptr->dsd_addr,
		    dsd_ptr->dsd_list_dma);
		list_del(&dsd_ptr->list);
		kfree(dsd_ptr);
	}
	INIT_LIST_HEAD(&ctx->dsd_list);
}
Chad Dupuisec426e12011-03-30 11:46:32 -0700115
116static inline void
117qla2x00_set_fcport_state(fc_port_t *fcport, int state)
118{
119 int old_state;
120
121 old_state = atomic_read(&fcport->state);
122 atomic_set(&fcport->state, state);
123
124 /* Don't print state transitions during initial allocation of fcport */
125 if (old_state && old_state != state) {
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700126 ql_dbg(ql_dbg_disc, fcport->vha, 0x207d,
127 "FCPort state transitioned from %s to %s - "
128 "portid=%02x%02x%02x.\n",
Chad Dupuisec426e12011-03-30 11:46:32 -0700129 port_state_str[old_state], port_state_str[state],
130 fcport->d_id.b.domain, fcport->d_id.b.area,
Saurav Kashyap7c3df132011-07-14 12:00:13 -0700131 fcport->d_id.b.al_pa);
Chad Dupuisec426e12011-03-30 11:46:32 -0700132 }
133}
Arun Easi8cb20492011-08-16 11:29:22 -0700134
135static inline int
Arun Easie02587d2011-08-16 11:29:23 -0700136qla2x00_hba_err_chk_enabled(srb_t *sp)
Arun Easi8cb20492011-08-16 11:29:22 -0700137{
Arun Easie02587d2011-08-16 11:29:23 -0700138 /*
139 * Uncomment when corresponding SCSI changes are done.
140 *
141 if (!sp->cmd->prot_chk)
142 return 0;
143 *
144 */
Giridhar Malavali9ba56b92012-02-09 11:15:36 -0800145 switch (scsi_get_prot_op(GET_CMD_SP(sp))) {
Arun Easi8cb20492011-08-16 11:29:22 -0700146 case SCSI_PROT_READ_STRIP:
147 case SCSI_PROT_WRITE_INSERT:
148 if (ql2xenablehba_err_chk >= 1)
149 return 1;
150 break;
151 case SCSI_PROT_READ_PASS:
152 case SCSI_PROT_WRITE_PASS:
153 if (ql2xenablehba_err_chk >= 2)
154 return 1;
155 break;
156 case SCSI_PROT_READ_INSERT:
157 case SCSI_PROT_WRITE_STRIP:
158 return 1;
159 }
160 return 0;
161}
Andrew Vasquezd051a5aa2012-02-09 11:14:05 -0800162
163static inline int
164qla2x00_reset_active(scsi_qla_host_t *vha)
165{
166 scsi_qla_host_t *base_vha = pci_get_drvdata(vha->hw->pdev);
167
168 /* Test appropriate base-vha and vha flags. */
169 return test_bit(ISP_ABORT_NEEDED, &base_vha->dpc_flags) ||
170 test_bit(ABORT_ISP_ACTIVE, &base_vha->dpc_flags) ||
171 test_bit(ISP_ABORT_RETRY, &base_vha->dpc_flags) ||
172 test_bit(ISP_ABORT_NEEDED, &vha->dpc_flags) ||
173 test_bit(ABORT_ISP_ACTIVE, &vha->dpc_flags);
174}
Giridhar Malavali9ba56b92012-02-09 11:15:36 -0800175
176static inline srb_t *
177qla2x00_get_sp(scsi_qla_host_t *vha, fc_port_t *fcport, gfp_t flag)
178{
179 srb_t *sp = NULL;
180 struct qla_hw_data *ha = vha->hw;
181 uint8_t bail;
182
183 QLA_VHA_MARK_BUSY(vha, bail);
184 if (unlikely(bail))
185 return NULL;
186
187 sp = mempool_alloc(ha->srb_mempool, flag);
188 if (!sp)
189 goto done;
190
191 memset(sp, 0, sizeof(*sp));
192 sp->fcport = fcport;
193 sp->iocbs = 1;
194done:
195 if (!sp)
196 QLA_VHA_MARK_NOT_BUSY(vha);
197 return sp;
198}
199
200static inline void
Chad Dupuisb00ee7d2013-02-08 01:57:50 -0500201qla2x00_rel_sp(scsi_qla_host_t *vha, srb_t *sp)
202{
203 mempool_free(sp, vha->hw->srb_mempool);
204 QLA_VHA_MARK_NOT_BUSY(vha);
205}
206
/*
 * qla2x00_init_timer
 *	Arm the SRB's IOCB timeout timer for @tmo seconds and install the
 *	default free handler.  Uses the legacy init_timer() API: the
 *	expiry callback receives the sp pointer via timer.data.
 */
static inline void
qla2x00_init_timer(srb_t *sp, unsigned long tmo)
{
	init_timer(&sp->u.iocb_cmd.timer);
	sp->u.iocb_cmd.timer.expires = jiffies + tmo * HZ;
	sp->u.iocb_cmd.timer.data = (unsigned long)sp;
	sp->u.iocb_cmd.timer.function = qla2x00_sp_timeout;
	add_timer(&sp->u.iocb_cmd.timer);
	sp->free = qla2x00_sp_free;
}
Chad Dupuis642ef982012-02-09 11:15:57 -0800217
218static inline int
219qla2x00_gid_list_size(struct qla_hw_data *ha)
220{
221 return sizeof(struct gid_list_info) * ha->max_fibre_devices;
222}
Chad Dupuis3c290d02013-01-30 03:34:38 -0500223
224static inline void
225qla2x00_do_host_ramp_up(scsi_qla_host_t *vha)
226{
227 if (vha->hw->cfg_lun_q_depth >= ql2xmaxqdepth)
228 return;
229
230 /* Wait at least HOST_QUEUE_RAMPDOWN_INTERVAL before ramping up */
231 if (time_before(jiffies, (vha->hw->host_last_rampdown_time +
232 HOST_QUEUE_RAMPDOWN_INTERVAL)))
233 return;
234
235 /* Wait at least HOST_QUEUE_RAMPUP_INTERVAL between each ramp up */
236 if (time_before(jiffies, (vha->hw->host_last_rampup_time +
237 HOST_QUEUE_RAMPUP_INTERVAL)))
238 return;
239
240 set_bit(HOST_RAMP_UP_QUEUE_DEPTH, &vha->dpc_flags);
241}