blob: 4366011cd4002baa56fca33fcd39a04d3623e1ad [file] [log] [blame]
David S. Millercd9ad582007-04-26 21:19:23 -07001/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
Alexey Dobriyane1f2a092007-04-27 15:19:27 -070016#include <linux/irqreturn.h>
David S. Millercd9ad582007-04-26 21:19:23 -070017
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
37/* SCSI bus reset settle time in seconds. */
38static int esp_bus_reset_settle = 3;
39
40static u32 esp_debug;
41#define ESP_DEBUG_INTR 0x00000001
42#define ESP_DEBUG_SCSICMD 0x00000002
43#define ESP_DEBUG_RESET 0x00000004
44#define ESP_DEBUG_MSGIN 0x00000008
45#define ESP_DEBUG_MSGOUT 0x00000010
46#define ESP_DEBUG_CMDDONE 0x00000020
47#define ESP_DEBUG_DISCONNECT 0x00000040
48#define ESP_DEBUG_DATASTART 0x00000080
49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400
Hannes Reinecke1af6f602014-11-24 15:37:22 +010052#define ESP_DEBUG_EVENT 0x00000800
53#define ESP_DEBUG_COMMAND 0x00001000
David S. Millercd9ad582007-04-26 21:19:23 -070054
/* Conditional debug logging helpers.  Each macro expands to a
 * shost_printk() that fires only when the matching ESP_DEBUG_* bit is
 * set in the 'esp_debug' mask; all of them expect a 'struct esp *esp'
 * to be in scope at the call site.
 */
#define esp_log_intr(f, a...) \
do { if (esp_debug & ESP_DEBUG_INTR) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reset(f, a...) \
do { if (esp_debug & ESP_DEBUG_RESET) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgin(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGIN) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_msgout(f, a...) \
do { if (esp_debug & ESP_DEBUG_MSGOUT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_cmddone(f, a...) \
do { if (esp_debug & ESP_DEBUG_CMDDONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_disconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datastart(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATASTART) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_datadone(f, a...) \
do { if (esp_debug & ESP_DEBUG_DATADONE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_reconnect(f, a...) \
do { if (esp_debug & ESP_DEBUG_RECONNECT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_autosense(f, a...) \
do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_event(f, a...) \
do { if (esp_debug & ESP_DEBUG_EVENT) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

#define esp_log_command(f, a...) \
do { if (esp_debug & ESP_DEBUG_COMMAND) \
	shost_printk(KERN_DEBUG, esp->host, f, ## a); \
} while (0)

/* Chip register accessors, indirected through the bus-specific backend. */
#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
117
/* Snapshot the cached chip registers and driver state into an event
 * log entry, for later dumping by esp_dump_cmd_log().
 */
static void esp_log_fill_regs(struct esp *esp,
			      struct esp_event_ent *p)
{
	p->sreg = esp->sreg;
	p->seqreg = esp->seqreg;
	p->sreg2 = esp->sreg2;
	p->ireg = esp->ireg;
	p->select_state = esp->select_state;
	p->event = esp->event;
}
128
129void scsi_esp_cmd(struct esp *esp, u8 val)
130{
131 struct esp_event_ent *p;
132 int idx = esp->esp_event_cur;
133
134 p = &esp->esp_event_log[idx];
135 p->type = ESP_EVENT_TYPE_CMD;
136 p->val = val;
137 esp_log_fill_regs(esp, p);
138
139 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
140
Hannes Reinecke1af6f602014-11-24 15:37:22 +0100141 esp_log_command("cmd[%02x]\n", val);
David S. Millercd9ad582007-04-26 21:19:23 -0700142 esp_write8(val, ESP_CMD);
143}
144EXPORT_SYMBOL(scsi_esp_cmd);
145
/* Transfer the bytes staged in esp->command_block to the chip and kick
 * off the given ESP command.  PIO-only hosts push the bytes through the
 * FIFO by hand; otherwise the bus-specific DMA engine is used.
 */
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		/* HME wants the FIFO flushed before a DMA command starts. */
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
163
David S. Millercd9ad582007-04-26 21:19:23 -0700164static void esp_event(struct esp *esp, u8 val)
165{
166 struct esp_event_ent *p;
167 int idx = esp->esp_event_cur;
168
169 p = &esp->esp_event_log[idx];
170 p->type = ESP_EVENT_TYPE_EVENT;
171 p->val = val;
172 esp_log_fill_regs(esp, p);
173
174 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
175
176 esp->event = val;
177}
178
179static void esp_dump_cmd_log(struct esp *esp)
180{
181 int idx = esp->esp_event_cur;
182 int stop = idx;
183
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100184 shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
David S. Millercd9ad582007-04-26 21:19:23 -0700185 do {
186 struct esp_event_ent *p = &esp->esp_event_log[idx];
187
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100188 shost_printk(KERN_INFO, esp->host,
189 "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
190 "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
191 idx,
192 p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
193 p->val, p->sreg, p->seqreg,
194 p->sreg2, p->ireg, p->select_state, p->event);
David S. Millercd9ad582007-04-26 21:19:23 -0700195
196 idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
197 } while (idx != stop);
198}
199
200static void esp_flush_fifo(struct esp *esp)
201{
202 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
203 if (esp->rev == ESP236) {
204 int lim = 1000;
205
206 while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
207 if (--lim == 0) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100208 shost_printk(KERN_ALERT, esp->host,
209 "ESP_FF_BYTES will not clear!\n");
David S. Millercd9ad582007-04-26 21:19:23 -0700210 break;
211 }
212 udelay(1);
213 }
214 }
215}
216
/* Drain the FASHME FIFO into esp->fifo[].  The HME moves data two
 * bytes at a time; a trailing odd byte is flagged via ESP_STAT2_F1BYTE
 * and must be extracted separately.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		/* Push a pad byte so the dangling odd byte can be read. */
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
233
234static void esp_set_all_config3(struct esp *esp, u8 val)
235{
236 int i;
237
238 for (i = 0; i < ESP_MAX_TARGET; i++)
239 esp->target[i].esp_config3 = val;
240}
241
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	/* Enable the extended-features bit before probing the UID
	 * register below, otherwise the ID read is not valid.
	 */
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		/* Refine the generic FAST revision into the concrete
		 * chip family using the UID family code bits.
		 */
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	/* Round periods up to whole clock-cycle units. */
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		/* REQ/ACK delay differs per variant and bus type. */
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
353
354static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
355{
356 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
FUJITA Tomonori4c2baaa2007-05-26 04:51:32 +0900357 struct scatterlist *sg = scsi_sglist(cmd);
David S. Millercd9ad582007-04-26 21:19:23 -0700358 int dir = cmd->sc_data_direction;
359 int total, i;
360
361 if (dir == DMA_NONE)
362 return;
363
FUJITA Tomonori4c2baaa2007-05-26 04:51:32 +0900364 spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
David S. Millercd9ad582007-04-26 21:19:23 -0700365 spriv->cur_residue = sg_dma_len(sg);
366 spriv->cur_sg = sg;
367
368 total = 0;
369 for (i = 0; i < spriv->u.num_sg; i++)
370 total += sg_dma_len(&sg[i]);
371 spriv->tot_residue = total;
372}
373
374static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
375 struct scsi_cmnd *cmd)
376{
377 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
378
379 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
380 return ent->sense_dma +
381 (ent->sense_ptr - cmd->sense_buffer);
382 }
383
384 return sg_dma_address(p->cur_sg) +
385 (sg_dma_len(p->cur_sg) -
386 p->cur_residue);
387}
388
389static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
390 struct scsi_cmnd *cmd)
391{
392 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
393
394 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
395 return SCSI_SENSE_BUFFERSIZE -
396 (ent->sense_ptr - cmd->sense_buffer);
397 }
398 return p->cur_residue;
399}
400
401static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
402 struct scsi_cmnd *cmd, unsigned int len)
403{
404 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
405
406 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
407 ent->sense_ptr += len;
408 return;
409 }
410
411 p->cur_residue -= len;
412 p->tot_residue -= len;
413 if (p->cur_residue < 0 || p->tot_residue < 0) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100414 shost_printk(KERN_ERR, esp->host,
415 "Data transfer overflow.\n");
416 shost_printk(KERN_ERR, esp->host,
417 "cur_residue[%d] tot_residue[%d] len[%u]\n",
418 p->cur_residue, p->tot_residue, len);
David S. Millercd9ad582007-04-26 21:19:23 -0700419 p->cur_residue = 0;
420 p->tot_residue = 0;
421 }
422 if (!p->cur_residue && p->tot_residue) {
423 p->cur_sg++;
424 p->cur_residue = sg_dma_len(p->cur_sg);
425 }
426}
427
428static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
429{
430 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
431 int dir = cmd->sc_data_direction;
432
433 if (dir == DMA_NONE)
434 return;
435
FUJITA Tomonori4c2baaa2007-05-26 04:51:32 +0900436 esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
David S. Millercd9ad582007-04-26 21:19:23 -0700437}
438
439static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
440{
441 struct scsi_cmnd *cmd = ent->cmd;
442 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
443
444 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
445 ent->saved_sense_ptr = ent->sense_ptr;
446 return;
447 }
448 ent->saved_cur_residue = spriv->cur_residue;
449 ent->saved_cur_sg = spriv->cur_sg;
450 ent->saved_tot_residue = spriv->tot_residue;
451}
452
453static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
454{
455 struct scsi_cmnd *cmd = ent->cmd;
456 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
457
458 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
459 ent->sense_ptr = ent->saved_sense_ptr;
460 return;
461 }
462 spriv->cur_residue = ent->saved_cur_residue;
463 spriv->cur_sg = ent->saved_cur_sg;
464 spriv->tot_residue = ent->saved_tot_residue;
465}
466
467static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
468{
469 if (cmd->cmd_len == 6 ||
470 cmd->cmd_len == 10 ||
471 cmd->cmd_len == 12) {
472 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
473 } else {
474 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
475 }
476}
477
478static void esp_write_tgt_config3(struct esp *esp, int tgt)
479{
480 if (esp->rev > ESP100A) {
481 u8 val = esp->target[tgt].esp_config3;
482
483 if (val != esp->prev_cfg3) {
484 esp->prev_cfg3 = val;
485 esp_write8(val, ESP_CFG3);
486 }
487 }
488}
489
490static void esp_write_tgt_sync(struct esp *esp, int tgt)
491{
492 u8 off = esp->target[tgt].esp_offset;
493 u8 per = esp->target[tgt].esp_period;
494
495 if (off != esp->prev_soff) {
496 esp->prev_soff = off;
497 esp_write8(off, ESP_SOFF);
498 }
499 if (per != esp->prev_stp) {
500 esp->prev_stp = per;
501 esp_write8(per, ESP_STP);
502 }
503}
504
505static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
506{
507 if (esp->rev == FASHME) {
508 /* Arbitrary segment boundaries, 24-bit counts. */
509 if (dma_len > (1U << 24))
510 dma_len = (1U << 24);
511 } else {
512 u32 base, end;
513
514 /* ESP chip limits other variants by 16-bits of transfer
515 * count. Actually on FAS100A and FAS236 we could get
516 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
517 * in the ESP_CFG2 register but that causes other unwanted
518 * changes so we don't use it currently.
519 */
520 if (dma_len > (1U << 16))
521 dma_len = (1U << 16);
522
523 /* All of the DMA variants hooked up to these chips
524 * cannot handle crossing a 24-bit address boundary.
525 */
526 base = dma_addr & ((1U << 24) - 1U);
527 end = base + dma_len;
528 if (end > (1U << 24))
529 end = (1U <<24);
530 dma_len = end - base;
531 }
532 return dma_len;
533}
534
535static int esp_need_to_nego_wide(struct esp_target_data *tp)
536{
537 struct scsi_target *target = tp->starget;
538
539 return spi_width(target) != tp->nego_goal_width;
540}
541
542static int esp_need_to_nego_sync(struct esp_target_data *tp)
543{
544 struct scsi_target *target = tp->starget;
545
546 /* When offset is zero, period is "don't care". */
547 if (!spi_offset(target) && !tp->nego_goal_offset)
548 return 0;
549
550 if (spi_offset(target) == tp->nego_goal_offset &&
551 spi_period(target) == tp->nego_goal_period)
552 return 0;
553
554 return 1;
555}
556
/* Claim the per-LUN queue slot for this command.
 *
 * Untagged commands occupy the single non_tagged_cmd slot and may
 * only run once all tagged commands have drained; tagged commands own
 * the tagged_cmds[] slot indexed by their tag value.  Returns 0 when
 * the command may be issued, -EBUSY when it must stay queued.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	/* orig_tag[1] holds the tag value; each live tag owns one slot. */
	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
601
602static void esp_free_lun_tag(struct esp_cmd_entry *ent,
603 struct esp_lun_data *lp)
604{
David S. Miller21af8102013-08-01 18:08:34 -0700605 if (ent->orig_tag[0]) {
606 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
607 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
David S. Millercd9ad582007-04-26 21:19:23 -0700608 lp->num_tagged--;
609 } else {
610 BUG_ON(lp->non_tagged_cmd != ent);
611 lp->non_tagged_cmd = NULL;
612 }
613}
614
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data. I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		/* Map the mid-layer sense buffer for the DMA transfer. */
		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY + 6-byte REQUEST_SENSE CDB in the command block. */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* Pre-SCSI-3 devices want the LUN repeated in CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
673
/* Scan the queued command list for the first command that can be
 * issued now, assigning its queue tag (or none) along the way.
 * Returns NULL when every queued command is currently blocked.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		/* Autosense commands bypass tag allocation entirely. */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Keep a pristine copy; ent->tag[] may be rewritten later. */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
704
/* If the chip is idle, pick the next issuable queued command, build
 * its IDENTIFY/tag/CDB bytes in the command block, and start target
 * selection via the appropriate ESP select command.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	/* Bail when a command is already active or a reset is pending. */
	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		/* Fast path: chip sequences IDENTIFY[+tag]+CDB itself. */
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: CDB bytes are fed by hand after selection. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the tag message to any negotiation bytes. */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
854
855static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
856{
857 struct list_head *head = &esp->esp_cmd_pool;
858 struct esp_cmd_entry *ret;
859
860 if (list_empty(head)) {
861 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
862 } else {
863 ret = list_entry(head->next, struct esp_cmd_entry, list);
864 list_del(&ret->list);
865 memset(ret, 0, sizeof(*ret));
866 }
867 return ret;
868}
869
/* Return a command entry to the free pool; entries are recycled
 * rather than freed to avoid allocator traffic in the I/O path.
 */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
874
/* Complete a command: unmap its DMA, release its LUN tag slot, hand
 * the result to the SCSI mid-layer, and try to start the next one.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	/* Wake up the error handler if it is waiting on this command. */
	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	/* Recycle the entry and kick the issue machinery. */
	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
925
/* Pack SCSI status, message byte and driver code into the layered
 * scsi_cmnd result word: driver[23:16] | message[15:8] | status[7:0].
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
931
932static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
933{
934 struct scsi_device *dev = ent->cmd->device;
935 struct esp_lun_data *lp = dev->hostdata;
936
937 scsi_track_queue_full(dev, lp->num_tagged - 1);
938}
939
/* ->queuecommand implementation (legacy host-locked variant, wrapped
 * by DEF_SCSI_QCMD below).  Queues the command and tries to issue it
 * immediately if the chip is idle.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	/* Mark the command as not yet DMA-mapped. */
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)
966
David S. Millercd9ad582007-04-26 21:19:23 -0700967static int esp_check_gross_error(struct esp *esp)
968{
969 if (esp->sreg & ESP_STAT_SPAM) {
970 /* Gross Error, could be one of:
971 * - top of fifo overwritten
972 * - top of command register overwritten
973 * - DMA programmed with wrong direction
974 * - improper phase change
975 */
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100976 shost_printk(KERN_ERR, esp->host,
977 "Gross error sreg[%02x]\n", esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -0700978 /* XXX Reset the chip. XXX */
979 return 1;
980 }
981 return 0;
982}
983
/* Validate that the pending interrupt is really ours.  Returns 1 for
 * a SCSI bus reset interrupt, 0 when normal processing should
 * continue, and -1 for a spurious interrupt or a DMA error.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			/* A bus-reset interrupt is still legitimate here. */
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1020
/* Flag the driver for a chip/bus reset; the actual reset is performed
 * by the event state machine once the current work unwinds.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1028
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the tagged command entry that is reconnecting, or NULL on
 * any failure (the caller then schedules a bus reset).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	/* A tagged reconnect with no tagged commands outstanding on
	 * this LUN makes no sense; bail out and let the caller reset.
	 */
	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait (bounded) for the first interrupt of the tag
	 * message transfer.
	 */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* We must still be in the message-in phase to fetch the two
	 * tag bytes.
	 */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message.  */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (with 1us delays) until the function-done interrupt
	 * signals that the two tag bytes have arrived.
	 */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 is the tag message type, byte 1 the tag number. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1123
/* Handle a reselection of the host adapter by a target that holds a
 * disconnected command.  Decodes the reselecting target/lun, looks up
 * the command entry (possibly polling for tag bytes), and makes it the
 * active command.  Returns 1 when the reconnect was processed, 0 when
 * something went wrong and a bus reset was scheduled instead.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit must be set (a single target). */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Re-program per-target sync/config settings before resuming. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* Untagged command pending?  Otherwise poll for the tag bytes. */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* If an abort was requested while disconnected, send it now. */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1214
/* Complete a selection attempt for the active command.  Examines the
 * interrupt status to distinguish reselection-won, disconnect, and
 * successful selection.  Return value feeds the intr_done logic in
 * __esp_interrupt(); non-zero means no further event processing needed.
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	/* A target reselected us while we were trying to select. */
	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			/* Autosense selection only mapped the sense buffer. */
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		/* Selection timed out: complete with bad-target status. */
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1316
/* Compute how many bytes of the current DMA segment actually made it
 * to/from the target, accounting for bytes still sitting in the FIFO
 * and chip-specific residue quirks.  Returns the byte count, or -1 on
 * the ESP100 sync-transfer bug (caller resets and disables sync for
 * the target).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode every FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Remaining transfer count; TCNT set means the counter hit zero. */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/*
	 * The am53c974 has a DMA 'pecularity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0 '. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Store the PIO'd byte at the right spot in the buffer. */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On writes, bytes still in the FIFO never reached the target. */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1407
/* Commit a negotiated synchronous transfer agreement: record the SCSI
 * period/offset in the SPI transport class, program the chip's STP/SOFF
 * (and fast-SCSI CFG3 bit where supported), and clear the negotiation
 * flags.  Passing zeros everywhere reverts the target to async.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			/* FAS100A and newer use a different fast-SCSI bit. */
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			if (scsi_period < 50) {
				/* Fast transfers: FASHME needs no REQ/ACK delay. */
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the programmed values so reconnects can restore them. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1446
/* Handle a MESSAGE REJECT from the target.  If we were negotiating
 * wide transfers, fall back to narrow (and possibly start sync
 * negotiation); if negotiating sync, fall back to async.  Any other
 * reject means something is badly wrong, so abort the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Target rejected WDTR: stay narrow. */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			/* Still need to negotiate sync parameters. */
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Target rejected SDTR: revert to asynchronous transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Unexpected reject: give up on the current task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1487
/* Process an incoming SDTR (synchronous data transfer request)
 * message.  Validates the target's period/offset against the chip's
 * limits, converts the period into ESP clock ticks and programs the
 * agreement, or rejects/renegotiates as appropriate.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only accept SDTR if we initiated sync negotiation. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* The ESP supports at most a sync offset of 15. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Too slow for us: counter-offer async. */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the period (in 4ns units) into chip clock ticks. */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Counter-propose our own goal parameters. */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1538
/* Process an incoming WDTR (wide data transfer request) message.
 * Only the FASHME chip supports wide transfers; program CFG3
 * accordingly and, if needed, continue with sync negotiation.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	/* Wide transfers are a FASHME-only feature. */
	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* Only accept WDTR if we initiated wide negotiation. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Wide agreement resets any previous sync agreement. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		/* Follow up with sync negotiation. */
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1587
1588static void esp_msgin_extended(struct esp *esp)
1589{
1590 struct esp_cmd_entry *ent = esp->active_cmd;
1591 struct scsi_cmnd *cmd = ent->cmd;
1592 struct esp_target_data *tp;
1593 int tgt = cmd->device->id;
1594
1595 tp = &esp->target[tgt];
1596 if (esp->msg_in[2] == EXTENDED_SDTR) {
1597 esp_msgin_sdtr(esp, tp);
1598 return;
1599 }
1600 if (esp->msg_in[2] == EXTENDED_WDTR) {
1601 esp_msgin_wdtr(esp, tp);
1602 return;
1603 }
1604
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001605 shost_printk(KERN_INFO, esp->host,
1606 "Unexpected extended msg type %x\n", esp->msg_in[2]);
David S. Millercd9ad582007-04-26 21:19:23 -07001607
1608 esp->msg_out[0] = ABORT_TASK_SET;
1609 esp->msg_out_len = 1;
1610 scsi_esp_cmd(esp, ESP_CMD_SATN);
1611}
1612
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Byte 1 is the length of the remainder; wait until the
		 * whole extended message has arrived before dispatching.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		/* Two-byte message; wait for the residue count byte. */
		if (len == 1)
			return 1;

		/* Only a one-byte residue makes sense on a 16-bit bus. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Back the data pointer up by one byte, possibly crossing
		 * back into the previous scatterlist entry.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record which message ended the nexus; the FREE_BUS
		 * event handler acts on ent->message.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1687
/* The central event state machine, driven from __esp_interrupt().
 * Processes esp->event (phase checks, data transfer start/completion,
 * status, bus free, message in/out, command delivery, reset) and
 * returns non-zero when interrupt processing is complete for now,
 * zero when a reset has been scheduled and processing must stop.
 */
static int esp_process_event(struct esp *esp)
{
	int write, i;

again:
	write = 0;
	esp_log_event("process event %d phase %x\n",
		      esp->event, esp->sreg & ESP_STAT_PMASK);
	switch (esp->event) {
	case ESP_EVENT_CHECK_PHASE:
		/* Map the current SCSI bus phase onto the next event. */
		switch (esp->sreg & ESP_STAT_PMASK) {
		case ESP_DOP:
			esp_event(esp, ESP_EVENT_DATA_OUT);
			break;
		case ESP_DIP:
			esp_event(esp, ESP_EVENT_DATA_IN);
			break;
		case ESP_STATP:
			esp_flush_fifo(esp);
			scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
			esp_event(esp, ESP_EVENT_STATUS);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;

		case ESP_MOP:
			esp_event(esp, ESP_EVENT_MSGOUT);
			break;

		case ESP_MIP:
			esp_event(esp, ESP_EVENT_MSGIN);
			break;

		case ESP_CMDP:
			esp_event(esp, ESP_EVENT_CMD_START);
			break;

		default:
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected phase, sreg=%02x\n",
				     esp->sreg);
			esp_schedule_reset(esp);
			return 0;
		}
		goto again;
		break;

	case ESP_EVENT_DATA_IN:
		write = 1;
		/* fallthru */

	case ESP_EVENT_DATA_OUT: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
		unsigned int dma_len = esp_cur_dma_len(ent, cmd);

		if (esp->rev == ESP100)
			scsi_esp_cmd(esp, ESP_CMD_NULL);

		if (write)
			ent->flags |= ESP_CMD_FLAG_WRITE;
		else
			ent->flags &= ~ESP_CMD_FLAG_WRITE;

		/* Clamp the transfer to what the DMA engine can do. */
		if (esp->ops->dma_length_limit)
			dma_len = esp->ops->dma_length_limit(esp, dma_addr,
							     dma_len);
		else
			dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);

		esp->data_dma_len = dma_len;

		if (!dma_len) {
			shost_printk(KERN_ERR, esp->host,
				     "DMA length is zero!\n");
			shost_printk(KERN_ERR, esp->host,
				     "cur adr[%08llx] len[%08x]\n",
				     (unsigned long long)esp_cur_dma_addr(ent, cmd),
				     esp_cur_dma_len(ent, cmd));
			esp_schedule_reset(esp);
			return 0;
		}

		esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
				  (unsigned long long)dma_addr, dma_len, write);

		esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
				       write, ESP_CMD_DMA | ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_DATA_DONE);
		break;
	}
	case ESP_EVENT_DATA_DONE: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;
		int bytes_sent;

		if (esp->ops->dma_error(esp)) {
			shost_printk(KERN_INFO, esp->host,
				     "data done, DMA error, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		if (ent->flags & ESP_CMD_FLAG_WRITE) {
			/* XXX parity errors, etc. XXX */

			esp->ops->dma_drain(esp);
		}
		esp->ops->dma_invalidate(esp);

		if (esp->ireg != ESP_INTR_BSERV) {
			/* We should always see exactly a bus-service
			 * interrupt at the end of a successful transfer.
			 */
			shost_printk(KERN_INFO, esp->host,
				     "data done, not BSERV, resetting\n");
			esp_schedule_reset(esp);
			return 0;
		}

		bytes_sent = esp_data_bytes_sent(esp, ent, cmd);

		esp_log_datadone("data done flgs[%x] sent[%d]\n",
				 ent->flags, bytes_sent);

		if (bytes_sent < 0) {
			/* XXX force sync mode for this target XXX */
			esp_schedule_reset(esp);
			return 0;
		}

		/* Advance scatterlist pointers past the bytes moved. */
		esp_advance_dma(esp, ent, cmd, bytes_sent);
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	}

	case ESP_EVENT_STATUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		if (esp->ireg & ESP_INTR_FDONE) {
			/* ICCSEQ left status then message in the FIFO. */
			ent->status = esp_read8(ESP_FDATA);
			ent->message = esp_read8(ESP_FDATA);
			scsi_esp_cmd(esp, ESP_CMD_MOK);
		} else if (esp->ireg == ESP_INTR_BSERV) {
			ent->status = esp_read8(ESP_FDATA);
			ent->message = 0xff;
			esp_event(esp, ESP_EVENT_MSGIN);
			return 0;
		}

		if (ent->message != COMMAND_COMPLETE) {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in status\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}

		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_FREE_BUS: {
		struct esp_cmd_entry *ent = esp->active_cmd;
		struct scsi_cmnd *cmd = ent->cmd;

		if (ent->message == COMMAND_COMPLETE ||
		    ent->message == DISCONNECT)
			scsi_esp_cmd(esp, ESP_CMD_ESEL);

		if (ent->message == COMMAND_COMPLETE) {
			esp_log_cmddone("Command done status[%x] message[%x]\n",
					ent->status, ent->message);
			if (ent->status == SAM_STAT_TASK_SET_FULL)
				esp_event_queue_full(esp, ent);

			/* CHECK CONDITION triggers an autosense cycle
			 * before the command is completed.
			 */
			if (ent->status == SAM_STAT_CHECK_CONDITION &&
			    !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
				ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
				esp_autosense(esp, ent);
			} else {
				esp_cmd_is_done(esp, ent, cmd,
						compose_result(ent->status,
							       ent->message,
							       DID_OK));
			}
		} else if (ent->message == DISCONNECT) {
			esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
					   cmd->device->id,
					   ent->tag[0], ent->tag[1]);

			/* Target disconnected; try to start another command. */
			esp->active_cmd = NULL;
			esp_maybe_execute_command(esp);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "Unexpected message %x in freebus\n",
				     ent->message);
			esp_schedule_reset(esp);
			return 0;
		}
		if (esp->active_cmd)
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	}
	case ESP_EVENT_MSGOUT: {
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);

		if (esp_debug & ESP_DEBUG_MSGOUT) {
			int i;
			printk("ESP: Sending message [ ");
			for (i = 0; i < esp->msg_out_len; i++)
				printk("%02x ", esp->msg_out[i]);
			printk("]\n");
		}

		if (esp->rev == FASHME) {
			int i;

			/* Always use the fifo.  */
			for (i = 0; i < esp->msg_out_len; i++) {
				esp_write8(esp->msg_out[i], ESP_FDATA);
				esp_write8(0, ESP_FDATA);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
		} else {
			if (esp->msg_out_len == 1) {
				esp_write8(esp->msg_out[0], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else if (esp->flags & ESP_FLAG_USE_FIFO) {
				for (i = 0; i < esp->msg_out_len; i++)
					esp_write8(esp->msg_out[i], ESP_FDATA);
				scsi_esp_cmd(esp, ESP_CMD_TI);
			} else {
				/* Use DMA. */
				memcpy(esp->command_block,
				       esp->msg_out,
				       esp->msg_out_len);

				esp->ops->send_dma_cmd(esp,
						       esp->command_block_dma,
						       esp->msg_out_len,
						       esp->msg_out_len,
						       0,
						       ESP_CMD_DMA|ESP_CMD_TI);
			}
		}
		esp_event(esp, ESP_EVENT_MSGOUT_DONE);
		break;
	}
	case ESP_EVENT_MSGOUT_DONE:
		if (esp->rev == FASHME) {
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		} else {
			/* Tear down the DMA mapping used for the msg-out. */
			if (esp->msg_out_len > 1)
				esp->ops->dma_invalidate(esp);
		}

		if (!(esp->ireg & ESP_INTR_DC)) {
			if (esp->rev != FASHME)
				scsi_esp_cmd(esp, ESP_CMD_NULL);
		}
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		goto again;
	case ESP_EVENT_MSGIN:
		if (esp->ireg & ESP_INTR_BSERV) {
			/* Prepare the FIFO and clock in the next byte. */
			if (esp->rev == FASHME) {
				if (!(esp_read8(ESP_STATUS2) &
				      ESP_STAT2_FEMPTY))
					scsi_esp_cmd(esp, ESP_CMD_FLUSH);
			} else {
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);
				if (esp->rev == ESP100)
					scsi_esp_cmd(esp, ESP_CMD_NULL);
			}
			scsi_esp_cmd(esp, ESP_CMD_TI);
			esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
			return 1;
		}
		if (esp->ireg & ESP_INTR_FDONE) {
			u8 val;

			if (esp->rev == FASHME)
				val = esp->fifo[0];
			else
				val = esp_read8(ESP_FDATA);
			esp->msg_in[esp->msg_in_len++] = val;

			esp_log_msgin("Got msgin byte %x\n", val);

			/* Non-zero means more bytes are still expected. */
			if (!esp_msgin_process(esp))
				esp->msg_in_len = 0;

			if (esp->rev == FASHME)
				scsi_esp_cmd(esp, ESP_CMD_FLUSH);

			scsi_esp_cmd(esp, ESP_CMD_MOK);

			if (esp->event != ESP_EVENT_FREE_BUS)
				esp_event(esp, ESP_EVENT_CHECK_PHASE);
		} else {
			shost_printk(KERN_INFO, esp->host,
				     "MSGIN neither BSERV not FDON, resetting");
			esp_schedule_reset(esp);
			return 0;
		}
		break;
	case ESP_EVENT_CMD_START:
		/* Send the (remaining) CDB bytes out of the command block. */
		memcpy(esp->command_block, esp->cmd_bytes_ptr,
		       esp->cmd_bytes_left);
		esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
		esp_event(esp, ESP_EVENT_CMD_DONE);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		break;
	case ESP_EVENT_CMD_DONE:
		esp->ops->dma_invalidate(esp);
		if (esp->ireg & ESP_INTR_BSERV) {
			esp_event(esp, ESP_EVENT_CHECK_PHASE);
			goto again;
		}
		esp_schedule_reset(esp);
		return 0;
		break;

	case ESP_EVENT_RESET:
		/* Issue the actual chip/bus reset command. */
		scsi_esp_cmd(esp, ESP_CMD_RS);
		break;

	default:
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected event %x, resetting\n", esp->event);
		esp_schedule_reset(esp);
		return 0;
		break;
	}
	return 1;
}
2024
/* Tear down one active command during bus-reset cleanup: release its
 * DMA mapping and tag, complete it back to the midlayer with
 * DID_RESET, and free the command entry.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	/* An in-flight autosense also mapped the sense buffer. */
	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2043
2044static void esp_clear_hold(struct scsi_device *dev, void *data)
2045{
2046 struct esp_lun_data *lp = dev->hostdata;
2047
2048 BUG_ON(lp->num_tagged);
2049 lp->hold = 0;
2050}
2051
/* Clean up all driver state after a SCSI bus reset: fail every queued
 * and active command with DID_RESET, and force renegotiation of
 * sync/wide parameters for every target.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never issued: just complete them back as reset. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Active commands additionally need DMA/tag teardown. */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		/* Release any per-LUN 'hold' gates on this target. */
		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2092
/* Runs under host->lock */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 * Hence STATUS and SSTEP must be sampled first; do not
	 * reorder these three reads.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		/* A reset we scheduled has now completed. */
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* A bus reset (ours or someone else's) also forces cleanup. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake up an eh_bus_reset_handler waiting on us. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* On FASHME the FIFO may hold reselection ID bytes or a
		 * straggling message byte; snapshot it before processing.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		/* We are a host adapter: selected/ATN/illegal-command
		 * interrupts should never happen in normal operation.
		 */
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected; lost arbitration if we were selecting. */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	/* Crank the event state machine until it is satisfied. */
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2170
/* Shared IRQ handler for all ESP front-end drivers.  Takes the host
 * lock and services interrupts until the chip goes quiet.  Returns
 * IRQ_HANDLED if at least one interrupt was pending, else IRQ_NONE.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			/* Only loop if the previous pass asked for a
			 * quick re-check of interrupt status.
			 */
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			/* Poll briefly for a follow-on interrupt to
			 * avoid the latency of a fresh IRQ round-trip.
			 */
			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2202
Adrian Bunk76246802007-10-11 17:35:20 +02002203static void esp_get_revision(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002204{
2205 u8 val;
2206
2207 esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
2208 esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
2209 esp_write8(esp->config2, ESP_CFG2);
2210
2211 val = esp_read8(ESP_CFG2);
2212 val &= ~ESP_CONFIG2_MAGIC;
2213 if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
2214 /* If what we write to cfg2 does not come back, cfg2 is not
2215 * implemented, therefore this must be a plain esp100.
2216 */
2217 esp->rev = ESP100;
2218 } else {
2219 esp->config2 = 0;
2220 esp_set_all_config3(esp, 5);
2221 esp->prev_cfg3 = 5;
2222 esp_write8(esp->config2, ESP_CFG2);
2223 esp_write8(0, ESP_CFG3);
2224 esp_write8(esp->prev_cfg3, ESP_CFG3);
2225
2226 val = esp_read8(ESP_CFG3);
2227 if (val != 5) {
2228 /* The cfg2 register is implemented, however
2229 * cfg3 is not, must be esp100a.
2230 */
2231 esp->rev = ESP100A;
2232 } else {
2233 esp_set_all_config3(esp, 0);
2234 esp->prev_cfg3 = 0;
2235 esp_write8(esp->prev_cfg3, ESP_CFG3);
2236
2237 /* All of cfg{1,2,3} implemented, must be one of
2238 * the fas variants, figure out which one.
2239 */
2240 if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
2241 esp->rev = FAST;
2242 esp->sync_defp = SYNC_DEFP_FAST;
2243 } else {
2244 esp->rev = ESP236;
2245 }
2246 esp->config2 = 0;
2247 esp_write8(esp->config2, ESP_CFG2);
2248 }
2249 }
2250}
2251
Adrian Bunk76246802007-10-11 17:35:20 +02002252static void esp_init_swstate(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002253{
2254 int i;
2255
2256 INIT_LIST_HEAD(&esp->queued_cmds);
2257 INIT_LIST_HEAD(&esp->active_cmds);
2258 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2259
2260 /* Start with a clear state, domain validation (via ->slave_configure,
2261 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2262 * commands.
2263 */
2264 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2265 esp->target[i].flags = 0;
2266 esp->target[i].nego_goal_period = 0;
2267 esp->target[i].nego_goal_offset = 0;
2268 esp->target[i].nego_goal_width = 0;
2269 esp->target[i].nego_goal_tags = 0;
2270 }
2271}
2272
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal CFG1 value (re-enables reset reporting). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2297
Adrian Bunk76246802007-10-11 17:35:20 +02002298static void esp_set_clock_params(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002299{
Finn Thain6fe07aa2008-04-25 10:06:05 -05002300 int fhz;
David S. Millercd9ad582007-04-26 21:19:23 -07002301 u8 ccf;
2302
2303 /* This is getting messy but it has to be done correctly or else
2304 * you get weird behavior all over the place. We are trying to
2305 * basically figure out three pieces of information.
2306 *
2307 * a) Clock Conversion Factor
2308 *
2309 * This is a representation of the input crystal clock frequency
2310 * going into the ESP on this machine. Any operation whose timing
2311 * is longer than 400ns depends on this value being correct. For
2312 * example, you'll get blips for arbitration/selection during high
2313 * load or with multiple targets if this is not set correctly.
2314 *
2315 * b) Selection Time-Out
2316 *
2317 * The ESP isn't very bright and will arbitrate for the bus and try
2318 * to select a target forever if you let it. This value tells the
2319 * ESP when it has taken too long to negotiate and that it should
2320 * interrupt the CPU so we can see what happened. The value is
2321 * computed as follows (from NCR/Symbios chip docs).
2322 *
2323 * (Time Out Period) * (Input Clock)
2324 * STO = ----------------------------------
2325 * (8192) * (Clock Conversion Factor)
2326 *
2327 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2328 *
2329 * c) Imperical constants for synchronous offset and transfer period
2330 * register values
2331 *
2332 * This entails the smallest and largest sync period we could ever
2333 * handle on this ESP.
2334 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002335 fhz = esp->cfreq;
David S. Millercd9ad582007-04-26 21:19:23 -07002336
Finn Thain6fe07aa2008-04-25 10:06:05 -05002337 ccf = ((fhz / 1000000) + 4) / 5;
David S. Millercd9ad582007-04-26 21:19:23 -07002338 if (ccf == 1)
2339 ccf = 2;
2340
2341 /* If we can't find anything reasonable, just assume 20MHZ.
2342 * This is the clock frequency of the older sun4c's where I've
2343 * been unable to find the clock-frequency PROM property. All
2344 * other machines provide useful values it seems.
2345 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002346 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2347 fhz = 20000000;
David S. Millercd9ad582007-04-26 21:19:23 -07002348 ccf = 4;
2349 }
2350
2351 esp->cfact = (ccf == 8 ? 0 : ccf);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002352 esp->cfreq = fhz;
2353 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
David S. Millercd9ad582007-04-26 21:19:23 -07002354 esp->ctick = ESP_TICK(ccf, esp->ccycle);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002355 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
David S. Millercd9ad582007-04-26 21:19:23 -07002356 esp->sync_defp = SYNC_DEFP_SLOW;
2357}
2358
/* Human-readable chip names, indexed by esp->rev (see esp_get_revision). */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by all registered ESP hosts;
 * set up once in esp_init().
 */
static struct scsi_transport_template *esp_transport_template;
2370
Adrian Bunk76246802007-10-11 17:35:20 +02002371int scsi_esp_register(struct esp *esp, struct device *dev)
David S. Millercd9ad582007-04-26 21:19:23 -07002372{
2373 static int instance;
2374 int err;
2375
Hannes Reinecke3707a182014-11-24 15:37:20 +01002376 if (!esp->num_tags)
2377 esp->num_tags = ESP_DEFAULT_TAGS;
2378 else if (esp->num_tags >= ESP_MAX_TAG)
2379 esp->num_tags = ESP_MAX_TAG - 1;
David S. Millercd9ad582007-04-26 21:19:23 -07002380 esp->host->transportt = esp_transport_template;
2381 esp->host->max_lun = ESP_MAX_LUN;
2382 esp->host->cmd_per_lun = 2;
David Millerff4abd62007-08-24 22:25:58 -07002383 esp->host->unique_id = instance;
David S. Millercd9ad582007-04-26 21:19:23 -07002384
2385 esp_set_clock_params(esp);
2386
2387 esp_get_revision(esp);
2388
2389 esp_init_swstate(esp);
2390
2391 esp_bootup_reset(esp);
2392
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002393 dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
2394 esp->host->unique_id, esp->regs, esp->dma_regs,
2395 esp->host->irq);
2396 dev_printk(KERN_INFO, dev,
2397 "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
2398 esp->host->unique_id, esp_chip_names[esp->rev],
2399 esp->cfreq / 1000000, esp->cfact, esp->scsi_id);
David S. Millercd9ad582007-04-26 21:19:23 -07002400
2401 /* Let the SCSI bus reset settle. */
2402 ssleep(esp_bus_reset_settle);
2403
2404 err = scsi_add_host(esp->host, dev);
2405 if (err)
2406 return err;
2407
David Millerff4abd62007-08-24 22:25:58 -07002408 instance++;
David S. Millercd9ad582007-04-26 21:19:23 -07002409
2410 scsi_scan_host(esp->host);
2411
2412 return 0;
2413}
2414EXPORT_SYMBOL(scsi_esp_register);
2415
Adrian Bunk76246802007-10-11 17:35:20 +02002416void scsi_esp_unregister(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002417{
2418 scsi_remove_host(esp->host);
2419}
2420EXPORT_SYMBOL(scsi_esp_unregister);
2421
James Bottomleyec5e69f2008-06-23 14:52:09 -05002422static int esp_target_alloc(struct scsi_target *starget)
2423{
2424 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2425 struct esp_target_data *tp = &esp->target[starget->id];
2426
2427 tp->starget = starget;
2428
2429 return 0;
2430}
2431
2432static void esp_target_destroy(struct scsi_target *starget)
2433{
2434 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2435 struct esp_target_data *tp = &esp->target[starget->id];
2436
2437 tp->starget = NULL;
2438}
2439
David S. Millercd9ad582007-04-26 21:19:23 -07002440static int esp_slave_alloc(struct scsi_device *dev)
2441{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002442 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002443 struct esp_target_data *tp = &esp->target[dev->id];
2444 struct esp_lun_data *lp;
2445
2446 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2447 if (!lp)
2448 return -ENOMEM;
2449 dev->hostdata = lp;
2450
David S. Millercd9ad582007-04-26 21:19:23 -07002451 spi_min_period(tp->starget) = esp->min_period;
2452 spi_max_offset(tp->starget) = 15;
2453
2454 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2455 spi_max_width(tp->starget) = 1;
2456 else
2457 spi_max_width(tp->starget) = 0;
2458
2459 return 0;
2460}
2461
2462static int esp_slave_configure(struct scsi_device *dev)
2463{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002464 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002465 struct esp_target_data *tp = &esp->target[dev->id];
David S. Millercd9ad582007-04-26 21:19:23 -07002466
Hannes Reinecke3707a182014-11-24 15:37:20 +01002467 if (dev->tagged_supported)
2468 scsi_change_queue_depth(dev, esp->num_tags);
David S. Millercd9ad582007-04-26 21:19:23 -07002469
David S. Millercd9ad582007-04-26 21:19:23 -07002470 tp->flags |= ESP_TGT_DISCONNECT;
2471
2472 if (!spi_initial_dv(dev->sdev_target))
2473 spi_dv_device(dev);
2474
2475 return 0;
2476}
2477
2478static void esp_slave_destroy(struct scsi_device *dev)
2479{
2480 struct esp_lun_data *lp = dev->hostdata;
2481
2482 kfree(lp);
2483 dev->hostdata = NULL;
2484}
2485
/* Error-handler abort callback.  Three cases:
 *  1) command still queued   -> complete it with DID_ABORT, SUCCESS;
 *  2) command currently active -> send ABORT_TASK_SET via message-out
 *     and wait (up to 5s) for the interrupt path to complete it;
 *  3) command disconnected   -> cannot abort, FAILED (midlayer will
 *     escalate to bus/host reset).
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Look for the command on the not-yet-issued queue. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus. If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected. This is not easy to
		 * abort. For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target. Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Wait for the interrupt path to complete the abort; on
	 * timeout, detach the completion before returning FAILED.
	 */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2602
/* Error-handler bus-reset callback.  Issue a chip-level SCSI bus reset
 * and wait for the interrupt path (__esp_interrupt -> esp_reset_cleanup)
 * to signal completion via esp->eh_reset.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* The interrupt handler completes this when the reset
	 * interrupt arrives.
	 */
	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	/* On timeout, detach the completion pointer before failing. */
	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2637
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Full chip + bus reset, then tear down all command state. */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Let the SCSI bus reset settle before reporting success. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2653
/* Host template ->info hook: short driver identification string. */
static const char *esp_info(struct Scsi_Host *host)
{
	static const char info_str[] = "esp";

	return info_str;
}
2658
/* Shared SCSI host template; front-end drivers copy this and fill in
 * the bus-specific pieces before calling scsi_esp_register().
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
	.use_blk_tags		= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2681
2682static void esp_get_signalling(struct Scsi_Host *host)
2683{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002684 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002685 enum spi_signal_type type;
2686
2687 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2688 type = SPI_SIGNAL_HVD;
2689 else
2690 type = SPI_SIGNAL_SE;
2691
2692 spi_signalling(host) = type;
2693}
2694
2695static void esp_set_offset(struct scsi_target *target, int offset)
2696{
2697 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002698 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002699 struct esp_target_data *tp = &esp->target[target->id];
2700
Finn Thain02507a82009-12-05 12:30:42 +11002701 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2702 tp->nego_goal_offset = 0;
2703 else
2704 tp->nego_goal_offset = offset;
David S. Millercd9ad582007-04-26 21:19:23 -07002705 tp->flags |= ESP_TGT_CHECK_NEGO;
2706}
2707
2708static void esp_set_period(struct scsi_target *target, int period)
2709{
2710 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002711 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002712 struct esp_target_data *tp = &esp->target[target->id];
2713
2714 tp->nego_goal_period = period;
2715 tp->flags |= ESP_TGT_CHECK_NEGO;
2716}
2717
2718static void esp_set_width(struct scsi_target *target, int width)
2719{
2720 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002721 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002722 struct esp_target_data *tp = &esp->target[target->id];
2723
2724 tp->nego_goal_width = (width ? 1 : 0);
2725 tp->flags |= ESP_TGT_CHECK_NEGO;
2726}
2727
/* SPI transport class callbacks; registered via spi_attach_transport()
 * in esp_init().
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2737
2738static int __init esp_init(void)
2739{
2740 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2741 sizeof(struct esp_cmd_priv));
2742
2743 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2744 if (!esp_transport_template)
2745 return -ENODEV;
2746
2747 return 0;
2748}
2749
/* Module exit: release the SPI transport template. */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2754
2755MODULE_DESCRIPTION("ESP SCSI driver core");
2756MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2757MODULE_LICENSE("GPL");
2758MODULE_VERSION(DRV_VERSION);
2759
2760module_param(esp_bus_reset_settle, int, 0);
2761MODULE_PARM_DESC(esp_bus_reset_settle,
2762 "ESP scsi bus reset delay in seconds");
2763
2764module_param(esp_debug, int, 0);
2765MODULE_PARM_DESC(esp_debug,
2766"ESP bitmapped debugging message enable value:\n"
2767" 0x00000001 Log interrupt events\n"
2768" 0x00000002 Log scsi commands\n"
2769" 0x00000004 Log resets\n"
2770" 0x00000008 Log message in events\n"
2771" 0x00000010 Log message out events\n"
2772" 0x00000020 Log command completion\n"
2773" 0x00000040 Log disconnects\n"
2774" 0x00000080 Log data start\n"
2775" 0x00000100 Log data done\n"
2776" 0x00000200 Log reconnects\n"
2777" 0x00000400 Log auto-sense data\n"
2778);
2779
2780module_init(esp_init);
2781module_exit(esp_exit);