blob: 800ff3ea501d69efee463164d672520888f87639 [file] [log] [blame]
David S. Millercd9ad582007-04-26 21:19:23 -07001/* esp_scsi.c: ESP SCSI driver.
2 *
3 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
4 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
Alexey Dobriyane1f2a092007-04-27 15:19:27 -070016#include <linux/irqreturn.h>
David S. Millercd9ad582007-04-26 21:19:23 -070017
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
37/* SCSI bus reset settle time in seconds. */
38static int esp_bus_reset_settle = 3;
39
40static u32 esp_debug;
41#define ESP_DEBUG_INTR 0x00000001
42#define ESP_DEBUG_SCSICMD 0x00000002
43#define ESP_DEBUG_RESET 0x00000004
44#define ESP_DEBUG_MSGIN 0x00000008
45#define ESP_DEBUG_MSGOUT 0x00000010
46#define ESP_DEBUG_CMDDONE 0x00000020
47#define ESP_DEBUG_DISCONNECT 0x00000040
48#define ESP_DEBUG_DATASTART 0x00000080
49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400
Hannes Reinecke1af6f602014-11-24 15:37:22 +010052#define ESP_DEBUG_EVENT 0x00000800
53#define ESP_DEBUG_COMMAND 0x00001000
David S. Millercd9ad582007-04-26 21:19:23 -070054
55#define esp_log_intr(f, a...) \
56do { if (esp_debug & ESP_DEBUG_INTR) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010057 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070058} while (0)
59
60#define esp_log_reset(f, a...) \
61do { if (esp_debug & ESP_DEBUG_RESET) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010062 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070063} while (0)
64
65#define esp_log_msgin(f, a...) \
66do { if (esp_debug & ESP_DEBUG_MSGIN) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010067 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070068} while (0)
69
70#define esp_log_msgout(f, a...) \
71do { if (esp_debug & ESP_DEBUG_MSGOUT) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010072 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070073} while (0)
74
75#define esp_log_cmddone(f, a...) \
76do { if (esp_debug & ESP_DEBUG_CMDDONE) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010077 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070078} while (0)
79
80#define esp_log_disconnect(f, a...) \
81do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010082 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070083} while (0)
84
85#define esp_log_datastart(f, a...) \
86do { if (esp_debug & ESP_DEBUG_DATASTART) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010087 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070088} while (0)
89
90#define esp_log_datadone(f, a...) \
91do { if (esp_debug & ESP_DEBUG_DATADONE) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010092 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070093} while (0)
94
95#define esp_log_reconnect(f, a...) \
96do { if (esp_debug & ESP_DEBUG_RECONNECT) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010097 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070098} while (0)
99
100#define esp_log_autosense(f, a...) \
101do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100102 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -0700103} while (0)
104
Hannes Reinecke1af6f602014-11-24 15:37:22 +0100105#define esp_log_event(f, a...) \
106do { if (esp_debug & ESP_DEBUG_EVENT) \
107 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108} while (0)
109
110#define esp_log_command(f, a...) \
111do { if (esp_debug & ESP_DEBUG_COMMAND) \
112 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
113} while (0)
114
David S. Millercd9ad582007-04-26 21:19:23 -0700115#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
116#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
117
118static void esp_log_fill_regs(struct esp *esp,
119 struct esp_event_ent *p)
120{
121 p->sreg = esp->sreg;
122 p->seqreg = esp->seqreg;
123 p->sreg2 = esp->sreg2;
124 p->ireg = esp->ireg;
125 p->select_state = esp->select_state;
126 p->event = esp->event;
127}
128
/* Write command byte @val to the chip's command register, first
 * recording the command plus a register snapshot in the driver's
 * circular event log (dumped by esp_dump_cmd_log()).
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* Advance the log cursor; ESP_EVENT_LOG_SZ is a power of two. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
145
146static void esp_event(struct esp *esp, u8 val)
147{
148 struct esp_event_ent *p;
149 int idx = esp->esp_event_cur;
150
151 p = &esp->esp_event_log[idx];
152 p->type = ESP_EVENT_TYPE_EVENT;
153 p->val = val;
154 esp_log_fill_regs(esp, p);
155
156 esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
157
158 esp->event = val;
159}
160
/* Dump the whole circular command/event log, oldest entry first (the
 * current cursor position is the oldest slot).
 */
static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}
181
/* Flush the chip FIFO.  On ESP236 the flush does not complete
 * immediately, so poll the FIFO byte count (bounded to ~1ms) until
 * it reads zero.
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}
198
/* Drain the FASHME FIFO into esp->fifo[].  Each FIFO slot yields two
 * bytes; a trailing odd byte is flagged by ESP_STAT2_F1BYTE and must
 * be pushed out with a dummy write before it can be read.  Leaves the
 * number of bytes captured in esp->fifo_cnt.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
215
216static void esp_set_all_config3(struct esp *esp, u8 val)
217{
218 int i;
219
220 for (i = 0; i < ESP_MAX_TARGET; i++)
221 esp->target[i].esp_config3 = val;
222}
223
/* Reset the ESP chip, _not_ the SCSI bus. */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	/* Derive the sync period bounds from the clock cycle time
	 * (esp->ccycle); the +3 >> 2 below rounds up to register units.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	esp->max_period = (esp->max_period + 3)>>2;
	esp->min_period = (esp->min_period + 3)>>2;

	/* Per-revision configuration register setup. */
	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
		/* Fast 236 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
335
/* Map the command's scatterlist for DMA and initialize the residue
 * bookkeeping (cur_sg / cur_residue / tot_residue) that the data
 * transfer code consumes.  No-op for DMA_NONE commands.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	/* Total transfer length is the sum of the mapped segments. */
	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}
355
356static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
357 struct scsi_cmnd *cmd)
358{
359 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
360
361 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
362 return ent->sense_dma +
363 (ent->sense_ptr - cmd->sense_buffer);
364 }
365
366 return sg_dma_address(p->cur_sg) +
367 (sg_dma_len(p->cur_sg) -
368 p->cur_residue);
369}
370
371static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
372 struct scsi_cmnd *cmd)
373{
374 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
375
376 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
377 return SCSI_SENSE_BUFFERSIZE -
378 (ent->sense_ptr - cmd->sense_buffer);
379 }
380 return p->cur_residue;
381}
382
383static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
384 struct scsi_cmnd *cmd, unsigned int len)
385{
386 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
387
388 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
389 ent->sense_ptr += len;
390 return;
391 }
392
393 p->cur_residue -= len;
394 p->tot_residue -= len;
395 if (p->cur_residue < 0 || p->tot_residue < 0) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100396 shost_printk(KERN_ERR, esp->host,
397 "Data transfer overflow.\n");
398 shost_printk(KERN_ERR, esp->host,
399 "cur_residue[%d] tot_residue[%d] len[%u]\n",
400 p->cur_residue, p->tot_residue, len);
David S. Millercd9ad582007-04-26 21:19:23 -0700401 p->cur_residue = 0;
402 p->tot_residue = 0;
403 }
404 if (!p->cur_residue && p->tot_residue) {
405 p->cur_sg++;
406 p->cur_residue = sg_dma_len(p->cur_sg);
407 }
408}
409
/* Undo esp_map_dma(): release the scatterlist DMA mapping. */
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}
420
421static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
422{
423 struct scsi_cmnd *cmd = ent->cmd;
424 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
425
426 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
427 ent->saved_sense_ptr = ent->sense_ptr;
428 return;
429 }
430 ent->saved_cur_residue = spriv->cur_residue;
431 ent->saved_cur_sg = spriv->cur_sg;
432 ent->saved_tot_residue = spriv->tot_residue;
433}
434
435static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
436{
437 struct scsi_cmnd *cmd = ent->cmd;
438 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
439
440 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
441 ent->sense_ptr = ent->saved_sense_ptr;
442 return;
443 }
444 spriv->cur_residue = ent->saved_cur_residue;
445 spriv->cur_sg = ent->saved_cur_sg;
446 spriv->tot_residue = ent->saved_tot_residue;
447}
448
449static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
450{
451 if (cmd->cmd_len == 6 ||
452 cmd->cmd_len == 10 ||
453 cmd->cmd_len == 12) {
454 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
455 } else {
456 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
457 }
458}
459
/* Load target @tgt's CONFIG3 shadow into the chip, using prev_cfg3 as
 * a cache to skip redundant register writes.  ESP100/100A have no
 * CONFIG3 register, so those revisions do nothing.
 */
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}
471
/* Load target @tgt's sync offset and period into the chip, skipping
 * each register write when the cached prev_soff/prev_stp value
 * already matches.
 */
static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}
486
487static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
488{
489 if (esp->rev == FASHME) {
490 /* Arbitrary segment boundaries, 24-bit counts. */
491 if (dma_len > (1U << 24))
492 dma_len = (1U << 24);
493 } else {
494 u32 base, end;
495
496 /* ESP chip limits other variants by 16-bits of transfer
497 * count. Actually on FAS100A and FAS236 we could get
498 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
499 * in the ESP_CFG2 register but that causes other unwanted
500 * changes so we don't use it currently.
501 */
502 if (dma_len > (1U << 16))
503 dma_len = (1U << 16);
504
505 /* All of the DMA variants hooked up to these chips
506 * cannot handle crossing a 24-bit address boundary.
507 */
508 base = dma_addr & ((1U << 24) - 1U);
509 end = base + dma_len;
510 if (end > (1U << 24))
511 end = (1U <<24);
512 dma_len = end - base;
513 }
514 return dma_len;
515}
516
517static int esp_need_to_nego_wide(struct esp_target_data *tp)
518{
519 struct scsi_target *target = tp->starget;
520
521 return spi_width(target) != tp->nego_goal_width;
522}
523
524static int esp_need_to_nego_sync(struct esp_target_data *tp)
525{
526 struct scsi_target *target = tp->starget;
527
528 /* When offset is zero, period is "don't care". */
529 if (!spi_offset(target) && !tp->nego_goal_offset)
530 return 0;
531
532 if (spi_offset(target) == tp->nego_goal_offset &&
533 spi_period(target) == tp->nego_goal_period)
534 return 0;
535
536 return 1;
537}
538
/* Claim the LUN issue slot for @ent.  Untagged commands take the
 * single non_tagged_cmd slot and may "plug" the queue (lp->hold)
 * until outstanding tagged commands drain; tagged commands occupy
 * the per-tag slot indexed by the tag number.  Returns 0 on success
 * or -EBUSY when the command cannot be issued yet.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
583
584static void esp_free_lun_tag(struct esp_cmd_entry *ent,
585 struct esp_lun_data *lp)
586{
David S. Miller21af8102013-08-01 18:08:34 -0700587 if (ent->orig_tag[0]) {
588 BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
589 lp->tagged_cmds[ent->orig_tag[1]] = NULL;
David S. Millercd9ad582007-04-26 21:19:23 -0700590 lp->num_tagged--;
591 } else {
592 BUG_ON(lp->non_tagged_cmd != ent);
593 lp->non_tagged_cmd = NULL;
594 }
595}
596
/* When a contingent allegiance condition is created, we force feed a
 * REQUEST_SENSE command to the device to fetch the sense data. I
 * tried many other schemes, relying on the scsi error handling layer
 * to send out the REQUEST_SENSE automatically, but this was difficult
 * to get right especially in the presence of applications like smartd
 * which use SG_IO to send out their own REQUEST_SENSE commands.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;


	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		/* Map the mid-layer's sense buffer for device-to-memory
		 * DMA.  Unmapped again in esp_cmd_is_done().
		 */
		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	/* Build IDENTIFY plus a 6-byte REQUEST_SENSE CDB in the
	 * command block.
	 */
	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, ESP_CMD_DMA | ESP_CMD_SELA);
}
658
/* Scan the queued-command list for the first command that can be
 * issued now.  Autosense requests always win.  For normal commands,
 * build the tag message (if any) and try to claim the LUN slot;
 * commands whose LUN is currently busy are skipped.  Returns NULL
 * when nothing is issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Keep a pristine copy of the tag; esp_alloc_lun_tag()
		 * and esp_free_lun_tag() key on orig_tag[].
		 */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
689
/* If the chip is idle, pick the next issuable command and start it:
 * map DMA and checkpoint the data pointers, build any width/sync
 * negotiation message, construct the IDENTIFY (+ tag) message and CDB
 * in the command block, and launch the selection sequence.  "Slow"
 * commands (odd CDB lengths, pending negotiation, tagged on ESP100)
 * use select-with-ATN-and-stop and feed their bytes out manually.
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate. If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect. Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_DMA | ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_DMA | ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the two tag message bytes to any
			 * negotiation message already in msg_out[].
			 */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_DMA | ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	if (esp->rev == FASHME)
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       val, 16, 0, start_cmd);
}
842
843static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
844{
845 struct list_head *head = &esp->esp_cmd_pool;
846 struct esp_cmd_entry *ret;
847
848 if (list_empty(head)) {
849 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
850 } else {
851 ret = list_entry(head->next, struct esp_cmd_entry, list);
852 list_del(&ret->list);
853 memset(ret, 0, sizeof(*ret));
854 }
855 return ret;
856}
857
/* Return a command entry to the free pool for reuse by esp_get_ent(). */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
862
/* Complete @cmd back to the SCSI mid-layer: unmap DMA, release the
 * LUN tag slot, fold in autosense results when the command went
 * through REQUEST_SENSE emulation, wake any error-handler waiter,
 * recycle the entry, and try to start the next queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally. Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
913
/* Pack the SCSI status, message, and driver bytes into the layout
 * scsi_cmnd::result expects (status | message<<8 | driver<<16).
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = driver_code << 16;

	res |= message << 8;
	res |= status;
	return res;
}
919
/* Target reported QUEUE FULL: shrink the device queue depth to the
 * number of tagged commands currently outstanding minus one.
 */
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}
927
Jeff Garzikf2812332010-11-16 02:10:29 -0500928static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
David S. Millercd9ad582007-04-26 21:19:23 -0700929{
930 struct scsi_device *dev = cmd->device;
Christoph Hellwig2b14ec72007-05-31 20:12:32 +0200931 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -0700932 struct esp_cmd_priv *spriv;
933 struct esp_cmd_entry *ent;
934
935 ent = esp_get_ent(esp);
936 if (!ent)
937 return SCSI_MLQUEUE_HOST_BUSY;
938
939 ent->cmd = cmd;
940
941 cmd->scsi_done = done;
942
943 spriv = ESP_CMD_PRIV(cmd);
944 spriv->u.dma_addr = ~(dma_addr_t)0x0;
945
946 list_add_tail(&ent->list, &esp->queued_cmds);
947
948 esp_maybe_execute_command(esp);
949
950 return 0;
951}
952
Jeff Garzikf2812332010-11-16 02:10:29 -0500953static DEF_SCSI_QCMD(esp_queuecommand)
954
David S. Millercd9ad582007-04-26 21:19:23 -0700955static int esp_check_gross_error(struct esp *esp)
956{
957 if (esp->sreg & ESP_STAT_SPAM) {
958 /* Gross Error, could be one of:
959 * - top of fifo overwritten
960 * - top of command register overwritten
961 * - DMA programmed with wrong direction
962 * - improper phase change
963 */
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100964 shost_printk(KERN_ERR, esp->host,
965 "Gross error sreg[%02x]\n", esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -0700966 /* XXX Reset the chip. XXX */
967 return 1;
968 }
969 return 0;
970}
971
/* Classify a possibly-spurious interrupt.  Returns 1 when a SCSI bus
 * reset interrupt is latched, -1 on a spurious interrupt or DMA error,
 * and 0 when the interrupt looks genuine.  On ESP100/100A the
 * interrupt-pending status bit is untrustworthy and is simply cleared.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1009
/* Mark the driver as resetting and move the state machine into the
 * ESP_EVENT_RESET state; logs the caller for debugging.
 */
static void esp_schedule_reset(struct esp *esp)
{
	esp_log_reset("esp_schedule_reset() from %pf\n",
		      __builtin_return_address(0));
	esp->flags |= ESP_FLAG_RESETTING;
	esp_event(esp, ESP_EVENT_RESET);
}
1017
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait (bounded) for the chip to raise the next IRQ. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* We expect to be in message-in phase to receive the tag. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll (bounded) for the function-done interrupt that signals
	 * the tag bytes have arrived.
	 */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1112
/* Handle reselection by a target that previously disconnected.
 * Decodes the reselecting target/lun, locates the associated command
 * entry (untagged or via esp_reconnect_with_tag) and makes it the
 * active command again.  Returns 1 on success, or 0 after scheduling
 * a bus reset on any inconsistency.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	/* Reselection can only happen when no command owns the bus. */
	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other bit (the target's ID) must be set. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Reprogram the chip for this target's negotiated parameters. */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* Untagged command takes priority; otherwise poll for the tag. */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* If an abort was requested while disconnected, send it now. */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1203
/* Complete a selection attempt.  Examines the latched interrupt status
 * to distinguish "lost arbitration to a reselecting target", target
 * disconnect (no such device), and successful selection, unwinding or
 * advancing driver state accordingly.  The return value is the
 * "interrupt fully handled" indication for the event loop.
 */
static int esp_finish_select(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct scsi_cmnd *cmd;
	u8 orig_select_state;

	orig_select_state = esp->select_state;

	/* No longer selecting.  */
	esp->select_state = ESP_SELECT_NONE;

	esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
	ent = esp->active_cmd;
	cmd = ent->cmd;

	if (esp->ops->dma_error(esp)) {
		/* If we see a DMA error during or as a result of selection,
		 * all bets are off.
		 */
		esp_schedule_reset(esp);
		esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
		return 0;
	}

	esp->ops->dma_invalidate(esp);

	/* RSEL|FDONE: we lost the bus to a target reselecting us. */
	if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
		struct esp_target_data *tp = &esp->target[cmd->device->id];

		/* Carefully back out of the selection attempt.  Release
		 * resources (such as DMA mapping & TAG) and reset state (such
		 * as message out and command delivery variables).
		 */
		if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
			esp_unmap_dma(esp, cmd);
			esp_free_lun_tag(ent, cmd->device->hostdata);
			tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
			esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
			esp->cmd_bytes_ptr = NULL;
			esp->cmd_bytes_left = 0;
		} else {
			esp->ops->unmap_single(esp, ent->sense_dma,
					       SCSI_SENSE_BUFFERSIZE,
					       DMA_FROM_DEVICE);
			ent->sense_ptr = NULL;
		}

		/* Now that the state is unwound properly, put back onto
		 * the issue queue.  This command is no longer active.
		 */
		list_move(&ent->list, &esp->queued_cmds);
		esp->active_cmd = NULL;

		/* Return value ignored by caller, it directly invokes
		 * esp_reconnect().
		 */
		return 0;
	}

	if (esp->ireg == ESP_INTR_DC) {
		struct scsi_device *dev = cmd->device;

		/* Disconnect.  Make sure we re-negotiate sync and
		 * wide parameters if this target starts responding
		 * again in the future.
		 */
		esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;

		scsi_esp_cmd(esp, ESP_CMD_ESEL);
		esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
		return 1;
	}

	if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
		/* Selection successful.  On pre-FAST chips we have
		 * to do a NOP and possibly clean out the FIFO.
		 */
		if (esp->rev <= ESP236) {
			int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;

			scsi_esp_cmd(esp, ESP_CMD_NULL);

			if (!fcnt &&
			    (!esp->prev_soff ||
			     ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
				esp_flush_fifo(esp);
		}

		/* If we are doing a slow command, negotiation, etc.
		 * we'll do the right thing as we transition to the
		 * next phase.
		 */
		esp_event(esp, ESP_EVENT_CHECK_PHASE);
		return 0;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected selection completion ireg[%x]\n", esp->ireg);
	esp_schedule_reset(esp);
	return 0;
}
1305
/* Compute how many bytes were actually transferred during the data
 * phase, using the chip's transfer counter and the FIFO residue.
 * Returns the byte count, or -1 when the ESP100 spurious-byte bug is
 * detected (the caller then schedules a reset).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each FIFO slot holds two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Transfer counter is only meaningful when it has not expired
	 * (STAT_TCNT clear).  FASHME has a third, high counter byte.
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;

	/* On writes (data-out), bytes still sitting in the FIFO never
	 * made it to the target.
	 */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1367
/* Commit a negotiated synchronous transfer agreement for a target:
 * publish it to the SPI transport class, update the fast-SCSI config3
 * bit and the chip's period/offset registers, then log the agreement.
 * Passing zero period/offset programs asynchronous transfers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Periods below 50 (fast SCSI) need the fast bit;
			 * FASHME additionally drops the REQ/ACK delay.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the programmed values so reconnects can restore them. */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1406
/* The target sent MESSAGE REJECT for our last message.  If we were
 * negotiating wide, fall back and possibly retry sync negotiation;
 * if we were negotiating sync, fall back to async.  A reject of
 * anything else aborts the task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Wide rejected: stay narrow, then either finish or move
		 * on to sync negotiation.
		 */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Sync rejected: force asynchronous transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Reject of anything else is unexpected; abort the task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1447
/* Process an incoming SDTR (synchronous data transfer request)
 * message: validate the offered period/offset against chip limits,
 * convert the period to a chip STP value, and program the agreement.
 * Unacceptable offers are answered with MESSAGE REJECT, or with a
 * counter-offer of async (offset 0) when the period is too long.
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* An SDTR we did not solicit is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	/* Chip supports at most a sync offset of 15. */
	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the SDTR period (4ns units) into clock ticks. */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	/* Counter-offer: request async by re-sending SDTR ourselves. */
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1498
/* Process an incoming WDTR (wide data transfer request) message.
 * Only FASHME supports wide transfers; accepted widths are 8 or 16
 * bits.  After committing the width, proceed to sync negotiation if
 * still needed.  Anything unacceptable gets MESSAGE REJECT.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* A WDTR we did not solicit is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Width change invalidates the sync agreement. */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1547
/* Dispatch a fully-received extended message to the SDTR or WDTR
 * handler; any other extended message type aborts the task set.
 */
static void esp_msgin_extended(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt = cmd->device->id;

	tp = &esp->target[tgt];
	/* msg_in[2] is the extended message code (after length byte). */
	if (esp->msg_in[2] == EXTENDED_SDTR) {
		esp_msgin_sdtr(esp, tp);
		return;
	}
	if (esp->msg_in[2] == EXTENDED_WDTR) {
		esp_msgin_wdtr(esp, tp);
		return;
	}

	shost_printk(KERN_INFO, esp->host,
		     "Unexpected extended msg type %x\n", esp->msg_in[2]);

	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1572
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need the length byte (msg_in[1]) plus that many more
		 * bytes before the message is complete.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a residue count of 1 makes sense on a 16-bit bus. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Roll the data pointer back one byte, stepping back a
		 * scatterlist entry if the current one is untouched.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record which message ended the transaction; FREE_BUS
		 * processing acts on it.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1647
1648static int esp_process_event(struct esp *esp)
1649{
1650 int write;
1651
1652again:
1653 write = 0;
Hannes Reinecke1af6f602014-11-24 15:37:22 +01001654 esp_log_event("process event %d phase %x\n",
1655 esp->event, esp->sreg & ESP_STAT_PMASK);
David S. Millercd9ad582007-04-26 21:19:23 -07001656 switch (esp->event) {
1657 case ESP_EVENT_CHECK_PHASE:
1658 switch (esp->sreg & ESP_STAT_PMASK) {
1659 case ESP_DOP:
1660 esp_event(esp, ESP_EVENT_DATA_OUT);
1661 break;
1662 case ESP_DIP:
1663 esp_event(esp, ESP_EVENT_DATA_IN);
1664 break;
1665 case ESP_STATP:
1666 esp_flush_fifo(esp);
1667 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1668 esp_event(esp, ESP_EVENT_STATUS);
1669 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1670 return 1;
1671
1672 case ESP_MOP:
1673 esp_event(esp, ESP_EVENT_MSGOUT);
1674 break;
1675
1676 case ESP_MIP:
1677 esp_event(esp, ESP_EVENT_MSGIN);
1678 break;
1679
1680 case ESP_CMDP:
1681 esp_event(esp, ESP_EVENT_CMD_START);
1682 break;
1683
1684 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001685 shost_printk(KERN_INFO, esp->host,
1686 "Unexpected phase, sreg=%02x\n",
1687 esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -07001688 esp_schedule_reset(esp);
1689 return 0;
1690 }
1691 goto again;
1692 break;
1693
1694 case ESP_EVENT_DATA_IN:
1695 write = 1;
1696 /* fallthru */
1697
1698 case ESP_EVENT_DATA_OUT: {
1699 struct esp_cmd_entry *ent = esp->active_cmd;
1700 struct scsi_cmnd *cmd = ent->cmd;
1701 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1702 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1703
1704 if (esp->rev == ESP100)
1705 scsi_esp_cmd(esp, ESP_CMD_NULL);
1706
1707 if (write)
1708 ent->flags |= ESP_CMD_FLAG_WRITE;
1709 else
1710 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1711
Finn Thain6fe07aa2008-04-25 10:06:05 -05001712 if (esp->ops->dma_length_limit)
1713 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1714 dma_len);
1715 else
1716 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1717
David S. Millercd9ad582007-04-26 21:19:23 -07001718 esp->data_dma_len = dma_len;
1719
1720 if (!dma_len) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001721 shost_printk(KERN_ERR, esp->host,
1722 "DMA length is zero!\n");
1723 shost_printk(KERN_ERR, esp->host,
1724 "cur adr[%08llx] len[%08x]\n",
1725 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1726 esp_cur_dma_len(ent, cmd));
David S. Millercd9ad582007-04-26 21:19:23 -07001727 esp_schedule_reset(esp);
1728 return 0;
1729 }
1730
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001731 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001732 (unsigned long long)dma_addr, dma_len, write);
David S. Millercd9ad582007-04-26 21:19:23 -07001733
1734 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1735 write, ESP_CMD_DMA | ESP_CMD_TI);
1736 esp_event(esp, ESP_EVENT_DATA_DONE);
1737 break;
1738 }
1739 case ESP_EVENT_DATA_DONE: {
1740 struct esp_cmd_entry *ent = esp->active_cmd;
1741 struct scsi_cmnd *cmd = ent->cmd;
1742 int bytes_sent;
1743
1744 if (esp->ops->dma_error(esp)) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001745 shost_printk(KERN_INFO, esp->host,
1746 "data done, DMA error, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001747 esp_schedule_reset(esp);
1748 return 0;
1749 }
1750
1751 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1752 /* XXX parity errors, etc. XXX */
1753
1754 esp->ops->dma_drain(esp);
1755 }
1756 esp->ops->dma_invalidate(esp);
1757
1758 if (esp->ireg != ESP_INTR_BSERV) {
1759 /* We should always see exactly a bus-service
1760 * interrupt at the end of a successful transfer.
1761 */
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001762 shost_printk(KERN_INFO, esp->host,
1763 "data done, not BSERV, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001764 esp_schedule_reset(esp);
1765 return 0;
1766 }
1767
1768 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1769
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001770 esp_log_datadone("data done flgs[%x] sent[%d]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001771 ent->flags, bytes_sent);
1772
1773 if (bytes_sent < 0) {
1774 /* XXX force sync mode for this target XXX */
1775 esp_schedule_reset(esp);
1776 return 0;
1777 }
1778
1779 esp_advance_dma(esp, ent, cmd, bytes_sent);
1780 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1781 goto again;
David S. Millercd9ad582007-04-26 21:19:23 -07001782 }
1783
1784 case ESP_EVENT_STATUS: {
1785 struct esp_cmd_entry *ent = esp->active_cmd;
1786
1787 if (esp->ireg & ESP_INTR_FDONE) {
1788 ent->status = esp_read8(ESP_FDATA);
1789 ent->message = esp_read8(ESP_FDATA);
1790 scsi_esp_cmd(esp, ESP_CMD_MOK);
1791 } else if (esp->ireg == ESP_INTR_BSERV) {
1792 ent->status = esp_read8(ESP_FDATA);
1793 ent->message = 0xff;
1794 esp_event(esp, ESP_EVENT_MSGIN);
1795 return 0;
1796 }
1797
1798 if (ent->message != COMMAND_COMPLETE) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001799 shost_printk(KERN_INFO, esp->host,
1800 "Unexpected message %x in status\n",
1801 ent->message);
David S. Millercd9ad582007-04-26 21:19:23 -07001802 esp_schedule_reset(esp);
1803 return 0;
1804 }
1805
1806 esp_event(esp, ESP_EVENT_FREE_BUS);
1807 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1808 break;
1809 }
1810 case ESP_EVENT_FREE_BUS: {
1811 struct esp_cmd_entry *ent = esp->active_cmd;
1812 struct scsi_cmnd *cmd = ent->cmd;
1813
1814 if (ent->message == COMMAND_COMPLETE ||
1815 ent->message == DISCONNECT)
1816 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1817
1818 if (ent->message == COMMAND_COMPLETE) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001819 esp_log_cmddone("Command done status[%x] message[%x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001820 ent->status, ent->message);
1821 if (ent->status == SAM_STAT_TASK_SET_FULL)
1822 esp_event_queue_full(esp, ent);
1823
1824 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1825 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1826 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1827 esp_autosense(esp, ent);
1828 } else {
1829 esp_cmd_is_done(esp, ent, cmd,
1830 compose_result(ent->status,
1831 ent->message,
1832 DID_OK));
1833 }
1834 } else if (ent->message == DISCONNECT) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001835 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001836 cmd->device->id,
1837 ent->tag[0], ent->tag[1]);
1838
1839 esp->active_cmd = NULL;
1840 esp_maybe_execute_command(esp);
1841 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001842 shost_printk(KERN_INFO, esp->host,
1843 "Unexpected message %x in freebus\n",
1844 ent->message);
David S. Millercd9ad582007-04-26 21:19:23 -07001845 esp_schedule_reset(esp);
1846 return 0;
1847 }
1848 if (esp->active_cmd)
1849 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1850 break;
1851 }
1852 case ESP_EVENT_MSGOUT: {
1853 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1854
1855 if (esp_debug & ESP_DEBUG_MSGOUT) {
1856 int i;
1857 printk("ESP: Sending message [ ");
1858 for (i = 0; i < esp->msg_out_len; i++)
1859 printk("%02x ", esp->msg_out[i]);
1860 printk("]\n");
1861 }
1862
1863 if (esp->rev == FASHME) {
1864 int i;
1865
1866 /* Always use the fifo. */
1867 for (i = 0; i < esp->msg_out_len; i++) {
1868 esp_write8(esp->msg_out[i], ESP_FDATA);
1869 esp_write8(0, ESP_FDATA);
1870 }
1871 scsi_esp_cmd(esp, ESP_CMD_TI);
1872 } else {
1873 if (esp->msg_out_len == 1) {
1874 esp_write8(esp->msg_out[0], ESP_FDATA);
1875 scsi_esp_cmd(esp, ESP_CMD_TI);
1876 } else {
1877 /* Use DMA. */
1878 memcpy(esp->command_block,
1879 esp->msg_out,
1880 esp->msg_out_len);
1881
1882 esp->ops->send_dma_cmd(esp,
1883 esp->command_block_dma,
1884 esp->msg_out_len,
1885 esp->msg_out_len,
1886 0,
1887 ESP_CMD_DMA|ESP_CMD_TI);
1888 }
1889 }
1890 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1891 break;
1892 }
1893 case ESP_EVENT_MSGOUT_DONE:
1894 if (esp->rev == FASHME) {
1895 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1896 } else {
1897 if (esp->msg_out_len > 1)
1898 esp->ops->dma_invalidate(esp);
1899 }
1900
1901 if (!(esp->ireg & ESP_INTR_DC)) {
1902 if (esp->rev != FASHME)
1903 scsi_esp_cmd(esp, ESP_CMD_NULL);
1904 }
1905 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1906 goto again;
1907 case ESP_EVENT_MSGIN:
1908 if (esp->ireg & ESP_INTR_BSERV) {
1909 if (esp->rev == FASHME) {
1910 if (!(esp_read8(ESP_STATUS2) &
1911 ESP_STAT2_FEMPTY))
1912 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1913 } else {
1914 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1915 if (esp->rev == ESP100)
1916 scsi_esp_cmd(esp, ESP_CMD_NULL);
1917 }
1918 scsi_esp_cmd(esp, ESP_CMD_TI);
1919 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1920 return 1;
1921 }
1922 if (esp->ireg & ESP_INTR_FDONE) {
1923 u8 val;
1924
1925 if (esp->rev == FASHME)
1926 val = esp->fifo[0];
1927 else
1928 val = esp_read8(ESP_FDATA);
1929 esp->msg_in[esp->msg_in_len++] = val;
1930
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001931 esp_log_msgin("Got msgin byte %x\n", val);
David S. Millercd9ad582007-04-26 21:19:23 -07001932
1933 if (!esp_msgin_process(esp))
1934 esp->msg_in_len = 0;
1935
1936 if (esp->rev == FASHME)
1937 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1938
1939 scsi_esp_cmd(esp, ESP_CMD_MOK);
1940
1941 if (esp->event != ESP_EVENT_FREE_BUS)
1942 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1943 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001944 shost_printk(KERN_INFO, esp->host,
1945 "MSGIN neither BSERV not FDON, resetting");
David S. Millercd9ad582007-04-26 21:19:23 -07001946 esp_schedule_reset(esp);
1947 return 0;
1948 }
1949 break;
1950 case ESP_EVENT_CMD_START:
1951 memcpy(esp->command_block, esp->cmd_bytes_ptr,
1952 esp->cmd_bytes_left);
1953 if (esp->rev == FASHME)
1954 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1955 esp->ops->send_dma_cmd(esp, esp->command_block_dma,
1956 esp->cmd_bytes_left, 16, 0,
1957 ESP_CMD_DMA | ESP_CMD_TI);
1958 esp_event(esp, ESP_EVENT_CMD_DONE);
1959 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1960 break;
1961 case ESP_EVENT_CMD_DONE:
1962 esp->ops->dma_invalidate(esp);
1963 if (esp->ireg & ESP_INTR_BSERV) {
1964 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1965 goto again;
1966 }
1967 esp_schedule_reset(esp);
1968 return 0;
1969 break;
1970
1971 case ESP_EVENT_RESET:
1972 scsi_esp_cmd(esp, ESP_CMD_RS);
1973 break;
1974
1975 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001976 shost_printk(KERN_INFO, esp->host,
1977 "Unexpected event %x, resetting\n", esp->event);
David S. Millercd9ad582007-04-26 21:19:23 -07001978 esp_schedule_reset(esp);
1979 return 0;
1980 break;
1981 }
1982 return 1;
1983}
1984
/* Fail one outstanding command with DID_RESET and release everything
 * it holds: DMA mappings, LUN tag, autosense buffer and the command
 * entry itself.  Completes the command back to the midlayer.
 */
static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;

	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, cmd->device->hostdata);
	cmd->result = DID_RESET << 16;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;
	}

	cmd->scsi_done(cmd);
	list_del(&ent->list);
	esp_put_ent(esp, ent);
}
2003
2004static void esp_clear_hold(struct scsi_device *dev, void *data)
2005{
2006 struct esp_lun_data *lp = dev->hostdata;
2007
2008 BUG_ON(lp->num_tagged);
2009 lp->hold = 0;
2010}
2011
/* A bus reset has occurred: fail every queued and active command with
 * DID_RESET, then force renegotiation of sync/wide parameters and
 * clear the queue hold for every target.  Finally drops the
 * ESP_FLAG_RESETTING flag.
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands never started: complete them straight back. */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Commands in flight need full resource teardown. */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers.  */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2052
/* Runs under host->lock */
/* Core interrupt service: sample the status and interrupt registers,
 * finish any pending reset, then dispatch to selection-completion,
 * reconnect, or the event state machine until it reports done.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	esp->sreg = esp_read8(ESP_STATUS);

	if (esp->flags & ESP_FLAG_RESETTING) {
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	esp->ireg = esp_read8(ESP_INTRPT);

	/* An SCSI-bus-reset interrupt also completes a pending reset. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake up an error-handler thread waiting on the reset. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	/* FASHME latches reselection/message bytes in its FIFO; pull
	 * them out before they can be lost.
	 */
	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC.  */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected; back out of any in-flight selection
			 * first.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	/* Drive the state machine until it declares the IRQ handled. */
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2127
/* Shared interrupt handler entry point for all ESP front-end drivers.
 * Takes the host lock and loops while the chip reports a pending
 * interrupt; when QUICKIRQ_CHECK is set, briefly polls (up to
 * ESP_QUICKIRQ_LIMIT iterations) for a quickly-following interrupt to
 * avoid an extra trip through the interrupt vector.
 */
irqreturn_t scsi_esp_intr(int irq, void *dev_id)
{
	struct esp *esp = dev_id;
	unsigned long flags;
	irqreturn_t ret;

	spin_lock_irqsave(esp->host->host_lock, flags);
	ret = IRQ_NONE;
	if (esp->ops->irq_pending(esp)) {
		ret = IRQ_HANDLED;
		for (;;) {
			int i;

			__esp_interrupt(esp);
			if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
				break;
			esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;

			for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
				if (esp->ops->irq_pending(esp))
					break;
			}
			if (i == ESP_QUICKIRQ_LIMIT)
				break;
		}
	}
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	return ret;
}
EXPORT_SYMBOL(scsi_esp_intr);
2159
/* Detect which ESP chip variant we are driving by probing which of the
 * config registers (CFG2, CFG3) are actually implemented.  Sets esp->rev
 * and, for FAST parts, the default sync period.  Must be called before
 * the chip is used for real commands.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
	esp_write8(esp->config2, ESP_CFG2);

	/* Read back what we just wrote; unimplemented registers will not
	 * retain the value.
	 */
	val = esp_read8(ESP_CFG2);
	val &= ~ESP_CONFIG2_MAGIC;
	if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
		/* If what we write to cfg2 does not come back, cfg2 is not
		 * implemented, therefore this must be a plain esp100.
		 */
		esp->rev = ESP100;
	} else {
		esp->config2 = 0;
		esp_set_all_config3(esp, 5);
		esp->prev_cfg3 = 5;
		esp_write8(esp->config2, ESP_CFG2);
		esp_write8(0, ESP_CFG3);
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		val = esp_read8(ESP_CFG3);
		if (val != 5) {
			/* The cfg2 register is implemented, however
			 * cfg3 is not, must be esp100a.
			 */
			esp->rev = ESP100A;
		} else {
			esp_set_all_config3(esp, 0);
			esp->prev_cfg3 = 0;
			esp_write8(esp->prev_cfg3, ESP_CFG3);

			/* All of cfg{1,2,3} implemented, must be one of
			 * the fas variants, figure out which one.
			 */
			if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
				esp->rev = FAST;
				esp->sync_defp = SYNC_DEFP_FAST;
			} else {
				esp->rev = ESP236;
			}
			esp->config2 = 0;
			esp_write8(esp->config2, ESP_CFG2);
		}
	}
}
2208
Adrian Bunk76246802007-10-11 17:35:20 +02002209static void esp_init_swstate(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002210{
2211 int i;
2212
2213 INIT_LIST_HEAD(&esp->queued_cmds);
2214 INIT_LIST_HEAD(&esp->active_cmds);
2215 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2216
2217 /* Start with a clear state, domain validation (via ->slave_configure,
2218 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2219 * commands.
2220 */
2221 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2222 esp->target[i].flags = 0;
2223 esp->target[i].nego_goal_period = 0;
2224 esp->target[i].nego_goal_offset = 0;
2225 esp->target[i].nego_goal_width = 0;
2226 esp->target[i].nego_goal_tags = 0;
2227 }
2228}
2229
/* This places the ESP into a known state at boot time. */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the normal config1 value (re-enables the reset irq). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2254
Adrian Bunk76246802007-10-11 17:35:20 +02002255static void esp_set_clock_params(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002256{
Finn Thain6fe07aa2008-04-25 10:06:05 -05002257 int fhz;
David S. Millercd9ad582007-04-26 21:19:23 -07002258 u8 ccf;
2259
2260 /* This is getting messy but it has to be done correctly or else
2261 * you get weird behavior all over the place. We are trying to
2262 * basically figure out three pieces of information.
2263 *
2264 * a) Clock Conversion Factor
2265 *
2266 * This is a representation of the input crystal clock frequency
2267 * going into the ESP on this machine. Any operation whose timing
2268 * is longer than 400ns depends on this value being correct. For
2269 * example, you'll get blips for arbitration/selection during high
2270 * load or with multiple targets if this is not set correctly.
2271 *
2272 * b) Selection Time-Out
2273 *
2274 * The ESP isn't very bright and will arbitrate for the bus and try
2275 * to select a target forever if you let it. This value tells the
2276 * ESP when it has taken too long to negotiate and that it should
2277 * interrupt the CPU so we can see what happened. The value is
2278 * computed as follows (from NCR/Symbios chip docs).
2279 *
2280 * (Time Out Period) * (Input Clock)
2281 * STO = ----------------------------------
2282 * (8192) * (Clock Conversion Factor)
2283 *
2284 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2285 *
2286 * c) Imperical constants for synchronous offset and transfer period
2287 * register values
2288 *
2289 * This entails the smallest and largest sync period we could ever
2290 * handle on this ESP.
2291 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002292 fhz = esp->cfreq;
David S. Millercd9ad582007-04-26 21:19:23 -07002293
Finn Thain6fe07aa2008-04-25 10:06:05 -05002294 ccf = ((fhz / 1000000) + 4) / 5;
David S. Millercd9ad582007-04-26 21:19:23 -07002295 if (ccf == 1)
2296 ccf = 2;
2297
2298 /* If we can't find anything reasonable, just assume 20MHZ.
2299 * This is the clock frequency of the older sun4c's where I've
2300 * been unable to find the clock-frequency PROM property. All
2301 * other machines provide useful values it seems.
2302 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002303 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2304 fhz = 20000000;
David S. Millercd9ad582007-04-26 21:19:23 -07002305 ccf = 4;
2306 }
2307
2308 esp->cfact = (ccf == 8 ? 0 : ccf);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002309 esp->cfreq = fhz;
2310 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
David S. Millercd9ad582007-04-26 21:19:23 -07002311 esp->ctick = ESP_TICK(ccf, esp->ccycle);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002312 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
David S. Millercd9ad582007-04-26 21:19:23 -07002313 esp->sync_defp = SYNC_DEFP_SLOW;
2314}
2315
/* Human-readable chip names, indexed directly by esp->rev (see the
 * esp_chip_names[esp->rev] use in scsi_esp_register()); the order here
 * must match the revision constants defined in esp_scsi.h.
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
};

/* SPI transport template shared by every registered ESP host;
 * created once in esp_init().
 */
static struct scsi_transport_template *esp_transport_template;
2327
/* Register a fully set-up ESP host with the SCSI midlayer.
 *
 * @esp: driver instance; the front-end must have filled in ops, regs,
 *       cfreq, scsi_id, etc. before calling.
 * @dev: the underlying bus device used as the SCSI host's parent.
 *
 * Probes the chip revision, resets it, adds the host and scans the bus.
 * Returns 0 on success or the error from scsi_add_host().
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	/* Clamp the tag count into a sane range before the host is used. */
	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	else if (esp->num_tags >= ESP_MAX_TAG)
		esp->num_tags = ESP_MAX_TAG - 1;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* Only consume an instance number once registration succeeded. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2372
/* Detach a registered ESP host from the SCSI midlayer.  The front-end
 * remains responsible for freeing its own resources afterwards.
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2378
James Bottomleyec5e69f2008-06-23 14:52:09 -05002379static int esp_target_alloc(struct scsi_target *starget)
2380{
2381 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2382 struct esp_target_data *tp = &esp->target[starget->id];
2383
2384 tp->starget = starget;
2385
2386 return 0;
2387}
2388
2389static void esp_target_destroy(struct scsi_target *starget)
2390{
2391 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2392 struct esp_target_data *tp = &esp->target[starget->id];
2393
2394 tp->starget = NULL;
2395}
2396
David S. Millercd9ad582007-04-26 21:19:23 -07002397static int esp_slave_alloc(struct scsi_device *dev)
2398{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002399 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002400 struct esp_target_data *tp = &esp->target[dev->id];
2401 struct esp_lun_data *lp;
2402
2403 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2404 if (!lp)
2405 return -ENOMEM;
2406 dev->hostdata = lp;
2407
David S. Millercd9ad582007-04-26 21:19:23 -07002408 spi_min_period(tp->starget) = esp->min_period;
2409 spi_max_offset(tp->starget) = 15;
2410
2411 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2412 spi_max_width(tp->starget) = 1;
2413 else
2414 spi_max_width(tp->starget) = 0;
2415
2416 return 0;
2417}
2418
2419static int esp_slave_configure(struct scsi_device *dev)
2420{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002421 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002422 struct esp_target_data *tp = &esp->target[dev->id];
David S. Millercd9ad582007-04-26 21:19:23 -07002423
Hannes Reinecke3707a182014-11-24 15:37:20 +01002424 if (dev->tagged_supported)
2425 scsi_change_queue_depth(dev, esp->num_tags);
David S. Millercd9ad582007-04-26 21:19:23 -07002426
David S. Millercd9ad582007-04-26 21:19:23 -07002427 tp->flags |= ESP_TGT_DISCONNECT;
2428
2429 if (!spi_initial_dv(dev->sdev_target))
2430 spi_dv_device(dev);
2431
2432 return 0;
2433}
2434
2435static void esp_slave_destroy(struct scsi_device *dev)
2436{
2437 struct esp_lun_data *lp = dev->hostdata;
2438
2439 kfree(lp);
2440 dev->hostdata = NULL;
2441}
2442
/* SCSI error-handler callback: try to abort a single command.
 *
 * Three cases are handled:
 *   1. The command is still on the queued list - remove it, complete it
 *      with DID_ABORT, done.
 *   2. The command is the currently active one on the bus - send an
 *      ABORT TASK SET message (via ATN) and wait up to 5s for the
 *      completion to be signalled by the interrupt path.
 *   3. The command is disconnected - we currently give up and let the
 *      midlayer escalate (bus/host reset).
 *
 * Returns SUCCESS or FAILED per the SCSI EH contract.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Case 1: look for the command on the not-yet-issued queue. */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	/* Case 2: the command is the one currently active on the bus. */
	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus.  If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected.  This is not easy to
		 * abort.  For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target.  Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* The interrupt path completes eh_done when the abort finishes. */
	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2559
/* SCSI error-handler callback: reset the SCSI bus.
 *
 * Issues the chip's reset-SCSI-bus command, lets the bus settle for
 * esp_bus_reset_settle seconds, then waits up to 5s for the interrupt
 * path to signal that the reset completed.  Returns SUCCESS/FAILED.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* The interrupt path completes this once the reset is seen. */
	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2594
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	/* Re-run the boot-time chip reset and fail back every command
	 * the driver was tracking, all under the host lock.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give the bus time to settle after the reset. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2610
/* Midlayer ->info() callback: short, static driver identification. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2615
/* Host template shared by every ESP front-end driver; each front-end
 * copies or references this when allocating its Scsi_Host.
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	.skip_settle_delay	= 1,
	.use_blk_tags		= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2638
2639static void esp_get_signalling(struct Scsi_Host *host)
2640{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002641 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002642 enum spi_signal_type type;
2643
2644 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2645 type = SPI_SIGNAL_HVD;
2646 else
2647 type = SPI_SIGNAL_SE;
2648
2649 spi_signalling(host) = type;
2650}
2651
2652static void esp_set_offset(struct scsi_target *target, int offset)
2653{
2654 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002655 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002656 struct esp_target_data *tp = &esp->target[target->id];
2657
Finn Thain02507a82009-12-05 12:30:42 +11002658 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2659 tp->nego_goal_offset = 0;
2660 else
2661 tp->nego_goal_offset = offset;
David S. Millercd9ad582007-04-26 21:19:23 -07002662 tp->flags |= ESP_TGT_CHECK_NEGO;
2663}
2664
2665static void esp_set_period(struct scsi_target *target, int period)
2666{
2667 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002668 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002669 struct esp_target_data *tp = &esp->target[target->id];
2670
2671 tp->nego_goal_period = period;
2672 tp->flags |= ESP_TGT_CHECK_NEGO;
2673}
2674
2675static void esp_set_width(struct scsi_target *target, int width)
2676{
2677 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002678 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002679 struct esp_target_data *tp = &esp->target[target->id];
2680
2681 tp->nego_goal_width = (width ? 1 : 0);
2682 tp->flags |= ESP_TGT_CHECK_NEGO;
2683}
2684
/* SPI transport operations exported to the scsi_transport_spi layer;
 * the show_* flags make the corresponding attributes visible in sysfs.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2694
2695static int __init esp_init(void)
2696{
2697 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2698 sizeof(struct esp_cmd_priv));
2699
2700 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2701 if (!esp_transport_template)
2702 return -ENODEV;
2703
2704 return 0;
2705}
2706
/* Module exit: release the SPI transport attached in esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2711
2712MODULE_DESCRIPTION("ESP SCSI driver core");
2713MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2714MODULE_LICENSE("GPL");
2715MODULE_VERSION(DRV_VERSION);
2716
2717module_param(esp_bus_reset_settle, int, 0);
2718MODULE_PARM_DESC(esp_bus_reset_settle,
2719 "ESP scsi bus reset delay in seconds");
2720
2721module_param(esp_debug, int, 0);
2722MODULE_PARM_DESC(esp_debug,
2723"ESP bitmapped debugging message enable value:\n"
2724" 0x00000001 Log interrupt events\n"
2725" 0x00000002 Log scsi commands\n"
2726" 0x00000004 Log resets\n"
2727" 0x00000008 Log message in events\n"
2728" 0x00000010 Log message out events\n"
2729" 0x00000020 Log command completion\n"
2730" 0x00000040 Log disconnects\n"
2731" 0x00000080 Log data start\n"
2732" 0x00000100 Log data done\n"
2733" 0x00000200 Log reconnects\n"
2734" 0x00000400 Log auto-sense data\n"
2735);
2736
2737module_init(esp_init);
2738module_exit(esp_exit);