blob: 60be0742e2c8bae75bccd5225638f5f2a48d0713 [file] [log] [blame]
/* esp_scsi.c: ESP SCSI driver.
 *
 * Copyright (C) 2007 David S. Miller (davem@davemloft.net)
 */
5
6#include <linux/kernel.h>
7#include <linux/types.h>
8#include <linux/slab.h>
9#include <linux/delay.h>
10#include <linux/list.h>
11#include <linux/completion.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/moduleparam.h>
15#include <linux/init.h>
Alexey Dobriyane1f2a092007-04-27 15:19:27 -070016#include <linux/irqreturn.h>
David S. Millercd9ad582007-04-26 21:19:23 -070017
18#include <asm/irq.h>
19#include <asm/io.h>
20#include <asm/dma.h>
21
22#include <scsi/scsi.h>
23#include <scsi/scsi_host.h>
24#include <scsi/scsi_cmnd.h>
25#include <scsi/scsi_device.h>
26#include <scsi/scsi_tcq.h>
27#include <scsi/scsi_dbg.h>
28#include <scsi/scsi_transport_spi.h>
29
30#include "esp_scsi.h"
31
32#define DRV_MODULE_NAME "esp"
33#define PFX DRV_MODULE_NAME ": "
34#define DRV_VERSION "2.000"
35#define DRV_MODULE_RELDATE "April 19, 2007"
36
37/* SCSI bus reset settle time in seconds. */
38static int esp_bus_reset_settle = 3;
39
40static u32 esp_debug;
41#define ESP_DEBUG_INTR 0x00000001
42#define ESP_DEBUG_SCSICMD 0x00000002
43#define ESP_DEBUG_RESET 0x00000004
44#define ESP_DEBUG_MSGIN 0x00000008
45#define ESP_DEBUG_MSGOUT 0x00000010
46#define ESP_DEBUG_CMDDONE 0x00000020
47#define ESP_DEBUG_DISCONNECT 0x00000040
48#define ESP_DEBUG_DATASTART 0x00000080
49#define ESP_DEBUG_DATADONE 0x00000100
50#define ESP_DEBUG_RECONNECT 0x00000200
51#define ESP_DEBUG_AUTOSENSE 0x00000400
Hannes Reinecke1af6f602014-11-24 15:37:22 +010052#define ESP_DEBUG_EVENT 0x00000800
53#define ESP_DEBUG_COMMAND 0x00001000
David S. Millercd9ad582007-04-26 21:19:23 -070054
55#define esp_log_intr(f, a...) \
56do { if (esp_debug & ESP_DEBUG_INTR) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010057 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070058} while (0)
59
60#define esp_log_reset(f, a...) \
61do { if (esp_debug & ESP_DEBUG_RESET) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010062 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070063} while (0)
64
65#define esp_log_msgin(f, a...) \
66do { if (esp_debug & ESP_DEBUG_MSGIN) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010067 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070068} while (0)
69
70#define esp_log_msgout(f, a...) \
71do { if (esp_debug & ESP_DEBUG_MSGOUT) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010072 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070073} while (0)
74
75#define esp_log_cmddone(f, a...) \
76do { if (esp_debug & ESP_DEBUG_CMDDONE) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010077 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070078} while (0)
79
80#define esp_log_disconnect(f, a...) \
81do { if (esp_debug & ESP_DEBUG_DISCONNECT) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010082 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070083} while (0)
84
85#define esp_log_datastart(f, a...) \
86do { if (esp_debug & ESP_DEBUG_DATASTART) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010087 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070088} while (0)
89
90#define esp_log_datadone(f, a...) \
91do { if (esp_debug & ESP_DEBUG_DATADONE) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010092 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070093} while (0)
94
95#define esp_log_reconnect(f, a...) \
96do { if (esp_debug & ESP_DEBUG_RECONNECT) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +010097 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -070098} while (0)
99
100#define esp_log_autosense(f, a...) \
101do { if (esp_debug & ESP_DEBUG_AUTOSENSE) \
Hannes Reineckea1a75b32014-11-24 15:37:21 +0100102 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
David S. Millercd9ad582007-04-26 21:19:23 -0700103} while (0)
104
Hannes Reinecke1af6f602014-11-24 15:37:22 +0100105#define esp_log_event(f, a...) \
106do { if (esp_debug & ESP_DEBUG_EVENT) \
107 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
108} while (0)
109
110#define esp_log_command(f, a...) \
111do { if (esp_debug & ESP_DEBUG_COMMAND) \
112 shost_printk(KERN_DEBUG, esp->host, f, ## a); \
113} while (0)
114
David S. Millercd9ad582007-04-26 21:19:23 -0700115#define esp_read8(REG) esp->ops->esp_read8(esp, REG)
116#define esp_write8(VAL,REG) esp->ops->esp_write8(esp, VAL, REG)
117
118static void esp_log_fill_regs(struct esp *esp,
119 struct esp_event_ent *p)
120{
121 p->sreg = esp->sreg;
122 p->seqreg = esp->seqreg;
123 p->sreg2 = esp->sreg2;
124 p->ireg = esp->ireg;
125 p->select_state = esp->select_state;
126 p->event = esp->event;
127}
128
/* Issue command byte @val to the ESP command register, recording it
 * (plus a register snapshot) in the circular event log first so that
 * failures can be diagnosed after the fact.
 */
void scsi_esp_cmd(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_CMD;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* ESP_EVENT_LOG_SZ is a power of two; mask wraps the index. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp_log_command("cmd[%02x]\n", val);
	esp_write8(val, ESP_CMD);
}
EXPORT_SYMBOL(scsi_esp_cmd);
145
/* Transfer the first @len bytes of esp->command_block to the chip and
 * then start chip command @cmd.  When ESP_FLAG_USE_FIFO is set the
 * bytes are pushed through the FIFO by PIO; otherwise a DMA transfer
 * of up to @max_len bytes is programmed via the bus-glue send_dma_cmd.
 */
static void esp_send_dma_cmd(struct esp *esp, int len, int max_len, int cmd)
{
	if (esp->flags & ESP_FLAG_USE_FIFO) {
		int i;

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		for (i = 0; i < len; i++)
			esp_write8(esp->command_block[i], ESP_FDATA);
		scsi_esp_cmd(esp, cmd);
	} else {
		/* HME wants the FIFO flushed before a DMA select. */
		if (esp->rev == FASHME)
			scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		cmd |= ESP_CMD_DMA;
		esp->ops->send_dma_cmd(esp, esp->command_block_dma,
				       len, max_len, 0, cmd);
	}
}
163
/* Record a state-machine transition to @val in the circular event log
 * and make it the driver's current event.
 */
static void esp_event(struct esp *esp, u8 val)
{
	struct esp_event_ent *p;
	int idx = esp->esp_event_cur;

	p = &esp->esp_event_log[idx];
	p->type = ESP_EVENT_TYPE_EVENT;
	p->val = val;
	esp_log_fill_regs(esp, p);

	/* Circular log: wrap with a power-of-two mask. */
	esp->esp_event_cur = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);

	esp->event = val;
}
178
/* Print the entire circular command/event log, oldest entry first
 * (the log cursor points at the next slot to overwrite, which is the
 * oldest entry).
 */
static void esp_dump_cmd_log(struct esp *esp)
{
	int idx = esp->esp_event_cur;
	int stop = idx;

	shost_printk(KERN_INFO, esp->host, "Dumping command log\n");
	do {
		struct esp_event_ent *p = &esp->esp_event_log[idx];

		shost_printk(KERN_INFO, esp->host,
			     "ent[%d] %s val[%02x] sreg[%02x] seqreg[%02x] "
			     "sreg2[%02x] ireg[%02x] ss[%02x] event[%02x]\n",
			     idx,
			     p->type == ESP_EVENT_TYPE_CMD ? "CMD" : "EVENT",
			     p->val, p->sreg, p->seqreg,
			     p->sreg2, p->ireg, p->select_state, p->event);

		idx = (idx + 1) & (ESP_EVENT_LOG_SZ - 1);
	} while (idx != stop);
}
199
/* Flush the chip FIFO.  On ESP236 the flush is not instantaneous, so
 * poll the FIFO-flags register (bounded to ~1ms) until it drains.
 */
static void esp_flush_fifo(struct esp *esp)
{
	scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	if (esp->rev == ESP236) {
		int lim = 1000;

		while (esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES) {
			if (--lim == 0) {
				shost_printk(KERN_ALERT, esp->host,
					     "ESP_FF_BYTES will not clear!\n");
				break;
			}
			udelay(1);
		}
	}
}
216
/* Drain the FASHME FIFO into esp->fifo[].  The HME FIFO is read two
 * bytes per flag count; if the ESP_STAT2_F1BYTE status indicates a
 * dangling odd byte, a dummy write pushes it out so it can be read,
 * then the FIFO is flushed.  esp->fifo_cnt receives the byte count.
 */
static void hme_read_fifo(struct esp *esp)
{
	int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	int idx = 0;

	while (fcnt--) {
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
	}
	if (esp->sreg2 & ESP_STAT2_F1BYTE) {
		esp_write8(0, ESP_FDATA);
		esp->fifo[idx++] = esp_read8(ESP_FDATA);
		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
	}
	esp->fifo_cnt = idx;
}
233
234static void esp_set_all_config3(struct esp *esp, u8 val)
235{
236 int i;
237
238 for (i = 0; i < ESP_MAX_TARGET; i++)
239 esp->target[i].esp_config3 = val;
240}
241
/* Reset the ESP chip, _not_ the SCSI bus.  Re-detects the exact chip
 * variant (the family code is only readable right after reset), then
 * reloads every configuration register from the driver's cached
 * values.
 */
static void esp_reset_esp(struct esp *esp)
{
	u8 family_code, version;

	/* Now reset the ESP chip */
	scsi_esp_cmd(esp, ESP_CMD_RC);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);
	if (esp->rev == FAST)
		esp_write8(ESP_CONFIG2_FENAB, ESP_CFG2);
	scsi_esp_cmd(esp, ESP_CMD_NULL | ESP_CMD_DMA);

	/* This is the only point at which it is reliable to read
	 * the ID-code for a fast ESP chip variants.
	 */
	esp->max_period = ((35 * esp->ccycle) / 1000);
	if (esp->rev == FAST) {
		version = esp_read8(ESP_UID);
		family_code = (version & 0xf8) >> 3;
		if (family_code == 0x02)
			esp->rev = FAS236;
		else if (family_code == 0x0a)
			esp->rev = FASHME; /* Version is usually '5'. */
		else
			esp->rev = FAS100A;
		esp->min_period = ((4 * esp->ccycle) / 1000);
	} else {
		esp->min_period = ((5 * esp->ccycle) / 1000);
	}
	if (esp->rev == FAS236) {
		/*
		 * The AM53c974 chip returns the same ID as FAS236;
		 * try to configure glitch eater.
		 */
		u8 config4 = ESP_CONFIG4_GE1;
		esp_write8(config4, ESP_CFG4);
		config4 = esp_read8(ESP_CFG4);
		/* If the glitch-eater bit stuck, this is an AM53c974. */
		if (config4 & ESP_CONFIG4_GE1) {
			esp->rev = PCSCSI;
			esp_write8(esp->config4, ESP_CFG4);
		}
	}
	esp->max_period = (esp->max_period + 3) >> 2;
	esp->min_period = (esp->min_period + 3) >> 2;

	esp_write8(esp->config1, ESP_CFG1);
	switch (esp->rev) {
	case ESP100:
		/* nothing to do */
		break;

	case ESP100A:
		esp_write8(esp->config2, ESP_CFG2);
		break;

	case ESP236:
		/* Slow 236 */
		esp_write8(esp->config2, ESP_CFG2);
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		break;

	case FASHME:
		esp->config2 |= (ESP_CONFIG2_HME32 | ESP_CONFIG2_HMEFENAB);
		/* fallthrough... */

	case FAS236:
	case PCSCSI:
		/* Fast 236, AM53c974 or HME */
		esp_write8(esp->config2, ESP_CFG2);
		if (esp->rev == FASHME) {
			u8 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLOCK | ESP_CONFIG3_OBPUSH;
			if (esp->scsi_id >= 8)
				cfg3 |= ESP_CONFIG3_IDBIT3;
			esp_set_all_config3(esp, cfg3);
		} else {
			u32 cfg3 = esp->target[0].esp_config3;

			cfg3 |= ESP_CONFIG3_FCLK;
			esp_set_all_config3(esp, cfg3);
		}
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		/* radelay = reselect/reconnect DMA delay for this rev */
		if (esp->rev == FASHME) {
			esp->radelay = 80;
		} else {
			if (esp->flags & ESP_FLAG_DIFFERENTIAL)
				esp->radelay = 0;
			else
				esp->radelay = 96;
		}
		break;

	case FAS100A:
		/* Fast 100a */
		esp_write8(esp->config2, ESP_CFG2);
		esp_set_all_config3(esp,
				    (esp->target[0].esp_config3 |
				     ESP_CONFIG3_FCLOCK));
		esp->prev_cfg3 = esp->target[0].esp_config3;
		esp_write8(esp->prev_cfg3, ESP_CFG3);
		esp->radelay = 32;
		break;

	default:
		break;
	}

	/* Reload the configuration registers */
	esp_write8(esp->cfact, ESP_CFACT);

	esp->prev_stp = 0;
	esp_write8(esp->prev_stp, ESP_STP);

	esp->prev_soff = 0;
	esp_write8(esp->prev_soff, ESP_SOFF);

	esp_write8(esp->neg_defp, ESP_TIMEO);

	/* Eat any bitrot in the chip */
	esp_read8(ESP_INTRPT);
	udelay(100);
}
367
/* DMA-map the command's scatterlist through the bus glue and prime the
 * per-command cursor (cur_sg/cur_residue) plus the total residue.
 */
static void esp_map_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	struct scatterlist *sg = scsi_sglist(cmd);
	int dir = cmd->sc_data_direction;
	int total, i;

	if (dir == DMA_NONE)
		return;

	spriv->u.num_sg = esp->ops->map_sg(esp, sg, scsi_sg_count(cmd), dir);
	spriv->cur_residue = sg_dma_len(sg);
	spriv->cur_sg = sg;

	/* Total residue is the sum of all mapped segment lengths. */
	total = 0;
	for (i = 0; i < spriv->u.num_sg; i++)
		total += sg_dma_len(&sg[i]);
	spriv->tot_residue = total;
}
387
388static dma_addr_t esp_cur_dma_addr(struct esp_cmd_entry *ent,
389 struct scsi_cmnd *cmd)
390{
391 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
392
393 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
394 return ent->sense_dma +
395 (ent->sense_ptr - cmd->sense_buffer);
396 }
397
398 return sg_dma_address(p->cur_sg) +
399 (sg_dma_len(p->cur_sg) -
400 p->cur_residue);
401}
402
403static unsigned int esp_cur_dma_len(struct esp_cmd_entry *ent,
404 struct scsi_cmnd *cmd)
405{
406 struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
407
408 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
409 return SCSI_SENSE_BUFFERSIZE -
410 (ent->sense_ptr - cmd->sense_buffer);
411 }
412 return p->cur_residue;
413}
414
/* Advance the data-transfer cursor by @len bytes after a transfer
 * completes: the sense pointer during auto-sense, otherwise the
 * scatterlist residues, stepping to the next segment when the current
 * one is exhausted.  Going negative means the chip moved more data
 * than was mapped; clamp and complain.
 */
static void esp_advance_dma(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int len)
{
	struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		ent->sense_ptr += len;
		return;
	}

	p->cur_residue -= len;
	p->tot_residue -= len;
	if (p->cur_residue < 0 || p->tot_residue < 0) {
		shost_printk(KERN_ERR, esp->host,
			     "Data transfer overflow.\n");
		shost_printk(KERN_ERR, esp->host,
			     "cur_residue[%d] tot_residue[%d] len[%u]\n",
			     p->cur_residue, p->tot_residue, len);
		p->cur_residue = 0;
		p->tot_residue = 0;
	}
	/* Current segment drained but more data remains: next segment. */
	if (!p->cur_residue && p->tot_residue) {
		p->cur_sg++;
		p->cur_residue = sg_dma_len(p->cur_sg);
	}
}
441
/* Undo esp_map_dma(): unmap the command's scatterlist via the bus
 * glue.  No-op for commands with no data phase.
 */
static void esp_unmap_dma(struct esp *esp, struct scsi_cmnd *cmd)
{
	struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
	int dir = cmd->sc_data_direction;

	if (dir == DMA_NONE)
		return;

	esp->ops->unmap_sg(esp, scsi_sglist(cmd), spriv->u.num_sg, dir);
}
452
453static void esp_save_pointers(struct esp *esp, struct esp_cmd_entry *ent)
454{
455 struct scsi_cmnd *cmd = ent->cmd;
456 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
457
458 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
459 ent->saved_sense_ptr = ent->sense_ptr;
460 return;
461 }
462 ent->saved_cur_residue = spriv->cur_residue;
463 ent->saved_cur_sg = spriv->cur_sg;
464 ent->saved_tot_residue = spriv->tot_residue;
465}
466
467static void esp_restore_pointers(struct esp *esp, struct esp_cmd_entry *ent)
468{
469 struct scsi_cmnd *cmd = ent->cmd;
470 struct esp_cmd_priv *spriv = ESP_CMD_PRIV(cmd);
471
472 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
473 ent->sense_ptr = ent->saved_sense_ptr;
474 return;
475 }
476 spriv->cur_residue = ent->saved_cur_residue;
477 spriv->cur_sg = ent->saved_cur_sg;
478 spriv->tot_residue = ent->saved_tot_residue;
479}
480
481static void esp_check_command_len(struct esp *esp, struct scsi_cmnd *cmd)
482{
483 if (cmd->cmd_len == 6 ||
484 cmd->cmd_len == 10 ||
485 cmd->cmd_len == 12) {
486 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
487 } else {
488 esp->flags |= ESP_FLAG_DOING_SLOWCMD;
489 }
490}
491
/* Load target @tgt's CONFIG3 value into the chip, but only when it
 * differs from the last value written (cached in esp->prev_cfg3).
 * ESP100/100A have no CONFIG3 register, so skip those revisions.
 */
static void esp_write_tgt_config3(struct esp *esp, int tgt)
{
	if (esp->rev > ESP100A) {
		u8 val = esp->target[tgt].esp_config3;

		if (val != esp->prev_cfg3) {
			esp->prev_cfg3 = val;
			esp_write8(val, ESP_CFG3);
		}
	}
}
503
/* Load target @tgt's synchronous offset and period into the chip,
 * skipping each register write when the cached previous value already
 * matches.
 */
static void esp_write_tgt_sync(struct esp *esp, int tgt)
{
	u8 off = esp->target[tgt].esp_offset;
	u8 per = esp->target[tgt].esp_period;

	if (off != esp->prev_soff) {
		esp->prev_soff = off;
		esp_write8(off, ESP_SOFF);
	}
	if (per != esp->prev_stp) {
		esp->prev_stp = per;
		esp_write8(per, ESP_STP);
	}
}
518
/* Clamp a DMA transfer starting at @dma_addr of @dma_len bytes to what
 * the chip variant can actually move in one programmed transfer.
 */
static u32 esp_dma_length_limit(struct esp *esp, u32 dma_addr, u32 dma_len)
{
	if (esp->rev == FASHME) {
		/* Arbitrary segment boundaries, 24-bit counts. */
		if (dma_len > (1U << 24))
			dma_len = (1U << 24);
	} else {
		u32 base, end;

		/* ESP chip limits other variants by 16-bits of transfer
		 * count.  Actually on FAS100A and FAS236 we could get
		 * 24-bits of transfer count by enabling ESP_CONFIG2_FENAB
		 * in the ESP_CFG2 register but that causes other unwanted
		 * changes so we don't use it currently.
		 */
		if (dma_len > (1U << 16))
			dma_len = (1U << 16);

		/* All of the DMA variants hooked up to these chips
		 * cannot handle crossing a 24-bit address boundary.
		 */
		base = dma_addr & ((1U << 24) - 1U);
		end = base + dma_len;
		if (end > (1U << 24))
			end = (1U << 24);
		dma_len = end - base;
	}
	return dma_len;
}
548
549static int esp_need_to_nego_wide(struct esp_target_data *tp)
550{
551 struct scsi_target *target = tp->starget;
552
553 return spi_width(target) != tp->nego_goal_width;
554}
555
556static int esp_need_to_nego_sync(struct esp_target_data *tp)
557{
558 struct scsi_target *target = tp->starget;
559
560 /* When offset is zero, period is "don't care". */
561 if (!spi_offset(target) && !tp->nego_goal_offset)
562 return 0;
563
564 if (spi_offset(target) == tp->nego_goal_offset &&
565 spi_period(target) == tp->nego_goal_period)
566 return 0;
567
568 return 1;
569}
570
/* Try to claim a queue slot on the LUN for @ent.  Untagged commands
 * take the single non_tagged_cmd slot and must "plug" the queue
 * (lp->hold) until all outstanding tagged commands drain; tagged
 * commands claim the slot indexed by their tag value.  Returns 0 on
 * success, -EBUSY when the command must wait.
 */
static int esp_alloc_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (!ent->orig_tag[0]) {
		/* Non-tagged, slot already taken? */
		if (lp->non_tagged_cmd)
			return -EBUSY;

		if (lp->hold) {
			/* We are being held by active tagged
			 * commands.
			 */
			if (lp->num_tagged)
				return -EBUSY;

			/* Tagged commands completed, we can unplug
			 * the queue and run this untagged command.
			 */
			lp->hold = 0;
		} else if (lp->num_tagged) {
			/* Plug the queue until num_tagged decreases
			 * to zero in esp_free_lun_tag.
			 */
			lp->hold = 1;
			return -EBUSY;
		}

		lp->non_tagged_cmd = ent;
		return 0;
	} else {
		/* Tagged command, see if blocked by a
		 * non-tagged one.
		 */
		if (lp->non_tagged_cmd || lp->hold)
			return -EBUSY;
	}

	/* orig_tag[1] is the tag value; the slot must be free. */
	BUG_ON(lp->tagged_cmds[ent->orig_tag[1]]);

	lp->tagged_cmds[ent->orig_tag[1]] = ent;
	lp->num_tagged++;

	return 0;
}
615
/* Release the LUN queue slot claimed by esp_alloc_lun_tag() for @ent,
 * whether it was a tagged slot or the single untagged slot.
 */
static void esp_free_lun_tag(struct esp_cmd_entry *ent,
			     struct esp_lun_data *lp)
{
	if (ent->orig_tag[0]) {
		BUG_ON(lp->tagged_cmds[ent->orig_tag[1]] != ent);
		lp->tagged_cmds[ent->orig_tag[1]] = NULL;
		lp->num_tagged--;
	} else {
		BUG_ON(lp->non_tagged_cmd != ent);
		lp->non_tagged_cmd = NULL;
	}
}
628
629/* When a contingent allegiance conditon is created, we force feed a
630 * REQUEST_SENSE command to the device to fetch the sense data. I
631 * tried many other schemes, relying on the scsi error handling layer
632 * to send out the REQUEST_SENSE automatically, but this was difficult
633 * to get right especially in the presence of applications like smartd
634 * which use SG_IO to send out their own REQUEST_SENSE commands.
635 */
/* Build and issue a REQUEST_SENSE for @ent: map the sense buffer (once
 * per command), assemble IDENTIFY + 6-byte REQUEST_SENSE CDB in the
 * command block, program the bus ID / sync / config registers for the
 * target, and start a basic select.
 */
static void esp_autosense(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_cmnd *cmd = ent->cmd;
	struct scsi_device *dev = cmd->device;
	int tgt, lun;
	u8 *p, val;

	tgt = dev->id;
	lun = dev->lun;

	if (!ent->sense_ptr) {
		esp_log_autosense("Doing auto-sense for tgt[%d] lun[%d]\n",
				  tgt, lun);

		ent->sense_ptr = cmd->sense_buffer;
		ent->sense_dma = esp->ops->map_single(esp,
						      ent->sense_ptr,
						      SCSI_SENSE_BUFFERSIZE,
						      DMA_FROM_DEVICE);
	}
	ent->saved_sense_ptr = ent->sense_ptr;

	esp->active_cmd = ent;

	p = esp->command_block;
	esp->msg_out_len = 0;

	*p++ = IDENTIFY(0, lun);
	*p++ = REQUEST_SENSE;
	/* Pre-SCSI-3 devices want the LUN repeated in CDB byte 1. */
	*p++ = ((dev->scsi_level <= SCSI_2) ?
		(lun << 5) : 0);
	*p++ = 0;
	*p++ = 0;
	*p++ = SCSI_SENSE_BUFFERSIZE;
	*p++ = 0;

	esp->select_state = ESP_SELECT_BASIC;

	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	esp_send_dma_cmd(esp, val, 16, ESP_CMD_SELA);
}
687
/* Scan the queued-command list for the first command that can be
 * issued now.  Auto-sense requests always win (untagged).  Otherwise a
 * tag message is generated for the command and a LUN queue slot is
 * claimed; commands whose LUN is busy are skipped.  Returns NULL when
 * nothing is issuable.
 */
static struct esp_cmd_entry *find_and_prep_issuable_command(struct esp *esp)
{
	struct esp_cmd_entry *ent;

	list_for_each_entry(ent, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;
		struct scsi_device *dev = cmd->device;
		struct esp_lun_data *lp = dev->hostdata;

		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
			return ent;
		}

		if (!spi_populate_tag_msg(&ent->tag[0], cmd)) {
			ent->tag[0] = 0;
			ent->tag[1] = 0;
		}
		/* Keep the original tag; ent->tag may be cleared later. */
		ent->orig_tag[0] = ent->tag[0];
		ent->orig_tag[1] = ent->tag[1];

		if (esp_alloc_lun_tag(ent, lp) < 0)
			continue;

		return ent;
	}

	return NULL;
}
718
/* If the bus is idle, pick the next issuable command, build its
 * IDENTIFY/tag/CDB bytes in the command block, program the chip's
 * target registers, and start the appropriate select sequence (basic,
 * with-ATN3 for tagged, or select-and-stop for "slow" commands that
 * need the message-out phase driven by hand).
 */
static void esp_maybe_execute_command(struct esp *esp)
{
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	struct scsi_cmnd *cmd;
	struct esp_cmd_entry *ent;
	int tgt, lun, i;
	u32 val, start_cmd;
	u8 *p;

	if (esp->active_cmd ||
	    (esp->flags & ESP_FLAG_RESETTING))
		return;

	ent = find_and_prep_issuable_command(esp);
	if (!ent)
		return;

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp_autosense(esp, ent);
		return;
	}

	cmd = ent->cmd;
	dev = cmd->device;
	tgt = dev->id;
	lun = dev->lun;
	tp = &esp->target[tgt];
	lp = dev->hostdata;

	list_move(&ent->list, &esp->active_cmds);

	esp->active_cmd = ent;

	esp_map_dma(esp, cmd);
	esp_save_pointers(esp, ent);

	esp_check_command_len(esp, cmd);

	p = esp->command_block;

	esp->msg_out_len = 0;
	if (tp->flags & ESP_TGT_CHECK_NEGO) {
		/* Need to negotiate.  If the target is broken
		 * go for synchronous transfers and non-wide.
		 */
		if (tp->flags & ESP_TGT_BROKEN) {
			tp->flags &= ~ESP_TGT_DISCONNECT;
			tp->nego_goal_period = 0;
			tp->nego_goal_offset = 0;
			tp->nego_goal_width = 0;
			tp->nego_goal_tags = 0;
		}

		/* If the settings are not changing, skip this. */
		if (spi_width(tp->starget) == tp->nego_goal_width &&
		    spi_period(tp->starget) == tp->nego_goal_period &&
		    spi_offset(tp->starget) == tp->nego_goal_offset) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			goto build_identify;
		}

		/* Only FASHME can do wide transfers. */
		if (esp->rev == FASHME && esp_need_to_nego_wide(tp)) {
			esp->msg_out_len =
				spi_populate_width_msg(&esp->msg_out[0],
						       (tp->nego_goal_width ?
							1 : 0));
			tp->flags |= ESP_TGT_NEGO_WIDE;
		} else if (esp_need_to_nego_sync(tp)) {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
		} else {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
		}

		/* Process it like a slow command. */
		if (tp->flags & (ESP_TGT_NEGO_WIDE | ESP_TGT_NEGO_SYNC))
			esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

build_identify:
	/* If we don't have a lun-data struct yet, we're probing
	 * so do not disconnect.  Also, do not disconnect unless
	 * we have a tag on this command.
	 */
	if (lp && (tp->flags & ESP_TGT_DISCONNECT) && ent->tag[0])
		*p++ = IDENTIFY(1, lun);
	else
		*p++ = IDENTIFY(0, lun);

	if (ent->tag[0] && esp->rev == ESP100) {
		/* ESP100 lacks select w/atn3 command, use select
		 * and stop instead.
		 */
		esp->flags |= ESP_FLAG_DOING_SLOWCMD;
	}

	if (!(esp->flags & ESP_FLAG_DOING_SLOWCMD)) {
		start_cmd = ESP_CMD_SELA;
		if (ent->tag[0]) {
			*p++ = ent->tag[0];
			*p++ = ent->tag[1];

			start_cmd = ESP_CMD_SA3;
		}

		for (i = 0; i < cmd->cmd_len; i++)
			*p++ = cmd->cmnd[i];

		esp->select_state = ESP_SELECT_BASIC;
	} else {
		/* Slow path: CDB bytes are fed later, during msg-out. */
		esp->cmd_bytes_left = cmd->cmd_len;
		esp->cmd_bytes_ptr = &cmd->cmnd[0];

		if (ent->tag[0]) {
			/* Prepend the tag message bytes to msg_out. */
			for (i = esp->msg_out_len - 1;
			     i >= 0; i--)
				esp->msg_out[i + 2] = esp->msg_out[i];
			esp->msg_out[0] = ent->tag[0];
			esp->msg_out[1] = ent->tag[1];
			esp->msg_out_len += 2;
		}

		start_cmd = ESP_CMD_SELAS;
		esp->select_state = ESP_SELECT_MSGOUT;
	}
	val = tgt;
	if (esp->rev == FASHME)
		val |= ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT;
	esp_write8(val, ESP_BUSID);

	esp_write_tgt_sync(esp, tgt);
	esp_write_tgt_config3(esp, tgt);

	val = (p - esp->command_block);

	if (esp_debug & ESP_DEBUG_SCSICMD) {
		printk("ESP: tgt[%d] lun[%d] scsi_cmd [ ", tgt, lun);
		for (i = 0; i < cmd->cmd_len; i++)
			printk("%02x ", cmd->cmnd[i]);
		printk("]\n");
	}

	esp_send_dma_cmd(esp, val, 16, start_cmd);
}
868
869static struct esp_cmd_entry *esp_get_ent(struct esp *esp)
870{
871 struct list_head *head = &esp->esp_cmd_pool;
872 struct esp_cmd_entry *ret;
873
874 if (list_empty(head)) {
875 ret = kzalloc(sizeof(struct esp_cmd_entry), GFP_ATOMIC);
876 } else {
877 ret = list_entry(head->next, struct esp_cmd_entry, list);
878 list_del(&ret->list);
879 memset(ret, 0, sizeof(*ret));
880 }
881 return ret;
882}
883
/* Return a command entry to the free pool for reuse. */
static void esp_put_ent(struct esp *esp, struct esp_cmd_entry *ent)
{
	list_add(&ent->list, &esp->esp_cmd_pool);
}
888
/* Complete @cmd with @result: release DMA mappings and the LUN queue
 * slot, signal any error-handler waiter, finish auto-sense bookkeeping
 * (reporting CHECK CONDITION with sense data provided), call the
 * midlayer completion, recycle the entry, and try to start the next
 * queued command.
 */
static void esp_cmd_is_done(struct esp *esp, struct esp_cmd_entry *ent,
			    struct scsi_cmnd *cmd, unsigned int result)
{
	struct scsi_device *dev = cmd->device;
	int tgt = dev->id;
	int lun = dev->lun;

	esp->active_cmd = NULL;
	esp_unmap_dma(esp, cmd);
	esp_free_lun_tag(ent, dev->hostdata);
	cmd->result = result;

	if (ent->eh_done) {
		complete(ent->eh_done);
		ent->eh_done = NULL;
	}

	if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
		esp->ops->unmap_single(esp, ent->sense_dma,
				       SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
		ent->sense_ptr = NULL;

		/* Restore the message/status bytes to what we actually
		 * saw originally.  Also, report that we are providing
		 * the sense data.
		 */
		cmd->result = ((DRIVER_SENSE << 24) |
			       (DID_OK << 16) |
			       (COMMAND_COMPLETE << 8) |
			       (SAM_STAT_CHECK_CONDITION << 0));

		ent->flags &= ~ESP_CMD_FLAG_AUTOSENSE;
		if (esp_debug & ESP_DEBUG_AUTOSENSE) {
			int i;

			printk("esp%d: tgt[%d] lun[%d] AUTO SENSE[ ",
			       esp->host->unique_id, tgt, lun);
			for (i = 0; i < 18; i++)
				printk("%02x ", cmd->sense_buffer[i]);
			printk("]\n");
		}
	}

	cmd->scsi_done(cmd);

	list_del(&ent->list);
	esp_put_ent(esp, ent);

	esp_maybe_execute_command(esp);
}
939
/* Pack a SCSI midlayer result word: status in byte 0, message byte in
 * byte 1, driver code in byte 2.
 */
static unsigned int compose_result(unsigned int status, unsigned int message,
				   unsigned int driver_code)
{
	unsigned int res = status;

	res |= message << 8;
	res |= driver_code << 16;
	return res;
}
945
/* A target reported QUEUE FULL: tell the midlayer to track and lower
 * the queue depth to the number of commands actually in flight.
 */
static void esp_event_queue_full(struct esp *esp, struct esp_cmd_entry *ent)
{
	struct scsi_device *dev = ent->cmd->device;
	struct esp_lun_data *lp = dev->hostdata;

	scsi_track_queue_full(dev, lp->num_tagged - 1);
}
953
/* Midlayer queuecommand entry (locked variant, wrapped by
 * DEF_SCSI_QCMD below): allocate a command entry, queue it, and try to
 * start it immediately if the bus is idle.  Returns
 * SCSI_MLQUEUE_HOST_BUSY when no entry can be allocated.
 */
static int esp_queuecommand_lck(struct scsi_cmnd *cmd, void (*done)(struct scsi_cmnd *))
{
	struct scsi_device *dev = cmd->device;
	struct esp *esp = shost_priv(dev->host);
	struct esp_cmd_priv *spriv;
	struct esp_cmd_entry *ent;

	ent = esp_get_ent(esp);
	if (!ent)
		return SCSI_MLQUEUE_HOST_BUSY;

	ent->cmd = cmd;

	cmd->scsi_done = done;

	spriv = ESP_CMD_PRIV(cmd);
	/* Sentinel: no single-buffer DMA mapping yet. */
	spriv->u.dma_addr = ~(dma_addr_t)0x0;

	list_add_tail(&ent->list, &esp->queued_cmds);

	esp_maybe_execute_command(esp);

	return 0;
}

static DEF_SCSI_QCMD(esp_queuecommand)
980
/* Return nonzero if the chip's status register reports a gross error;
 * the chip needs a reset in that case (not yet implemented, see XXX).
 */
static int esp_check_gross_error(struct esp *esp)
{
	if (esp->sreg & ESP_STAT_SPAM) {
		/* Gross Error, could be one of:
		 * - top of fifo overwritten
		 * - top of command register overwritten
		 * - DMA programmed with wrong direction
		 * - improper phase change
		 */
		shost_printk(KERN_ERR, esp->host,
			     "Gross error sreg[%02x]\n", esp->sreg);
		/* XXX Reset the chip. XXX */
		return 1;
	}
	return 0;
}
997
/* Classify an interrupt that arrived with no chip-interrupt bit set.
 * Returns 1 for a real SCSI bus-reset interrupt, -1 for a spurious
 * interrupt or DMA error (chip should be reset), 0 to proceed with
 * normal interrupt handling.
 */
static int esp_check_spur_intr(struct esp *esp)
{
	switch (esp->rev) {
	case ESP100:
	case ESP100A:
		/* The interrupt pending bit of the status register cannot
		 * be trusted on these revisions.
		 */
		esp->sreg &= ~ESP_STAT_INTR;
		break;

	default:
		if (!(esp->sreg & ESP_STAT_INTR)) {
			/* A bus reset can interrupt without STAT_INTR. */
			if (esp->ireg & ESP_INTR_SR)
				return 1;

			/* If the DMA is indicating interrupt pending and the
			 * ESP is not, the only possibility is a DMA error.
			 */
			if (!esp->ops->dma_error(esp)) {
				shost_printk(KERN_ERR, esp->host,
					     "Spurious irq, sreg=%02x.\n",
					     esp->sreg);
				return -1;
			}

			shost_printk(KERN_ERR, esp->host, "DMA error\n");

			/* XXX Reset the chip. XXX */
			return -1;
		}
		break;
	}

	return 0;
}
1034
1035static void esp_schedule_reset(struct esp *esp)
1036{
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001037 esp_log_reset("esp_schedule_reset() from %pf\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001038 __builtin_return_address(0));
1039 esp->flags |= ESP_FLAG_RESETTING;
1040 esp_event(esp, ESP_EVENT_RESET);
1041}
1042
/* In order to avoid having to add a special half-reconnected state
 * into the driver we just sit here and poll through the rest of
 * the reselection process to get the tag message bytes.
 *
 * Returns the reconnecting command entry on success, or NULL when the
 * tag bytes could not be retrieved sanely (the caller then schedules
 * a bus reset).
 */
static struct esp_cmd_entry *esp_reconnect_with_tag(struct esp *esp,
						    struct esp_lun_data *lp)
{
	struct esp_cmd_entry *ent;
	int i;

	if (!lp->num_tagged) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect w/num_tagged==0\n");
		return NULL;
	}

	esp_log_reconnect("reconnect tag, ");

	/* Busy-wait for the first interrupt of the reselection. */
	for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
		if (esp->ops->irq_pending(esp))
			break;
	}
	if (i == ESP_QUICKIRQ_LIMIT) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect IRQ1 timeout\n");
		return NULL;
	}

	esp->sreg = esp_read8(ESP_STATUS);
	esp->ireg = esp_read8(ESP_INTRPT);

	esp_log_reconnect("IRQ(%d:%x:%x), ",
			  i, esp->ireg, esp->sreg);

	if (esp->ireg & ESP_INTR_DC) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, got disconnect.\n");
		return NULL;
	}

	/* We must be in message-in phase to read the tag bytes. */
	if ((esp->sreg & ESP_STAT_PMASK) != ESP_MIP) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, not MIP sreg[%02x].\n", esp->sreg);
		return NULL;
	}

	/* DMA in the tag bytes... */
	esp->command_block[0] = 0xff;
	esp->command_block[1] = 0xff;
	esp->ops->send_dma_cmd(esp, esp->command_block_dma,
			       2, 2, 1, ESP_CMD_DMA | ESP_CMD_TI);

	/* ACK the message. */
	scsi_esp_cmd(esp, ESP_CMD_MOK);

	/* Poll for the function-done interrupt that signals both tag
	 * bytes have landed in the command block.
	 */
	for (i = 0; i < ESP_RESELECT_TAG_LIMIT; i++) {
		if (esp->ops->irq_pending(esp)) {
			esp->sreg = esp_read8(ESP_STATUS);
			esp->ireg = esp_read8(ESP_INTRPT);
			if (esp->ireg & ESP_INTR_FDONE)
				break;
		}
		udelay(1);
	}
	if (i == ESP_RESELECT_TAG_LIMIT) {
		shost_printk(KERN_ERR, esp->host, "Reconnect IRQ2 timeout\n");
		return NULL;
	}
	esp->ops->dma_drain(esp);
	esp->ops->dma_invalidate(esp);

	esp_log_reconnect("IRQ2(%d:%x:%x) tag[%x:%x]\n",
			  i, esp->ireg, esp->sreg,
			  esp->command_block[0],
			  esp->command_block[1]);

	/* Byte 0 is the tag message type, byte 1 the tag value. */
	if (esp->command_block[0] < SIMPLE_QUEUE_TAG ||
	    esp->command_block[0] > ORDERED_QUEUE_TAG) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, bad tag type %02x.\n",
			     esp->command_block[0]);
		return NULL;
	}

	ent = lp->tagged_cmds[esp->command_block[1]];
	if (!ent) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no entry for tag %02x.\n",
			     esp->command_block[1]);
		return NULL;
	}

	return ent;
}
1137
/* Handle a reselection by a target that previously disconnected.
 * Returns 1 when a previously-issued command was successfully made
 * active again, 0 when the driver gave up and scheduled a bus reset.
 */
static int esp_reconnect(struct esp *esp)
{
	struct esp_cmd_entry *ent;
	struct esp_target_data *tp;
	struct esp_lun_data *lp;
	struct scsi_device *dev;
	int target, lun;

	BUG_ON(esp->active_cmd);
	if (esp->rev == FASHME) {
		/* FASHME puts the target and lun numbers directly
		 * into the fifo.
		 */
		target = esp->fifo[0];
		lun = esp->fifo[1] & 0x7;
	} else {
		u8 bits = esp_read8(ESP_FDATA);

		/* Older chips put the lun directly into the fifo, but
		 * the target is given as a sample of the arbitration
		 * lines on the bus at reselection time.  So we should
		 * see the ID of the ESP and the one reconnecting target
		 * set in the bitmap.
		 */
		if (!(bits & esp->scsi_id_mask))
			goto do_reset;
		bits &= ~esp->scsi_id_mask;
		/* Exactly one other ID bit may be set: the target's. */
		if (!bits || (bits & (bits - 1)))
			goto do_reset;

		target = ffs(bits) - 1;
		lun = (esp_read8(ESP_FDATA) & 0x7);

		scsi_esp_cmd(esp, ESP_CMD_FLUSH);
		if (esp->rev == ESP100) {
			u8 ireg = esp_read8(ESP_INTRPT);
			/* This chip has a bug during reselection that can
			 * cause a spurious illegal-command interrupt, which
			 * we simply ACK here.  Another possibility is a bus
			 * reset so we must check for that.
			 */
			if (ireg & ESP_INTR_SR)
				goto do_reset;
		}
		scsi_esp_cmd(esp, ESP_CMD_NULL);
	}

	/* Re-program the chip with the reconnecting target's sync and
	 * config3 settings before talking to it.
	 */
	esp_write_tgt_sync(esp, target);
	esp_write_tgt_config3(esp, target);

	scsi_esp_cmd(esp, ESP_CMD_MOK);

	if (esp->rev == FASHME)
		esp_write8(target | ESP_BUSID_RESELID | ESP_BUSID_CTR32BIT,
			   ESP_BUSID);

	tp = &esp->target[target];
	dev = __scsi_device_lookup_by_target(tp->starget, lun);
	if (!dev) {
		shost_printk(KERN_ERR, esp->host,
			     "Reconnect, no lp tgt[%u] lun[%u]\n",
			     target, lun);
		goto do_reset;
	}
	lp = dev->hostdata;

	/* An untagged command resumes directly; otherwise poll for the
	 * tag message bytes to identify which tagged command it is.
	 */
	ent = lp->non_tagged_cmd;
	if (!ent) {
		ent = esp_reconnect_with_tag(esp, lp);
		if (!ent)
			goto do_reset;
	}

	esp->active_cmd = ent;

	/* A pending abort for this command is delivered now. */
	if (ent->flags & ESP_CMD_FLAG_ABORT) {
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}

	esp_event(esp, ESP_EVENT_CHECK_PHASE);
	esp_restore_pointers(esp, ent);
	esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
	return 1;

do_reset:
	esp_schedule_reset(esp);
	return 0;
}
1228
1229static int esp_finish_select(struct esp *esp)
1230{
1231 struct esp_cmd_entry *ent;
1232 struct scsi_cmnd *cmd;
1233 u8 orig_select_state;
1234
1235 orig_select_state = esp->select_state;
1236
1237 /* No longer selecting. */
1238 esp->select_state = ESP_SELECT_NONE;
1239
1240 esp->seqreg = esp_read8(ESP_SSTEP) & ESP_STEP_VBITS;
1241 ent = esp->active_cmd;
1242 cmd = ent->cmd;
1243
1244 if (esp->ops->dma_error(esp)) {
1245 /* If we see a DMA error during or as a result of selection,
1246 * all bets are off.
1247 */
1248 esp_schedule_reset(esp);
1249 esp_cmd_is_done(esp, ent, cmd, (DID_ERROR << 16));
1250 return 0;
1251 }
1252
1253 esp->ops->dma_invalidate(esp);
1254
1255 if (esp->ireg == (ESP_INTR_RSEL | ESP_INTR_FDONE)) {
1256 struct esp_target_data *tp = &esp->target[cmd->device->id];
1257
1258 /* Carefully back out of the selection attempt. Release
1259 * resources (such as DMA mapping & TAG) and reset state (such
1260 * as message out and command delivery variables).
1261 */
1262 if (!(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1263 esp_unmap_dma(esp, cmd);
1264 esp_free_lun_tag(ent, cmd->device->hostdata);
1265 tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_NEGO_WIDE);
1266 esp->flags &= ~ESP_FLAG_DOING_SLOWCMD;
1267 esp->cmd_bytes_ptr = NULL;
1268 esp->cmd_bytes_left = 0;
1269 } else {
1270 esp->ops->unmap_single(esp, ent->sense_dma,
1271 SCSI_SENSE_BUFFERSIZE,
1272 DMA_FROM_DEVICE);
1273 ent->sense_ptr = NULL;
1274 }
1275
1276 /* Now that the state is unwound properly, put back onto
1277 * the issue queue. This command is no longer active.
1278 */
Kirill A. Shutemov63ce2492011-04-01 16:06:09 -07001279 list_move(&ent->list, &esp->queued_cmds);
David S. Millercd9ad582007-04-26 21:19:23 -07001280 esp->active_cmd = NULL;
1281
1282 /* Return value ignored by caller, it directly invokes
1283 * esp_reconnect().
1284 */
1285 return 0;
1286 }
1287
1288 if (esp->ireg == ESP_INTR_DC) {
1289 struct scsi_device *dev = cmd->device;
1290
1291 /* Disconnect. Make sure we re-negotiate sync and
1292 * wide parameters if this target starts responding
1293 * again in the future.
1294 */
1295 esp->target[dev->id].flags |= ESP_TGT_CHECK_NEGO;
1296
1297 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1298 esp_cmd_is_done(esp, ent, cmd, (DID_BAD_TARGET << 16));
1299 return 1;
1300 }
1301
1302 if (esp->ireg == (ESP_INTR_FDONE | ESP_INTR_BSERV)) {
1303 /* Selection successful. On pre-FAST chips we have
1304 * to do a NOP and possibly clean out the FIFO.
1305 */
1306 if (esp->rev <= ESP236) {
1307 int fcnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
1308
1309 scsi_esp_cmd(esp, ESP_CMD_NULL);
1310
1311 if (!fcnt &&
1312 (!esp->prev_soff ||
1313 ((esp->sreg & ESP_STAT_PMASK) != ESP_DIP)))
1314 esp_flush_fifo(esp);
1315 }
1316
1317 /* If we are doing a slow command, negotiation, etc.
1318 * we'll do the right thing as we transition to the
1319 * next phase.
1320 */
1321 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1322 return 0;
1323 }
1324
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001325 shost_printk(KERN_INFO, esp->host,
1326 "Unexpected selection completion ireg[%x]\n", esp->ireg);
David S. Millercd9ad582007-04-26 21:19:23 -07001327 esp_schedule_reset(esp);
1328 return 0;
1329}
1330
/* Compute how many bytes actually transferred during the data phase
 * that just ended, from the chip's transfer counter, FIFO residue and
 * any driver-side residual.  Returns the byte count, or -1 when the
 * ESP100 sync-transfer bug is detected (caller must reset).
 */
static int esp_data_bytes_sent(struct esp *esp, struct esp_cmd_entry *ent,
			       struct scsi_cmnd *cmd)
{
	int fifo_cnt, ecount, bytes_sent, flush_fifo;

	fifo_cnt = esp_read8(ESP_FFLAGS) & ESP_FF_FBYTES;
	/* In wide mode each FIFO slot carries two bytes. */
	if (esp->prev_cfg3 & ESP_CONFIG3_EWIDE)
		fifo_cnt <<= 1;

	/* Read the untransferred count, unless the counter expired
	 * (ESP_STAT_TCNT), in which case it is zero.
	 */
	ecount = 0;
	if (!(esp->sreg & ESP_STAT_TCNT)) {
		ecount = ((unsigned int)esp_read8(ESP_TCLOW) |
			  (((unsigned int)esp_read8(ESP_TCMED)) << 8));
		if (esp->rev == FASHME)
			ecount |= ((unsigned int)esp_read8(FAS_RLO)) << 16;
		if (esp->rev == PCSCSI && (esp->config2 & ESP_CONFIG2_FENAB))
			ecount |= ((unsigned int)esp_read8(ESP_TCHI)) << 16;
	}

	bytes_sent = esp->data_dma_len;
	bytes_sent -= ecount;
	bytes_sent -= esp->send_cmd_residual;

	/*
	 * The am53c974 has a DMA 'pecularity'. The doc states:
	 * In some odd byte conditions, one residual byte will
	 * be left in the SCSI FIFO, and the FIFO Flags will
	 * never count to '0 '. When this happens, the residual
	 * byte should be retrieved via PIO following completion
	 * of the BLAST operation.
	 */
	if (fifo_cnt == 1 && ent->flags & ESP_CMD_FLAG_RESIDUAL) {
		size_t count = 1;
		size_t offset = bytes_sent;
		u8 bval = esp_read8(ESP_FDATA);

		/* Store the straggler byte at the position the DMA
		 * stopped at: sense buffer for autosense, otherwise
		 * into the command's scatterlist via a kmap.
		 */
		if (ent->flags & ESP_CMD_FLAG_AUTOSENSE)
			ent->sense_ptr[bytes_sent] = bval;
		else {
			struct esp_cmd_priv *p = ESP_CMD_PRIV(cmd);
			u8 *ptr;

			ptr = scsi_kmap_atomic_sg(p->cur_sg, p->u.num_sg,
						  &offset, &count);
			if (likely(ptr)) {
				*(ptr + offset) = bval;
				scsi_kunmap_atomic_sg(ptr);
			}
		}
		bytes_sent += fifo_cnt;
		ent->flags &= ~ESP_CMD_FLAG_RESIDUAL;
	}
	/* On writes, bytes still sitting in the FIFO never made it
	 * to the target.
	 */
	if (!(ent->flags & ESP_CMD_FLAG_WRITE))
		bytes_sent -= fifo_cnt;

	flush_fifo = 0;
	if (!esp->prev_soff) {
		/* Synchronous data transfer, always flush fifo. */
		flush_fifo = 1;
	} else {
		if (esp->rev == ESP100) {
			u32 fflags, phase;

			/* ESP100 has a chip bug where in the synchronous data
			 * phase it can mistake a final long REQ pulse from the
			 * target as an extra data byte.  Fun.
			 *
			 * To detect this case we resample the status register
			 * and fifo flags.  If we're still in a data phase and
			 * we see spurious chunks in the fifo, we return error
			 * to the caller which should reset and set things up
			 * such that we only try future transfers to this
			 * target in synchronous mode.
			 */
			esp->sreg = esp_read8(ESP_STATUS);
			phase = esp->sreg & ESP_STAT_PMASK;
			fflags = esp_read8(ESP_FFLAGS);

			if ((phase == ESP_DOP &&
			     (fflags & ESP_FF_ONOTZERO)) ||
			    (phase == ESP_DIP &&
			     (fflags & ESP_FF_FBYTES)))
				return -1;
		}
		if (!(ent->flags & ESP_CMD_FLAG_WRITE))
			flush_fifo = 1;
	}

	if (flush_fifo)
		esp_flush_fifo(esp);

	return bytes_sent;
}
1424
/* Commit a (re)negotiated synchronous transfer agreement for target
 * @tp and program the chip.  @scsi_period/@scsi_offset are the values
 * reported to the SPI transport class; @esp_stp/@esp_soff are the raw
 * values written to the chip's STP and SOFF registers.
 */
static void esp_setsync(struct esp *esp, struct esp_target_data *tp,
			u8 scsi_period, u8 scsi_offset,
			u8 esp_stp, u8 esp_soff)
{
	spi_period(tp->starget) = scsi_period;
	spi_offset(tp->starget) = scsi_offset;
	spi_width(tp->starget) = (tp->flags & ESP_TGT_WIDE) ? 1 : 0;

	if (esp_soff) {
		esp_stp &= 0x1f;
		esp_soff |= esp->radelay;
		if (esp->rev >= FAS236) {
			u8 bit = ESP_CONFIG3_FSCSI;
			if (esp->rev >= FAS100A)
				bit = ESP_CONFIG3_FAST;

			/* Short periods (< 50 units) enable the chip's
			 * fast-SCSI config3 bit; FASHME additionally
			 * drops the req/ack delay in that case.
			 */
			if (scsi_period < 50) {
				if (esp->rev == FASHME)
					esp_soff &= ~esp->radelay;
				tp->esp_config3 |= bit;
			} else {
				tp->esp_config3 &= ~bit;
			}
			esp->prev_cfg3 = tp->esp_config3;
			esp_write8(esp->prev_cfg3, ESP_CFG3);
		}
	}

	/* Cache the values so later paths can re-program the chip
	 * without renegotiating.
	 */
	tp->esp_period = esp->prev_stp = esp_stp;
	tp->esp_offset = esp->prev_soff = esp_soff;

	esp_write8(esp_soff, ESP_SOFF);
	esp_write8(esp_stp, ESP_STP);

	tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);

	spi_display_xfer_agreement(tp->starget);
}
1463
/* Handle a MESSAGE REJECT from the target.  A reject received while
 * negotiating wide or sync transfers means the target refuses that
 * feature, so fall back gracefully; any other reject aborts the
 * task set.
 */
static void esp_msgin_reject(struct esp *esp)
{
	struct esp_cmd_entry *ent = esp->active_cmd;
	struct scsi_cmnd *cmd = ent->cmd;
	struct esp_target_data *tp;
	int tgt;

	tgt = cmd->device->id;
	tp = &esp->target[tgt];

	if (tp->flags & ESP_TGT_NEGO_WIDE) {
		/* Wide rejected: drop to narrow, then either finish or
		 * continue with sync negotiation.
		 */
		tp->flags &= ~(ESP_TGT_NEGO_WIDE | ESP_TGT_WIDE);

		if (!esp_need_to_nego_sync(tp)) {
			tp->flags &= ~ESP_TGT_CHECK_NEGO;
			scsi_esp_cmd(esp, ESP_CMD_RATN);
		} else {
			esp->msg_out_len =
				spi_populate_sync_msg(&esp->msg_out[0],
						      tp->nego_goal_period,
						      tp->nego_goal_offset);
			tp->flags |= ESP_TGT_NEGO_SYNC;
			scsi_esp_cmd(esp, ESP_CMD_SATN);
		}
		return;
	}

	if (tp->flags & ESP_TGT_NEGO_SYNC) {
		/* Sync rejected: force asynchronous transfers. */
		tp->flags &= ~(ESP_TGT_NEGO_SYNC | ESP_TGT_CHECK_NEGO);
		tp->esp_period = 0;
		tp->esp_offset = 0;
		esp_setsync(esp, tp, 0, 0, 0, 0);
		scsi_esp_cmd(esp, ESP_CMD_RATN);
		return;
	}

	/* Reject outside any negotiation: abort the task set. */
	esp->msg_out[0] = ABORT_TASK_SET;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1504
/* Process an incoming SDTR (synchronous data transfer request)
 * extended message for target @tp.  msg_in[3] holds the transfer
 * period factor and msg_in[4] the REQ/ACK offset (max 15 per the
 * SPI message format).
 */
static void esp_msgin_sdtr(struct esp *esp, struct esp_target_data *tp)
{
	u8 period = esp->msg_in[3];
	u8 offset = esp->msg_in[4];
	u8 stp;

	/* Only accept SDTR while sync negotiation is in progress. */
	if (!(tp->flags & ESP_TGT_NEGO_SYNC))
		goto do_reject;

	if (offset > 15)
		goto do_reject;

	if (offset) {
		int one_clock;

		if (period > esp->max_period) {
			/* Target is slower than we support: counter
			 * with an async (period = offset = 0) offer.
			 */
			period = offset = 0;
			goto do_sdtr;
		}
		if (period < esp->min_period)
			goto do_reject;

		/* Convert the period factor into chip clock ticks for
		 * the STP register (period << 2 and ccycle / 1000 are
		 * the driver's unit conversions — see esp->ccycle
		 * setup for the exact units).
		 */
		one_clock = esp->ccycle / 1000;
		stp = DIV_ROUND_UP(period << 2, one_clock);
		if (stp && esp->rev >= FAS236) {
			if (stp >= 50)
				stp--;
		}
	} else {
		stp = 0;
	}

	esp_setsync(esp, tp, period, offset, stp, offset);
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
	return;

do_sdtr:
	tp->nego_goal_period = period;
	tp->nego_goal_offset = offset;
	esp->msg_out_len =
		spi_populate_sync_msg(&esp->msg_out[0],
				      tp->nego_goal_period,
				      tp->nego_goal_offset);
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1555
/* Process an incoming WDTR (wide data transfer request) extended
 * message.  msg_in[3] is the width exponent (8 << n bits); only 8 or
 * 16 bit widths are valid, and only the FASHME chip supports wide
 * transfers in this driver.
 */
static void esp_msgin_wdtr(struct esp *esp, struct esp_target_data *tp)
{
	int size = 8 << esp->msg_in[3];
	u8 cfg3;

	if (esp->rev != FASHME)
		goto do_reject;

	if (size != 8 && size != 16)
		goto do_reject;

	/* A WDTR we did not initiate is rejected. */
	if (!(tp->flags & ESP_TGT_NEGO_WIDE))
		goto do_reject;

	cfg3 = tp->esp_config3;
	if (size == 16) {
		tp->flags |= ESP_TGT_WIDE;
		cfg3 |= ESP_CONFIG3_EWIDE;
	} else {
		tp->flags &= ~ESP_TGT_WIDE;
		cfg3 &= ~ESP_CONFIG3_EWIDE;
	}
	tp->esp_config3 = cfg3;
	esp->prev_cfg3 = cfg3;
	esp_write8(cfg3, ESP_CFG3);

	tp->flags &= ~ESP_TGT_NEGO_WIDE;

	/* Width settled; either finish negotiation or move on to
	 * sync parameters.
	 */
	spi_period(tp->starget) = 0;
	spi_offset(tp->starget) = 0;
	if (!esp_need_to_nego_sync(tp)) {
		tp->flags &= ~ESP_TGT_CHECK_NEGO;
		scsi_esp_cmd(esp, ESP_CMD_RATN);
	} else {
		esp->msg_out_len =
			spi_populate_sync_msg(&esp->msg_out[0],
					      tp->nego_goal_period,
					      tp->nego_goal_offset);
		tp->flags |= ESP_TGT_NEGO_SYNC;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
	}
	return;

do_reject:
	esp->msg_out[0] = MESSAGE_REJECT;
	esp->msg_out_len = 1;
	scsi_esp_cmd(esp, ESP_CMD_SATN);
}
1604
1605static void esp_msgin_extended(struct esp *esp)
1606{
1607 struct esp_cmd_entry *ent = esp->active_cmd;
1608 struct scsi_cmnd *cmd = ent->cmd;
1609 struct esp_target_data *tp;
1610 int tgt = cmd->device->id;
1611
1612 tp = &esp->target[tgt];
1613 if (esp->msg_in[2] == EXTENDED_SDTR) {
1614 esp_msgin_sdtr(esp, tp);
1615 return;
1616 }
1617 if (esp->msg_in[2] == EXTENDED_WDTR) {
1618 esp_msgin_wdtr(esp, tp);
1619 return;
1620 }
1621
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001622 shost_printk(KERN_INFO, esp->host,
1623 "Unexpected extended msg type %x\n", esp->msg_in[2]);
David S. Millercd9ad582007-04-26 21:19:23 -07001624
1625 esp->msg_out[0] = ABORT_TASK_SET;
1626 esp->msg_out_len = 1;
1627 scsi_esp_cmd(esp, ESP_CMD_SATN);
1628}
1629
/* Analyze msgin bytes received from target so far.  Return non-zero
 * if there are more bytes needed to complete the message.
 */
static int esp_msgin_process(struct esp *esp)
{
	u8 msg0 = esp->msg_in[0];
	int len = esp->msg_in_len;

	if (msg0 & 0x80) {
		/* Identify */
		shost_printk(KERN_INFO, esp->host,
			     "Unexpected msgin identify\n");
		return 0;
	}

	switch (msg0) {
	case EXTENDED_MESSAGE:
		/* Need the length byte (msg_in[1]) and then the whole
		 * payload before dispatching.
		 */
		if (len == 1)
			return 1;
		if (len < esp->msg_in[1] + 2)
			return 1;
		esp_msgin_extended(esp);
		return 0;

	case IGNORE_WIDE_RESIDUE: {
		struct esp_cmd_entry *ent;
		struct esp_cmd_priv *spriv;
		if (len == 1)
			return 1;

		/* Only a residue count of 1 is meaningful here. */
		if (esp->msg_in[1] != 1)
			goto do_reject;

		ent = esp->active_cmd;
		spriv = ESP_CMD_PRIV(ent->cmd);

		/* Give back the byte that was counted but never
		 * transferred, stepping back one scatterlist entry if
		 * the current one was fully consumed.
		 */
		if (spriv->cur_residue == sg_dma_len(spriv->cur_sg)) {
			spriv->cur_sg--;
			spriv->cur_residue = 1;
		} else
			spriv->cur_residue++;
		spriv->tot_residue++;
		return 0;
	}
	case NOP:
		return 0;
	case RESTORE_POINTERS:
		esp_restore_pointers(esp, esp->active_cmd);
		return 0;
	case SAVE_POINTERS:
		esp_save_pointers(esp, esp->active_cmd);
		return 0;

	case COMMAND_COMPLETE:
	case DISCONNECT: {
		struct esp_cmd_entry *ent = esp->active_cmd;

		/* Record the message; the FREE_BUS event decides what
		 * to do with the command.
		 */
		ent->message = msg0;
		esp_event(esp, ESP_EVENT_FREE_BUS);
		esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
		return 0;
	}
	case MESSAGE_REJECT:
		esp_msgin_reject(esp);
		return 0;

	default:
	do_reject:
		esp->msg_out[0] = MESSAGE_REJECT;
		esp->msg_out_len = 1;
		scsi_esp_cmd(esp, ESP_CMD_SATN);
		return 0;
	}
}
1704
1705static int esp_process_event(struct esp *esp)
1706{
Hannes Reinecke31708662014-11-24 15:37:24 +01001707 int write, i;
David S. Millercd9ad582007-04-26 21:19:23 -07001708
1709again:
1710 write = 0;
Hannes Reinecke1af6f602014-11-24 15:37:22 +01001711 esp_log_event("process event %d phase %x\n",
1712 esp->event, esp->sreg & ESP_STAT_PMASK);
David S. Millercd9ad582007-04-26 21:19:23 -07001713 switch (esp->event) {
1714 case ESP_EVENT_CHECK_PHASE:
1715 switch (esp->sreg & ESP_STAT_PMASK) {
1716 case ESP_DOP:
1717 esp_event(esp, ESP_EVENT_DATA_OUT);
1718 break;
1719 case ESP_DIP:
1720 esp_event(esp, ESP_EVENT_DATA_IN);
1721 break;
1722 case ESP_STATP:
1723 esp_flush_fifo(esp);
1724 scsi_esp_cmd(esp, ESP_CMD_ICCSEQ);
1725 esp_event(esp, ESP_EVENT_STATUS);
1726 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1727 return 1;
1728
1729 case ESP_MOP:
1730 esp_event(esp, ESP_EVENT_MSGOUT);
1731 break;
1732
1733 case ESP_MIP:
1734 esp_event(esp, ESP_EVENT_MSGIN);
1735 break;
1736
1737 case ESP_CMDP:
1738 esp_event(esp, ESP_EVENT_CMD_START);
1739 break;
1740
1741 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001742 shost_printk(KERN_INFO, esp->host,
1743 "Unexpected phase, sreg=%02x\n",
1744 esp->sreg);
David S. Millercd9ad582007-04-26 21:19:23 -07001745 esp_schedule_reset(esp);
1746 return 0;
1747 }
1748 goto again;
1749 break;
1750
1751 case ESP_EVENT_DATA_IN:
1752 write = 1;
1753 /* fallthru */
1754
1755 case ESP_EVENT_DATA_OUT: {
1756 struct esp_cmd_entry *ent = esp->active_cmd;
1757 struct scsi_cmnd *cmd = ent->cmd;
1758 dma_addr_t dma_addr = esp_cur_dma_addr(ent, cmd);
1759 unsigned int dma_len = esp_cur_dma_len(ent, cmd);
1760
1761 if (esp->rev == ESP100)
1762 scsi_esp_cmd(esp, ESP_CMD_NULL);
1763
1764 if (write)
1765 ent->flags |= ESP_CMD_FLAG_WRITE;
1766 else
1767 ent->flags &= ~ESP_CMD_FLAG_WRITE;
1768
Finn Thain6fe07aa2008-04-25 10:06:05 -05001769 if (esp->ops->dma_length_limit)
1770 dma_len = esp->ops->dma_length_limit(esp, dma_addr,
1771 dma_len);
1772 else
1773 dma_len = esp_dma_length_limit(esp, dma_addr, dma_len);
1774
David S. Millercd9ad582007-04-26 21:19:23 -07001775 esp->data_dma_len = dma_len;
1776
1777 if (!dma_len) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001778 shost_printk(KERN_ERR, esp->host,
1779 "DMA length is zero!\n");
1780 shost_printk(KERN_ERR, esp->host,
1781 "cur adr[%08llx] len[%08x]\n",
1782 (unsigned long long)esp_cur_dma_addr(ent, cmd),
1783 esp_cur_dma_len(ent, cmd));
David S. Millercd9ad582007-04-26 21:19:23 -07001784 esp_schedule_reset(esp);
1785 return 0;
1786 }
1787
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001788 esp_log_datastart("start data addr[%08llx] len[%u] write(%d)\n",
Alexey Dobriyane1f2a092007-04-27 15:19:27 -07001789 (unsigned long long)dma_addr, dma_len, write);
David S. Millercd9ad582007-04-26 21:19:23 -07001790
1791 esp->ops->send_dma_cmd(esp, dma_addr, dma_len, dma_len,
1792 write, ESP_CMD_DMA | ESP_CMD_TI);
1793 esp_event(esp, ESP_EVENT_DATA_DONE);
1794 break;
1795 }
1796 case ESP_EVENT_DATA_DONE: {
1797 struct esp_cmd_entry *ent = esp->active_cmd;
1798 struct scsi_cmnd *cmd = ent->cmd;
1799 int bytes_sent;
1800
1801 if (esp->ops->dma_error(esp)) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001802 shost_printk(KERN_INFO, esp->host,
1803 "data done, DMA error, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001804 esp_schedule_reset(esp);
1805 return 0;
1806 }
1807
1808 if (ent->flags & ESP_CMD_FLAG_WRITE) {
1809 /* XXX parity errors, etc. XXX */
1810
1811 esp->ops->dma_drain(esp);
1812 }
1813 esp->ops->dma_invalidate(esp);
1814
1815 if (esp->ireg != ESP_INTR_BSERV) {
1816 /* We should always see exactly a bus-service
1817 * interrupt at the end of a successful transfer.
1818 */
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001819 shost_printk(KERN_INFO, esp->host,
1820 "data done, not BSERV, resetting\n");
David S. Millercd9ad582007-04-26 21:19:23 -07001821 esp_schedule_reset(esp);
1822 return 0;
1823 }
1824
1825 bytes_sent = esp_data_bytes_sent(esp, ent, cmd);
1826
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001827 esp_log_datadone("data done flgs[%x] sent[%d]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001828 ent->flags, bytes_sent);
1829
1830 if (bytes_sent < 0) {
1831 /* XXX force sync mode for this target XXX */
1832 esp_schedule_reset(esp);
1833 return 0;
1834 }
1835
1836 esp_advance_dma(esp, ent, cmd, bytes_sent);
1837 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1838 goto again;
David S. Millercd9ad582007-04-26 21:19:23 -07001839 }
1840
1841 case ESP_EVENT_STATUS: {
1842 struct esp_cmd_entry *ent = esp->active_cmd;
1843
1844 if (esp->ireg & ESP_INTR_FDONE) {
1845 ent->status = esp_read8(ESP_FDATA);
1846 ent->message = esp_read8(ESP_FDATA);
1847 scsi_esp_cmd(esp, ESP_CMD_MOK);
1848 } else if (esp->ireg == ESP_INTR_BSERV) {
1849 ent->status = esp_read8(ESP_FDATA);
1850 ent->message = 0xff;
1851 esp_event(esp, ESP_EVENT_MSGIN);
1852 return 0;
1853 }
1854
1855 if (ent->message != COMMAND_COMPLETE) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001856 shost_printk(KERN_INFO, esp->host,
1857 "Unexpected message %x in status\n",
1858 ent->message);
David S. Millercd9ad582007-04-26 21:19:23 -07001859 esp_schedule_reset(esp);
1860 return 0;
1861 }
1862
1863 esp_event(esp, ESP_EVENT_FREE_BUS);
1864 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1865 break;
1866 }
1867 case ESP_EVENT_FREE_BUS: {
1868 struct esp_cmd_entry *ent = esp->active_cmd;
1869 struct scsi_cmnd *cmd = ent->cmd;
1870
1871 if (ent->message == COMMAND_COMPLETE ||
1872 ent->message == DISCONNECT)
1873 scsi_esp_cmd(esp, ESP_CMD_ESEL);
1874
1875 if (ent->message == COMMAND_COMPLETE) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001876 esp_log_cmddone("Command done status[%x] message[%x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001877 ent->status, ent->message);
1878 if (ent->status == SAM_STAT_TASK_SET_FULL)
1879 esp_event_queue_full(esp, ent);
1880
1881 if (ent->status == SAM_STAT_CHECK_CONDITION &&
1882 !(ent->flags & ESP_CMD_FLAG_AUTOSENSE)) {
1883 ent->flags |= ESP_CMD_FLAG_AUTOSENSE;
1884 esp_autosense(esp, ent);
1885 } else {
1886 esp_cmd_is_done(esp, ent, cmd,
1887 compose_result(ent->status,
1888 ent->message,
1889 DID_OK));
1890 }
1891 } else if (ent->message == DISCONNECT) {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001892 esp_log_disconnect("Disconnecting tgt[%d] tag[%x:%x]\n",
David S. Millercd9ad582007-04-26 21:19:23 -07001893 cmd->device->id,
1894 ent->tag[0], ent->tag[1]);
1895
1896 esp->active_cmd = NULL;
1897 esp_maybe_execute_command(esp);
1898 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001899 shost_printk(KERN_INFO, esp->host,
1900 "Unexpected message %x in freebus\n",
1901 ent->message);
David S. Millercd9ad582007-04-26 21:19:23 -07001902 esp_schedule_reset(esp);
1903 return 0;
1904 }
1905 if (esp->active_cmd)
1906 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1907 break;
1908 }
1909 case ESP_EVENT_MSGOUT: {
1910 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1911
1912 if (esp_debug & ESP_DEBUG_MSGOUT) {
1913 int i;
1914 printk("ESP: Sending message [ ");
1915 for (i = 0; i < esp->msg_out_len; i++)
1916 printk("%02x ", esp->msg_out[i]);
1917 printk("]\n");
1918 }
1919
1920 if (esp->rev == FASHME) {
1921 int i;
1922
1923 /* Always use the fifo. */
1924 for (i = 0; i < esp->msg_out_len; i++) {
1925 esp_write8(esp->msg_out[i], ESP_FDATA);
1926 esp_write8(0, ESP_FDATA);
1927 }
1928 scsi_esp_cmd(esp, ESP_CMD_TI);
1929 } else {
1930 if (esp->msg_out_len == 1) {
1931 esp_write8(esp->msg_out[0], ESP_FDATA);
1932 scsi_esp_cmd(esp, ESP_CMD_TI);
Hannes Reinecke31708662014-11-24 15:37:24 +01001933 } else if (esp->flags & ESP_FLAG_USE_FIFO) {
1934 for (i = 0; i < esp->msg_out_len; i++)
1935 esp_write8(esp->msg_out[i], ESP_FDATA);
1936 scsi_esp_cmd(esp, ESP_CMD_TI);
David S. Millercd9ad582007-04-26 21:19:23 -07001937 } else {
1938 /* Use DMA. */
1939 memcpy(esp->command_block,
1940 esp->msg_out,
1941 esp->msg_out_len);
1942
1943 esp->ops->send_dma_cmd(esp,
1944 esp->command_block_dma,
1945 esp->msg_out_len,
1946 esp->msg_out_len,
1947 0,
1948 ESP_CMD_DMA|ESP_CMD_TI);
1949 }
1950 }
1951 esp_event(esp, ESP_EVENT_MSGOUT_DONE);
1952 break;
1953 }
1954 case ESP_EVENT_MSGOUT_DONE:
1955 if (esp->rev == FASHME) {
1956 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1957 } else {
1958 if (esp->msg_out_len > 1)
1959 esp->ops->dma_invalidate(esp);
1960 }
1961
1962 if (!(esp->ireg & ESP_INTR_DC)) {
1963 if (esp->rev != FASHME)
1964 scsi_esp_cmd(esp, ESP_CMD_NULL);
1965 }
1966 esp_event(esp, ESP_EVENT_CHECK_PHASE);
1967 goto again;
1968 case ESP_EVENT_MSGIN:
1969 if (esp->ireg & ESP_INTR_BSERV) {
1970 if (esp->rev == FASHME) {
1971 if (!(esp_read8(ESP_STATUS2) &
1972 ESP_STAT2_FEMPTY))
1973 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1974 } else {
1975 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1976 if (esp->rev == ESP100)
1977 scsi_esp_cmd(esp, ESP_CMD_NULL);
1978 }
1979 scsi_esp_cmd(esp, ESP_CMD_TI);
1980 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
1981 return 1;
1982 }
1983 if (esp->ireg & ESP_INTR_FDONE) {
1984 u8 val;
1985
1986 if (esp->rev == FASHME)
1987 val = esp->fifo[0];
1988 else
1989 val = esp_read8(ESP_FDATA);
1990 esp->msg_in[esp->msg_in_len++] = val;
1991
Hannes Reineckea1a75b32014-11-24 15:37:21 +01001992 esp_log_msgin("Got msgin byte %x\n", val);
David S. Millercd9ad582007-04-26 21:19:23 -07001993
1994 if (!esp_msgin_process(esp))
1995 esp->msg_in_len = 0;
1996
1997 if (esp->rev == FASHME)
1998 scsi_esp_cmd(esp, ESP_CMD_FLUSH);
1999
2000 scsi_esp_cmd(esp, ESP_CMD_MOK);
2001
2002 if (esp->event != ESP_EVENT_FREE_BUS)
2003 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2004 } else {
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002005 shost_printk(KERN_INFO, esp->host,
2006 "MSGIN neither BSERV not FDON, resetting");
David S. Millercd9ad582007-04-26 21:19:23 -07002007 esp_schedule_reset(esp);
2008 return 0;
2009 }
2010 break;
2011 case ESP_EVENT_CMD_START:
2012 memcpy(esp->command_block, esp->cmd_bytes_ptr,
2013 esp->cmd_bytes_left);
Hannes Reinecke31708662014-11-24 15:37:24 +01002014 esp_send_dma_cmd(esp, esp->cmd_bytes_left, 16, ESP_CMD_TI);
David S. Millercd9ad582007-04-26 21:19:23 -07002015 esp_event(esp, ESP_EVENT_CMD_DONE);
2016 esp->flags |= ESP_FLAG_QUICKIRQ_CHECK;
2017 break;
2018 case ESP_EVENT_CMD_DONE:
2019 esp->ops->dma_invalidate(esp);
2020 if (esp->ireg & ESP_INTR_BSERV) {
2021 esp_event(esp, ESP_EVENT_CHECK_PHASE);
2022 goto again;
2023 }
2024 esp_schedule_reset(esp);
2025 return 0;
2026 break;
2027
2028 case ESP_EVENT_RESET:
2029 scsi_esp_cmd(esp, ESP_CMD_RS);
2030 break;
2031
2032 default:
Hannes Reineckea1a75b32014-11-24 15:37:21 +01002033 shost_printk(KERN_INFO, esp->host,
2034 "Unexpected event %x, resetting\n", esp->event);
David S. Millercd9ad582007-04-26 21:19:23 -07002035 esp_schedule_reset(esp);
2036 return 0;
2037 break;
2038 }
2039 return 1;
2040}
2041
2042static void esp_reset_cleanup_one(struct esp *esp, struct esp_cmd_entry *ent)
2043{
2044 struct scsi_cmnd *cmd = ent->cmd;
2045
2046 esp_unmap_dma(esp, cmd);
2047 esp_free_lun_tag(ent, cmd->device->hostdata);
2048 cmd->result = DID_RESET << 16;
2049
2050 if (ent->flags & ESP_CMD_FLAG_AUTOSENSE) {
2051 esp->ops->unmap_single(esp, ent->sense_dma,
2052 SCSI_SENSE_BUFFERSIZE, DMA_FROM_DEVICE);
2053 ent->sense_ptr = NULL;
2054 }
2055
2056 cmd->scsi_done(cmd);
2057 list_del(&ent->list);
2058 esp_put_ent(esp, ent);
2059}
2060
2061static void esp_clear_hold(struct scsi_device *dev, void *data)
2062{
2063 struct esp_lun_data *lp = dev->hostdata;
2064
2065 BUG_ON(lp->num_tagged);
2066 lp->hold = 0;
2067}
2068
/* Fail every queued and active command with DID_RESET and force all
 * targets to renegotiate transfer parameters.  Called as part of bus
 * reset handling (from __esp_interrupt, under the host lock).
 */
static void esp_reset_cleanup(struct esp *esp)
{
	struct esp_cmd_entry *ent, *tmp;
	int i;

	/* Commands that never reached the bus need no DMA/tag
	 * teardown; just complete them.
	 */
	list_for_each_entry_safe(ent, tmp, &esp->queued_cmds, list) {
		struct scsi_cmnd *cmd = ent->cmd;

		list_del(&ent->list);
		cmd->result = DID_RESET << 16;
		cmd->scsi_done(cmd);
		esp_put_ent(esp, ent);
	}

	/* Active commands get the full unwind via
	 * esp_reset_cleanup_one().
	 */
	list_for_each_entry_safe(ent, tmp, &esp->active_cmds, list) {
		if (ent == esp->active_cmd)
			esp->active_cmd = NULL;
		esp_reset_cleanup_one(esp, ent);
	}

	BUG_ON(esp->active_cmd != NULL);

	/* Force renegotiation of sync/wide transfers. */
	for (i = 0; i < ESP_MAX_TARGET; i++) {
		struct esp_target_data *tp = &esp->target[i];

		tp->esp_period = 0;
		tp->esp_offset = 0;
		tp->esp_config3 &= ~(ESP_CONFIG3_EWIDE |
				     ESP_CONFIG3_FSCSI |
				     ESP_CONFIG3_FAST);
		tp->flags &= ~ESP_TGT_WIDE;
		tp->flags |= ESP_TGT_CHECK_NEGO;

		if (tp->starget)
			__starget_for_each_device(tp->starget, NULL,
						  esp_clear_hold);
	}
	esp->flags &= ~ESP_FLAG_RESETTING;
}
2109
/* Core interrupt service routine.  Runs under host->lock.
 * Latches the chip status/interrupt registers, handles bus-reset
 * completion, then drives the event state machine until it reports
 * that interrupt processing is done.
 */
static void __esp_interrupt(struct esp *esp)
{
	int finish_reset, intr_done;
	u8 phase;

	/*
	 * Once INTRPT is read STATUS and SSTEP are cleared.
	 */
	esp->sreg = esp_read8(ESP_STATUS);
	esp->seqreg = esp_read8(ESP_SSTEP);
	esp->ireg = esp_read8(ESP_INTRPT);

	if (esp->flags & ESP_FLAG_RESETTING) {
		/* A reset we initiated is in flight; finish it up. */
		finish_reset = 1;
	} else {
		if (esp_check_gross_error(esp))
			return;

		finish_reset = esp_check_spur_intr(esp);
		if (finish_reset < 0)
			return;
	}

	/* SCSI bus reset interrupt also forces cleanup. */
	if (esp->ireg & ESP_INTR_SR)
		finish_reset = 1;

	if (finish_reset) {
		esp_reset_cleanup(esp);
		/* Wake esp_eh_bus_reset_handler() if it is waiting. */
		if (esp->eh_reset) {
			complete(esp->eh_reset);
			esp->eh_reset = NULL;
		}
		return;
	}

	phase = (esp->sreg & ESP_STAT_PMASK);
	if (esp->rev == FASHME) {
		/* HME chips latch bytes in the fifo; drain it in the
		 * phases where it may hold message/status bytes.
		 */
		if (((phase != ESP_DIP && phase != ESP_DOP) &&
		     esp->select_state == ESP_SELECT_NONE &&
		     esp->event != ESP_EVENT_STATUS &&
		     esp->event != ESP_EVENT_DATA_DONE) ||
		    (esp->ireg & ESP_INTR_RSEL)) {
			esp->sreg2 = esp_read8(ESP_STATUS2);
			if (!(esp->sreg2 & ESP_STAT2_FEMPTY) ||
			    (esp->sreg2 & ESP_STAT2_F1BYTE))
				hme_read_fifo(esp);
		}
	}

	esp_log_intr("intr sreg[%02x] seqreg[%02x] "
		     "sreg2[%02x] ireg[%02x]\n",
		     esp->sreg, esp->seqreg, esp->sreg2, esp->ireg);

	intr_done = 0;

	if (esp->ireg & (ESP_INTR_S | ESP_INTR_SATN | ESP_INTR_IC)) {
		/* Interrupts we never expect as an initiator: reset. */
		shost_printk(KERN_INFO, esp->host,
			     "unexpected IREG %02x\n", esp->ireg);
		if (esp->ireg & ESP_INTR_IC)
			esp_dump_cmd_log(esp);

		esp_schedule_reset(esp);
	} else {
		if (!(esp->ireg & ESP_INTR_RSEL)) {
			/* Some combination of FDONE, BSERV, DC. */
			if (esp->select_state != ESP_SELECT_NONE)
				intr_done = esp_finish_select(esp);
		} else if (esp->ireg & ESP_INTR_RSEL) {
			/* Reselected by a target; abandon any pending
			 * selection first.
			 */
			if (esp->active_cmd)
				(void) esp_finish_select(esp);
			intr_done = esp_reconnect(esp);
		}
	}
	/* Run the event state machine until it says we are done. */
	while (!intr_done)
		intr_done = esp_process_event(esp);
}
2187
2188irqreturn_t scsi_esp_intr(int irq, void *dev_id)
2189{
2190 struct esp *esp = dev_id;
2191 unsigned long flags;
2192 irqreturn_t ret;
2193
2194 spin_lock_irqsave(esp->host->host_lock, flags);
2195 ret = IRQ_NONE;
2196 if (esp->ops->irq_pending(esp)) {
2197 ret = IRQ_HANDLED;
2198 for (;;) {
2199 int i;
2200
2201 __esp_interrupt(esp);
2202 if (!(esp->flags & ESP_FLAG_QUICKIRQ_CHECK))
2203 break;
2204 esp->flags &= ~ESP_FLAG_QUICKIRQ_CHECK;
2205
2206 for (i = 0; i < ESP_QUICKIRQ_LIMIT; i++) {
2207 if (esp->ops->irq_pending(esp))
2208 break;
2209 }
2210 if (i == ESP_QUICKIRQ_LIMIT)
2211 break;
2212 }
2213 }
2214 spin_unlock_irqrestore(esp->host->host_lock, flags);
2215
2216 return ret;
2217}
2218EXPORT_SYMBOL(scsi_esp_intr);
2219
/* Detect which ESP chip variant is present by probing which of the
 * config registers (cfg2, cfg3) are actually implemented.  Stores the
 * result in esp->rev and primes esp->config1/config2/prev_cfg3.
 * The write/read-back ordering below is what the chips require —
 * do not reorder.
 */
static void esp_get_revision(struct esp *esp)
{
	u8 val;

	esp->config1 = (ESP_CONFIG1_PENABLE | (esp->scsi_id & 7));
	/* A front-end may have preset config2; only probe whether cfg2
	 * exists when it is still zero.
	 */
	if (esp->config2 == 0) {
		esp->config2 = (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY);
		esp_write8(esp->config2, ESP_CFG2);

		val = esp_read8(ESP_CFG2);
		val &= ~ESP_CONFIG2_MAGIC;

		esp->config2 = 0;
		if (val != (ESP_CONFIG2_SCSI2ENAB | ESP_CONFIG2_REGPARITY)) {
			/*
			 * If what we write to cfg2 does not come back,
			 * cfg2 is not implemented.
			 * Therefore this must be a plain esp100.
			 */
			esp->rev = ESP100;
			return;
		}
	}

	/* cfg2 exists; now probe cfg3 with a write/read-back of 5. */
	esp_set_all_config3(esp, 5);
	esp->prev_cfg3 = 5;
	esp_write8(esp->config2, ESP_CFG2);
	esp_write8(0, ESP_CFG3);
	esp_write8(esp->prev_cfg3, ESP_CFG3);

	val = esp_read8(ESP_CFG3);
	if (val != 5) {
		/* The cfg2 register is implemented, however
		 * cfg3 is not, must be esp100a.
		 */
		esp->rev = ESP100A;
	} else {
		esp_set_all_config3(esp, 0);
		esp->prev_cfg3 = 0;
		esp_write8(esp->prev_cfg3, ESP_CFG3);

		/* All of cfg{1,2,3} implemented, must be one of
		 * the fas variants, figure out which one.
		 */
		if (esp->cfact == 0 || esp->cfact > ESP_CCF_F5) {
			esp->rev = FAST;
			esp->sync_defp = SYNC_DEFP_FAST;
		} else {
			esp->rev = ESP236;
		}
	}
}
2272
Adrian Bunk76246802007-10-11 17:35:20 +02002273static void esp_init_swstate(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002274{
2275 int i;
2276
2277 INIT_LIST_HEAD(&esp->queued_cmds);
2278 INIT_LIST_HEAD(&esp->active_cmds);
2279 INIT_LIST_HEAD(&esp->esp_cmd_pool);
2280
2281 /* Start with a clear state, domain validation (via ->slave_configure,
2282 * spi_dv_device()) will attempt to enable SYNC, WIDE, and tagged
2283 * commands.
2284 */
2285 for (i = 0 ; i < ESP_MAX_TARGET; i++) {
2286 esp->target[i].flags = 0;
2287 esp->target[i].nego_goal_period = 0;
2288 esp->target[i].nego_goal_offset = 0;
2289 esp->target[i].nego_goal_width = 0;
2290 esp->target[i].nego_goal_tags = 0;
2291 }
2292}
2293
/* This places the ESP into a known state at boot time.  The register
 * write sequence is order-sensitive: select-reset reporting is
 * disabled before the bus reset so no interrupt is generated, then
 * config1 is restored and any stale interrupt is drained.
 */
static void esp_bootup_reset(struct esp *esp)
{
	u8 val;

	/* Reset the DMA */
	esp->ops->reset_dma(esp);

	/* Reset the ESP */
	esp_reset_esp(esp);

	/* Reset the SCSI bus, but tell ESP not to generate an irq */
	val = esp_read8(ESP_CFG1);
	val |= ESP_CONFIG1_SRRDISAB;
	esp_write8(val, ESP_CFG1);

	scsi_esp_cmd(esp, ESP_CMD_RS);
	udelay(400);

	/* Restore the intended config1 (re-enables reset reporting). */
	esp_write8(esp->config1, ESP_CFG1);

	/* Eat any bitrot in the chip and we are done... */
	esp_read8(ESP_INTRPT);
}
2318
Adrian Bunk76246802007-10-11 17:35:20 +02002319static void esp_set_clock_params(struct esp *esp)
David S. Millercd9ad582007-04-26 21:19:23 -07002320{
Finn Thain6fe07aa2008-04-25 10:06:05 -05002321 int fhz;
David S. Millercd9ad582007-04-26 21:19:23 -07002322 u8 ccf;
2323
2324 /* This is getting messy but it has to be done correctly or else
2325 * you get weird behavior all over the place. We are trying to
2326 * basically figure out three pieces of information.
2327 *
2328 * a) Clock Conversion Factor
2329 *
2330 * This is a representation of the input crystal clock frequency
2331 * going into the ESP on this machine. Any operation whose timing
2332 * is longer than 400ns depends on this value being correct. For
2333 * example, you'll get blips for arbitration/selection during high
2334 * load or with multiple targets if this is not set correctly.
2335 *
2336 * b) Selection Time-Out
2337 *
2338 * The ESP isn't very bright and will arbitrate for the bus and try
2339 * to select a target forever if you let it. This value tells the
2340 * ESP when it has taken too long to negotiate and that it should
2341 * interrupt the CPU so we can see what happened. The value is
2342 * computed as follows (from NCR/Symbios chip docs).
2343 *
2344 * (Time Out Period) * (Input Clock)
2345 * STO = ----------------------------------
2346 * (8192) * (Clock Conversion Factor)
2347 *
2348 * We use a time out period of 250ms (ESP_BUS_TIMEOUT).
2349 *
2350 * c) Imperical constants for synchronous offset and transfer period
2351 * register values
2352 *
2353 * This entails the smallest and largest sync period we could ever
2354 * handle on this ESP.
2355 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002356 fhz = esp->cfreq;
David S. Millercd9ad582007-04-26 21:19:23 -07002357
Finn Thain6fe07aa2008-04-25 10:06:05 -05002358 ccf = ((fhz / 1000000) + 4) / 5;
David S. Millercd9ad582007-04-26 21:19:23 -07002359 if (ccf == 1)
2360 ccf = 2;
2361
2362 /* If we can't find anything reasonable, just assume 20MHZ.
2363 * This is the clock frequency of the older sun4c's where I've
2364 * been unable to find the clock-frequency PROM property. All
2365 * other machines provide useful values it seems.
2366 */
Finn Thain6fe07aa2008-04-25 10:06:05 -05002367 if (fhz <= 5000000 || ccf < 1 || ccf > 8) {
2368 fhz = 20000000;
David S. Millercd9ad582007-04-26 21:19:23 -07002369 ccf = 4;
2370 }
2371
2372 esp->cfact = (ccf == 8 ? 0 : ccf);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002373 esp->cfreq = fhz;
2374 esp->ccycle = ESP_HZ_TO_CYCLE(fhz);
David S. Millercd9ad582007-04-26 21:19:23 -07002375 esp->ctick = ESP_TICK(ccf, esp->ccycle);
Finn Thain6fe07aa2008-04-25 10:06:05 -05002376 esp->neg_defp = ESP_NEG_DEFP(fhz, ccf);
David S. Millercd9ad582007-04-26 21:19:23 -07002377 esp->sync_defp = SYNC_DEFP_SLOW;
2378}
2379
/* Human-readable chip names, indexed by esp->rev — the order must
 * match the chip revision constants (ESP100, ESP100A, ESP236, ...).
 */
static const char *esp_chip_names[] = {
	"ESP100",
	"ESP100A",
	"ESP236",
	"FAS236",
	"FAS100A",
	"FAST",
	"FASHME",
	"AM53C974",
};
2390
/* SPI transport template shared by all ESP hosts; set up in esp_init(). */
static struct scsi_transport_template *esp_transport_template;
2392
/* Register a fully set-up ESP host with the SCSI midlayer.  Called by
 * the bus front-end drivers after filling in esp->ops, regs, cfreq etc.
 * Performs clock setup, chip revision detection, software state init
 * and a bootup bus reset before adding and scanning the host.
 * Returns 0 on success or the scsi_add_host() error.
 */
int scsi_esp_register(struct esp *esp, struct device *dev)
{
	static int instance;
	int err;

	if (!esp->num_tags)
		esp->num_tags = ESP_DEFAULT_TAGS;
	esp->host->transportt = esp_transport_template;
	esp->host->max_lun = ESP_MAX_LUN;
	esp->host->cmd_per_lun = 2;
	esp->host->unique_id = instance;

	/* Ordering matters: clock params feed revision detection,
	 * which feeds the bootup reset programming.
	 */
	esp_set_clock_params(esp);

	esp_get_revision(esp);

	esp_init_swstate(esp);

	esp_bootup_reset(esp);

	dev_printk(KERN_INFO, dev, "esp%u: regs[%1p:%1p] irq[%u]\n",
		   esp->host->unique_id, esp->regs, esp->dma_regs,
		   esp->host->irq);
	dev_printk(KERN_INFO, dev,
		   "esp%u: is a %s, %u MHz (ccf=%u), SCSI ID %u\n",
		   esp->host->unique_id, esp_chip_names[esp->rev],
		   esp->cfreq / 1000000, esp->cfact, esp->scsi_id);

	/* Let the SCSI bus reset settle. */
	ssleep(esp_bus_reset_settle);

	err = scsi_add_host(esp->host, dev);
	if (err)
		return err;

	/* Only consume an instance number on success. */
	instance++;

	scsi_scan_host(esp->host);

	return 0;
}
EXPORT_SYMBOL(scsi_esp_register);
2435
/* Detach an ESP host registered via scsi_esp_register() from the SCSI
 * midlayer.  Only removes the host; no other teardown happens here.
 */
void scsi_esp_unregister(struct esp *esp)
{
	scsi_remove_host(esp->host);
}
EXPORT_SYMBOL(scsi_esp_unregister);
2441
James Bottomleyec5e69f2008-06-23 14:52:09 -05002442static int esp_target_alloc(struct scsi_target *starget)
2443{
2444 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2445 struct esp_target_data *tp = &esp->target[starget->id];
2446
2447 tp->starget = starget;
2448
2449 return 0;
2450}
2451
2452static void esp_target_destroy(struct scsi_target *starget)
2453{
2454 struct esp *esp = shost_priv(dev_to_shost(&starget->dev));
2455 struct esp_target_data *tp = &esp->target[starget->id];
2456
2457 tp->starget = NULL;
2458}
2459
David S. Millercd9ad582007-04-26 21:19:23 -07002460static int esp_slave_alloc(struct scsi_device *dev)
2461{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002462 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002463 struct esp_target_data *tp = &esp->target[dev->id];
2464 struct esp_lun_data *lp;
2465
2466 lp = kzalloc(sizeof(*lp), GFP_KERNEL);
2467 if (!lp)
2468 return -ENOMEM;
2469 dev->hostdata = lp;
2470
David S. Millercd9ad582007-04-26 21:19:23 -07002471 spi_min_period(tp->starget) = esp->min_period;
2472 spi_max_offset(tp->starget) = 15;
2473
2474 if (esp->flags & ESP_FLAG_WIDE_CAPABLE)
2475 spi_max_width(tp->starget) = 1;
2476 else
2477 spi_max_width(tp->starget) = 0;
2478
2479 return 0;
2480}
2481
2482static int esp_slave_configure(struct scsi_device *dev)
2483{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002484 struct esp *esp = shost_priv(dev->host);
David S. Millercd9ad582007-04-26 21:19:23 -07002485 struct esp_target_data *tp = &esp->target[dev->id];
David S. Millercd9ad582007-04-26 21:19:23 -07002486
Hannes Reinecke3707a182014-11-24 15:37:20 +01002487 if (dev->tagged_supported)
2488 scsi_change_queue_depth(dev, esp->num_tags);
David S. Millercd9ad582007-04-26 21:19:23 -07002489
David S. Millercd9ad582007-04-26 21:19:23 -07002490 tp->flags |= ESP_TGT_DISCONNECT;
2491
2492 if (!spi_initial_dv(dev->sdev_target))
2493 spi_dv_device(dev);
2494
2495 return 0;
2496}
2497
2498static void esp_slave_destroy(struct scsi_device *dev)
2499{
2500 struct esp_lun_data *lp = dev->hostdata;
2501
2502 kfree(lp);
2503 dev->hostdata = NULL;
2504}
2505
/* SCSI EH abort handler.  Three cases:
 *  1) command still queued  -> complete it DID_ABORT immediately;
 *  2) command is the active one -> queue an ABORT_TASK_SET message and
 *     wait up to 5s for the interrupt path to complete it;
 *  3) command is disconnected -> give up (FAILED) and let higher-level
 *     EH escalate to bus/host reset.
 */
static int esp_eh_abort_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct esp_cmd_entry *ent, *tmp;
	struct completion eh_done;
	unsigned long flags;

	/* XXX This helps a lot with debugging but might be a bit
	 * XXX much for the final driver.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);
	shost_printk(KERN_ERR, esp->host, "Aborting command [%p:%02x]\n",
		     cmd, cmd->cmnd[0]);
	ent = esp->active_cmd;
	if (ent)
		shost_printk(KERN_ERR, esp->host,
			     "Current command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	list_for_each_entry(ent, &esp->queued_cmds, list) {
		shost_printk(KERN_ERR, esp->host, "Queued command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	list_for_each_entry(ent, &esp->active_cmds, list) {
		shost_printk(KERN_ERR, esp->host, " Active command [%p:%02x]\n",
			     ent->cmd, ent->cmd->cmnd[0]);
	}
	esp_dump_cmd_log(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Re-take the lock for the actual abort work; the lists may
	 * have changed while it was dropped above.
	 */
	spin_lock_irqsave(esp->host->host_lock, flags);

	/* Case 1: is the command still sitting on the queued list? */
	ent = NULL;
	list_for_each_entry(tmp, &esp->queued_cmds, list) {
		if (tmp->cmd == cmd) {
			ent = tmp;
			break;
		}
	}

	if (ent) {
		/* Easiest case, we didn't even issue the command
		 * yet so it is trivial to abort.
		 */
		list_del(&ent->list);

		cmd->result = DID_ABORT << 16;
		cmd->scsi_done(cmd);

		esp_put_ent(esp, ent);

		goto out_success;
	}

	init_completion(&eh_done);

	/* Case 2: command is currently active on the bus. */
	ent = esp->active_cmd;
	if (ent && ent->cmd == cmd) {
		/* Command is the currently active command on
		 * the bus. If we already have an output message
		 * pending, no dice.
		 */
		if (esp->msg_out_len)
			goto out_failure;

		/* Send out an abort, encouraging the target to
		 * go to MSGOUT phase by asserting ATN.
		 */
		esp->msg_out[0] = ABORT_TASK_SET;
		esp->msg_out_len = 1;
		ent->eh_done = &eh_done;

		scsi_esp_cmd(esp, ESP_CMD_SATN);
	} else {
		/* The command is disconnected. This is not easy to
		 * abort. For now we fail and let the scsi error
		 * handling layer go try a scsi bus reset or host
		 * reset.
		 *
		 * What we could do is put together a scsi command
		 * solely for the purpose of sending an abort message
		 * to the target. Coming up with all the code to
		 * cook up scsi commands, special case them everywhere,
		 * etc. is for questionable gain and it would be better
		 * if the generic scsi error handling layer could do at
		 * least some of that for us.
		 *
		 * Anyways this is an area for potential future improvement
		 * in this driver.
		 */
		goto out_failure;
	}

	/* Drop the lock while we sleep waiting for the abort. */
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	if (!wait_for_completion_timeout(&eh_done, 5 * HZ)) {
		/* Timed out: detach the completion under the lock so a
		 * late interrupt cannot touch our stack frame.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		ent->eh_done = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;

out_success:
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return SUCCESS;

out_failure:
	/* XXX This might be a good location to set ESP_TGT_BROKEN
	 * XXX since we know which target/lun in particular is
	 * XXX causing trouble.
	 */
	spin_unlock_irqrestore(esp->host->host_lock, flags);
	return FAILED;
}
2622
/* SCSI EH bus-reset handler: issue a SCSI bus reset and wait for the
 * interrupt path to run reset cleanup and signal completion.
 */
static int esp_eh_bus_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	struct completion eh_reset;
	unsigned long flags;

	init_completion(&eh_reset);

	spin_lock_irqsave(esp->host->host_lock, flags);

	/* __esp_interrupt() completes this once the reset interrupt
	 * has been seen and cleanup has run.
	 */
	esp->eh_reset = &eh_reset;

	/* XXX This is too simple... We should add lots of
	 * XXX checks here so that if we find that the chip is
	 * XXX very wedged we return failure immediately so
	 * XXX that we can perform a full chip reset.
	 */
	esp->flags |= ESP_FLAG_RESETTING;
	scsi_esp_cmd(esp, ESP_CMD_RS);

	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Give devices time to settle after the bus reset. */
	ssleep(esp_bus_reset_settle);

	if (!wait_for_completion_timeout(&eh_reset, 5 * HZ)) {
		/* No reset interrupt arrived; detach the completion so
		 * a late interrupt cannot touch our stack frame.
		 */
		spin_lock_irqsave(esp->host->host_lock, flags);
		esp->eh_reset = NULL;
		spin_unlock_irqrestore(esp->host->host_lock, flags);

		return FAILED;
	}

	return SUCCESS;
}
2657
/* All bets are off, reset the entire device. */
static int esp_eh_host_reset_handler(struct scsi_cmnd *cmd)
{
	struct esp *esp = shost_priv(cmd->device->host);
	unsigned long flags;

	spin_lock_irqsave(esp->host->host_lock, flags);
	/* Full DMA + chip + bus reset, then fail everything outstanding
	 * with DID_RESET and force renegotiation.
	 */
	esp_bootup_reset(esp);
	esp_reset_cleanup(esp);
	spin_unlock_irqrestore(esp->host->host_lock, flags);

	/* Let the SCSI bus settle before returning to the midlayer. */
	ssleep(esp_bus_reset_settle);

	return SUCCESS;
}
2673
/* ->info() handler: static identification string for this host. */
static const char *esp_info(struct Scsi_Host *host)
{
	return "esp";
}
2678
/* Shared host template for all ESP bus front-end drivers; exported so
 * front-ends can copy/extend it before calling scsi_esp_register().
 */
struct scsi_host_template scsi_esp_template = {
	.module			= THIS_MODULE,
	.name			= "esp",
	.info			= esp_info,
	.queuecommand		= esp_queuecommand,
	.target_alloc		= esp_target_alloc,
	.target_destroy		= esp_target_destroy,
	.slave_alloc		= esp_slave_alloc,
	.slave_configure	= esp_slave_configure,
	.slave_destroy		= esp_slave_destroy,
	.eh_abort_handler	= esp_eh_abort_handler,
	.eh_bus_reset_handler	= esp_eh_bus_reset_handler,
	.eh_host_reset_handler	= esp_eh_host_reset_handler,
	.can_queue		= 7,
	.this_id		= 7,
	.sg_tablesize		= SG_ALL,
	.use_clustering		= ENABLE_CLUSTERING,
	.max_sectors		= 0xffff,
	/* The driver sleeps esp_bus_reset_settle itself after resets. */
	.skip_settle_delay	= 1,
};
EXPORT_SYMBOL(scsi_esp_template);
2700
2701static void esp_get_signalling(struct Scsi_Host *host)
2702{
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002703 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002704 enum spi_signal_type type;
2705
2706 if (esp->flags & ESP_FLAG_DIFFERENTIAL)
2707 type = SPI_SIGNAL_HVD;
2708 else
2709 type = SPI_SIGNAL_SE;
2710
2711 spi_signalling(host) = type;
2712}
2713
2714static void esp_set_offset(struct scsi_target *target, int offset)
2715{
2716 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002717 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002718 struct esp_target_data *tp = &esp->target[target->id];
2719
Finn Thain02507a82009-12-05 12:30:42 +11002720 if (esp->flags & ESP_FLAG_DISABLE_SYNC)
2721 tp->nego_goal_offset = 0;
2722 else
2723 tp->nego_goal_offset = offset;
David S. Millercd9ad582007-04-26 21:19:23 -07002724 tp->flags |= ESP_TGT_CHECK_NEGO;
2725}
2726
2727static void esp_set_period(struct scsi_target *target, int period)
2728{
2729 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002730 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002731 struct esp_target_data *tp = &esp->target[target->id];
2732
2733 tp->nego_goal_period = period;
2734 tp->flags |= ESP_TGT_CHECK_NEGO;
2735}
2736
2737static void esp_set_width(struct scsi_target *target, int width)
2738{
2739 struct Scsi_Host *host = dev_to_shost(target->dev.parent);
Christoph Hellwig2b14ec72007-05-31 20:12:32 +02002740 struct esp *esp = shost_priv(host);
David S. Millercd9ad582007-04-26 21:19:23 -07002741 struct esp_target_data *tp = &esp->target[target->id];
2742
2743 tp->nego_goal_width = (width ? 1 : 0);
2744 tp->flags |= ESP_TGT_CHECK_NEGO;
2745}
2746
/* SPI transport glue: the setters only record negotiation goals in
 * esp_target_data and set ESP_TGT_CHECK_NEGO; the driver acts on them
 * separately.
 */
static struct spi_function_template esp_transport_ops = {
	.set_offset		= esp_set_offset,
	.show_offset		= 1,
	.set_period		= esp_set_period,
	.show_period		= 1,
	.set_width		= esp_set_width,
	.show_width		= 1,
	.get_signalling		= esp_get_signalling,
};
2756
2757static int __init esp_init(void)
2758{
2759 BUILD_BUG_ON(sizeof(struct scsi_pointer) <
2760 sizeof(struct esp_cmd_priv));
2761
2762 esp_transport_template = spi_attach_transport(&esp_transport_ops);
2763 if (!esp_transport_template)
2764 return -ENODEV;
2765
2766 return 0;
2767}
2768
/* Module exit: release the SPI transport class attached in esp_init(). */
static void __exit esp_exit(void)
{
	spi_release_transport(esp_transport_template);
}
2773
2774MODULE_DESCRIPTION("ESP SCSI driver core");
2775MODULE_AUTHOR("David S. Miller (davem@davemloft.net)");
2776MODULE_LICENSE("GPL");
2777MODULE_VERSION(DRV_VERSION);
2778
2779module_param(esp_bus_reset_settle, int, 0);
2780MODULE_PARM_DESC(esp_bus_reset_settle,
2781 "ESP scsi bus reset delay in seconds");
2782
2783module_param(esp_debug, int, 0);
2784MODULE_PARM_DESC(esp_debug,
2785"ESP bitmapped debugging message enable value:\n"
2786" 0x00000001 Log interrupt events\n"
2787" 0x00000002 Log scsi commands\n"
2788" 0x00000004 Log resets\n"
2789" 0x00000008 Log message in events\n"
2790" 0x00000010 Log message out events\n"
2791" 0x00000020 Log command completion\n"
2792" 0x00000040 Log disconnects\n"
2793" 0x00000080 Log data start\n"
2794" 0x00000100 Log data done\n"
2795" 0x00000200 Log reconnects\n"
2796" 0x00000400 Log auto-sense data\n"
2797);
2798
2799module_init(esp_init);
2800module_exit(esp_exit);