/*
 * Aic94xx Task Management Functions
 *
 * Copyright (C) 2005 Adaptec, Inc. All rights reserved.
 * Copyright (C) 2005 Luben Tuikov <luben_tuikov@adaptec.com>
 *
 * This file is licensed under GPLv2.
 *
 * This file is part of the aic94xx driver.
 *
 * The aic94xx driver is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License as
 * published by the Free Software Foundation; version 2 of the
 * License.
 *
 * The aic94xx driver is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with the aic94xx driver; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
 *
 */

#include <linux/spinlock.h>
#include "aic94xx.h"
#include "aic94xx_sas.h"
#include "aic94xx_hwi.h"

/* ---------- Internal enqueue ---------- */

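/*
 * Post an internally generated ascb to the sequencer and arm its timer
 * so the request cannot hang forever.  The caller supplies both the
 * done-list completion handler and the timeout handler; if posting the
 * ascb fails, the timer armed here is deleted again before the error
 * is returned.
 */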
static int asd_enqueue_internal(struct asd_ascb *ascb,
                void (*tasklet_complete)(struct asd_ascb *,
                                         struct done_list_struct *),
                void (*timed_out)(unsigned long))
{
        int res;

        ascb->tasklet_complete = tasklet_complete;
        ascb->uldd_timer = 1;

        ascb->timer.data = (unsigned long) ascb;
        ascb->timer.function = timed_out;
        ascb->timer.expires = jiffies + AIC94XX_SCB_TIMEOUT;

        add_timer(&ascb->timer);

        res = asd_post_ascb_list(ascb->ha, ascb, 1);
        if (unlikely(res))
                del_timer(&ascb->timer);
        return res;
}

static inline void asd_timedout_common(unsigned long data)
{
        struct asd_ascb *ascb = (void *) data;
        struct asd_seq_data *seq = &ascb->ha->seq;
        unsigned long flags;

        spin_lock_irqsave(&seq->pend_q_lock, flags);
        seq->pending--;
        list_del_init(&ascb->list);
        spin_unlock_irqrestore(&seq->pend_q_lock, flags);
}

/* ---------- CLEAR NEXUS ---------- */

static void asd_clear_nexus_tasklet_complete(struct asd_ascb *ascb,
                                             struct done_list_struct *dl)
{
        ASD_DPRINTK("%s: here\n", __FUNCTION__);
        if (!del_timer(&ascb->timer)) {
                ASD_DPRINTK("%s: couldn't delete timer\n", __FUNCTION__);
                return;
        }
        ASD_DPRINTK("%s: opcode: 0x%x\n", __FUNCTION__, dl->opcode);
        ascb->uldd_task = (void *) (unsigned long) dl->opcode;
        complete(&ascb->completion);
}

static void asd_clear_nexus_timedout(unsigned long data)
{
        struct asd_ascb *ascb = (void *) data;

        ASD_DPRINTK("%s: here\n", __FUNCTION__);
        asd_timedout_common(data);
        ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
        complete(&ascb->completion);
}

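/*
 * CLEAR_NEXUS_PRE/POST bracket the bodies of the asd_clear_nexus_*()
 * functions below: PRE allocates a single ascb and sets the CLEAR NEXUS
 * opcode, POST posts it via asd_enqueue_internal(), waits for the
 * completion, maps TC_NO_ERROR to TMF_RESP_FUNC_COMPLETE and frees the
 * ascb.  Both macros use the local variables asd_ha, ascb, scb and res
 * declared by each caller.
 */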
#define CLEAR_NEXUS_PRE         \
        ASD_DPRINTK("%s: PRE\n", __FUNCTION__); \
        res = 1;                \
        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL); \
        if (!ascb)              \
                return -ENOMEM; \
                                \
        scb = ascb->scb;        \
        scb->header.opcode = CLEAR_NEXUS

#define CLEAR_NEXUS_POST        \
        ASD_DPRINTK("%s: POST\n", __FUNCTION__); \
        res = asd_enqueue_internal(ascb, asd_clear_nexus_tasklet_complete, \
                                   asd_clear_nexus_timedout); \
        if (res)                \
                goto out_err;   \
        ASD_DPRINTK("%s: clear nexus posted, waiting...\n", __FUNCTION__); \
        wait_for_completion(&ascb->completion); \
        res = (int) (unsigned long) ascb->uldd_task; \
        if (res == TC_NO_ERROR) \
                res = TMF_RESP_FUNC_COMPLETE; \
out_err:                        \
        asd_ascb_free(ascb);    \
        return res

int asd_clear_nexus_ha(struct sas_ha_struct *sas_ha)
{
        struct asd_ha_struct *asd_ha = sas_ha->lldd_ha;
        struct asd_ascb *ascb;
        struct scb *scb;
        int res;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_ADAPTER;
        CLEAR_NEXUS_POST;
}

int asd_clear_nexus_port(struct asd_sas_port *port)
{
        struct asd_ha_struct *asd_ha = port->ha->lldd_ha;
        struct asd_ascb *ascb;
        struct scb *scb;
        int res;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_PORT;
        scb->clear_nexus.conn_mask = port->phy_mask;
        CLEAR_NEXUS_POST;
}

#if 0
static int asd_clear_nexus_I_T(struct domain_device *dev)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        struct asd_ascb *ascb;
        struct scb *scb;
        int res;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_I_T;
        scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
        if (dev->tproto)
                scb->clear_nexus.flags |= SUSPEND_TX;
        scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                   dev->lldd_dev);
        CLEAR_NEXUS_POST;
}
#endif

static int asd_clear_nexus_I_T_L(struct domain_device *dev, u8 *lun)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        struct asd_ascb *ascb;
        struct scb *scb;
        int res;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_I_T_L;
        scb->clear_nexus.flags = SEND_Q | EXEC_Q | NOTINQ;
        if (dev->tproto)
                scb->clear_nexus.flags |= SUSPEND_TX;
        memcpy(scb->clear_nexus.ssp_task.lun, lun, 8);
        scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                   dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_tag(struct sas_task *task)
{
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        struct asd_ascb *tascb = task->lldd_task;
        struct asd_ascb *ascb;
        struct scb *scb;
        int res;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_TAG;
        memcpy(scb->clear_nexus.ssp_task.lun, task->ssp_task.LUN, 8);
        scb->clear_nexus.ssp_task.tag = tascb->tag;
        if (task->dev->tproto)
                scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                           task->dev->lldd_dev);
        CLEAR_NEXUS_POST;
}

static int asd_clear_nexus_index(struct sas_task *task)
{
        struct asd_ha_struct *asd_ha = task->dev->port->ha->lldd_ha;
        struct asd_ascb *tascb = task->lldd_task;
        struct asd_ascb *ascb;
        struct scb *scb;
        int res;

        CLEAR_NEXUS_PRE;
        scb->clear_nexus.nexus = NEXUS_TRANS_CX;
        if (task->dev->tproto)
                scb->clear_nexus.conn_handle = cpu_to_le16((u16)(unsigned long)
                                                           task->dev->lldd_dev);
        scb->clear_nexus.index = cpu_to_le16(tascb->tc_index);
        CLEAR_NEXUS_POST;
}

/* ---------- TMFs ---------- */

static void asd_tmf_timedout(unsigned long data)
{
        struct asd_ascb *ascb = (void *) data;

        ASD_DPRINTK("tmf timed out\n");
        asd_timedout_common(data);
        ascb->uldd_task = (void *) TMF_RESP_FUNC_FAILED;
        complete(&ascb->completion);
}

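/*
 * Pull the TMF response out of the empty data buffer (EDB) referenced by
 * the done-list status block: look up the owning escb by its index, find
 * the EDB holding the SSP RESPONSE frame, and return the response code
 * (byte 3 of the response data when datapres says response data is
 * present, otherwise the status field of the response IU).  The frame's
 * tag is also saved in the ascb so the caller can later clear the nexus
 * by tag.
 */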
static int asd_get_tmf_resp_tasklet(struct asd_ascb *ascb,
                                    struct done_list_struct *dl)
{
        struct asd_ha_struct *asd_ha = ascb->ha;
        unsigned long flags;
        struct tc_resp_sb_struct {
                __le16 index_escb;
                u8     len_lsb;
                u8     flags;
        } __attribute__ ((packed)) *resp_sb = (void *) dl->status_block;

        int edb_id = ((resp_sb->flags & 0x70) >> 4)-1;
        struct asd_ascb *escb;
        struct asd_dma_tok *edb;
        struct ssp_frame_hdr *fh;
        struct ssp_response_iu *ru;
        int res = TMF_RESP_FUNC_FAILED;

        ASD_DPRINTK("tmf resp tasklet\n");

        spin_lock_irqsave(&asd_ha->seq.tc_index_lock, flags);
        escb = asd_tc_index_find(&asd_ha->seq,
                                 (int)le16_to_cpu(resp_sb->index_escb));
        spin_unlock_irqrestore(&asd_ha->seq.tc_index_lock, flags);

        if (!escb) {
                ASD_DPRINTK("Uh-oh! No escb for this dl?!\n");
                return res;
        }

        edb = asd_ha->seq.edb_arr[edb_id + escb->edb_index];
        ascb->tag = *(__be16 *)(edb->vaddr+4);
        fh = edb->vaddr + 16;
        ru = edb->vaddr + 16 + sizeof(*fh);
        res = ru->status;
        if (ru->datapres == 1)          /* Response data present */
                res = ru->resp_data[3];
#if 0
        ascb->tag = fh->tag;
#endif
        ascb->tag_valid = 1;

        asd_invalidate_edb(escb, edb_id);
        return res;
}

static void asd_tmf_tasklet_complete(struct asd_ascb *ascb,
                                     struct done_list_struct *dl)
{
        if (!del_timer(&ascb->timer))
                return;

        ASD_DPRINTK("tmf tasklet complete\n");

        if (dl->opcode == TC_SSP_RESP)
                ascb->uldd_task = (void *) (unsigned long)
                        asd_get_tmf_resp_tasklet(ascb, dl);
        else
                ascb->uldd_task = (void *) 0xFF00 + (unsigned long) dl->opcode;

        complete(&ascb->completion);
}

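/*
 * Clear the nexus of a task whose ABORT TASK TMF has been answered but
 * whose SAS_TASK_STATE_DONE bit is still clear: clear by tag when a
 * valid tag was recovered from the TMF response, otherwise by the
 * task's transaction context index, then wait (with a timeout) on the
 * task's own completion to decide whether the abort took effect.
 */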
static inline int asd_clear_nexus(struct sas_task *task)
{
        int res = TMF_RESP_FUNC_FAILED;
        int leftover;
        struct asd_ascb *tascb = task->lldd_task;
        unsigned long flags;

        ASD_DPRINTK("task not done, clearing nexus\n");
        if (tascb->tag_valid)
                res = asd_clear_nexus_tag(task);
        else
                res = asd_clear_nexus_index(task);
        leftover = wait_for_completion_timeout(&tascb->completion,
                                               AIC94XX_SCB_TIMEOUT);
        ASD_DPRINTK("came back from clear nexus\n");
        spin_lock_irqsave(&task->task_state_lock, flags);
        if (leftover < 1)
                res = TMF_RESP_FUNC_FAILED;
        if (task->task_state_flags & SAS_TASK_STATE_DONE)
                res = TMF_RESP_FUNC_COMPLETE;
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        return res;
}

/**
 * asd_abort_task -- ABORT TASK TMF
 * @task: the task to be aborted
 *
 * Before calling ABORT TASK the task state flags should be ORed with
 * SAS_TASK_STATE_ABORTED (unless SAS_TASK_STATE_DONE is set) under
 * the task_state_lock IRQ spinlock, then ABORT TASK *must* be called.
 *
 * Implements the ABORT TASK TMF, I_T_L_Q nexus.
 * Returns: SAS TMF responses (see sas_task.h),
 *          -ENOMEM,
 *          -SAS_QUEUE_FULL.
 *
 * When ABORT TASK returns, the caller of ABORT TASK checks first the
 * task->task_state_flags, and then the return value of ABORT TASK.
 *
 * If the task has task state bit SAS_TASK_STATE_DONE set, then the
 * task was completed successfully prior to it being aborted.  The
 * caller of ABORT TASK has responsibility to call task->task_done()
 * xor free the task, depending on their framework.  The return code
 * is TMF_RESP_FUNC_FAILED in this case.
 *
 * Else the SAS_TASK_STATE_DONE bit is not set,
 *      If the return code is TMF_RESP_FUNC_COMPLETE, then
 *              the task was aborted successfully.  The caller of
 *              ABORT TASK has responsibility to call task->task_done()
 *              to finish the task, xor free the task depending on their
 *              framework.
 *      else
 *              the ABORT TASK returned some kind of error.  The task
 *              was _not_ cancelled.  Nothing can be assumed.
 *              The caller of ABORT TASK may wish to retry.
 */
int asd_abort_task(struct sas_task *task)
{
        struct asd_ascb *tascb = task->lldd_task;
        struct asd_ha_struct *asd_ha = tascb->ha;
        int res = 1;
        unsigned long flags;
        struct asd_ascb *ascb = NULL;
        struct scb *scb;
        int leftover;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                res = TMF_RESP_FUNC_COMPLETE;
                ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
                goto out_done;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
        if (!ascb)
                return -ENOMEM;
        scb = ascb->scb;

        scb->header.opcode = SCB_ABORT_TASK;

        switch (task->task_proto) {
        case SAS_PROTOCOL_SATA:
        case SAS_PROTOCOL_STP:
                scb->abort_task.proto_conn_rate = (1 << 5); /* STP */
                break;
        case SAS_PROTOCOL_SSP:
                scb->abort_task.proto_conn_rate = (1 << 4); /* SSP */
                scb->abort_task.proto_conn_rate |= task->dev->linkrate;
                break;
        case SAS_PROTOCOL_SMP:
                break;
        default:
                break;
        }

        if (task->task_proto == SAS_PROTOCOL_SSP) {
                scb->abort_task.ssp_frame.frame_type = SSP_TASK;
                memcpy(scb->abort_task.ssp_frame.hashed_dest_addr,
                       task->dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
                memcpy(scb->abort_task.ssp_frame.hashed_src_addr,
                       task->dev->port->ha->hashed_sas_addr,
                       HASHED_SAS_ADDR_SIZE);
                scb->abort_task.ssp_frame.tptt = cpu_to_be16(0xFFFF);

                memcpy(scb->abort_task.ssp_task.lun, task->ssp_task.LUN, 8);
                scb->abort_task.ssp_task.tmf = TMF_ABORT_TASK;
                scb->abort_task.ssp_task.tag = cpu_to_be16(0xFFFF);
        }

        scb->abort_task.sister_scb = cpu_to_le16(0xFFFF);
        scb->abort_task.conn_handle = cpu_to_le16(
                (u16)(unsigned long)task->dev->lldd_dev);
        scb->abort_task.retry_count = 1;
        scb->abort_task.index = cpu_to_le16((u16)tascb->tc_index);
        scb->abort_task.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);

        res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
                                   asd_tmf_timedout);
        if (res)
                goto out;
        wait_for_completion(&ascb->completion);
        ASD_DPRINTK("tmf came back\n");

        res = (int) (unsigned long) ascb->uldd_task;
        tascb->tag = ascb->tag;
        tascb->tag_valid = ascb->tag_valid;

        spin_lock_irqsave(&task->task_state_lock, flags);
        if (task->task_state_flags & SAS_TASK_STATE_DONE) {
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                res = TMF_RESP_FUNC_COMPLETE;
                ASD_DPRINTK("%s: task 0x%p done\n", __FUNCTION__, task);
                goto out_done;
        }
        spin_unlock_irqrestore(&task->task_state_lock, flags);

        switch (res) {
        /* The task to be aborted has been sent to the device.
         * We got a Response IU for the ABORT TASK TMF. */
        case TC_NO_ERROR + 0xFF00:
        case TMF_RESP_FUNC_COMPLETE:
        case TMF_RESP_FUNC_FAILED:
                res = asd_clear_nexus(task);
                break;
        case TMF_RESP_INVALID_FRAME:
        case TMF_RESP_OVERLAPPED_TAG:
        case TMF_RESP_FUNC_ESUPP:
        case TMF_RESP_NO_LUN:
                goto out_done; break;
        }
        /* In the following we assume that the managing layer
         * will _never_ make a mistake, when issuing ABORT TASK.
         */
        switch (res) {
        default:
                res = asd_clear_nexus(task);
                /* fallthrough */
        case TC_NO_ERROR + 0xFF00:
        case TMF_RESP_FUNC_COMPLETE:
                break;
        /* The task hasn't been sent to the device xor we never got
         * a (sane) Response IU for the ABORT TASK TMF.
         */
        case TF_NAK_RECV + 0xFF00:
                res = TMF_RESP_INVALID_FRAME;
                break;
        case TF_TMF_TASK_DONE + 0xFF00: /* done but not reported yet */
                res = TMF_RESP_FUNC_FAILED;
                leftover = wait_for_completion_timeout(&tascb->completion,
                                                       AIC94XX_SCB_TIMEOUT);
                spin_lock_irqsave(&task->task_state_lock, flags);
                if (leftover < 1)
                        res = TMF_RESP_FUNC_FAILED;
                if (task->task_state_flags & SAS_TASK_STATE_DONE)
                        res = TMF_RESP_FUNC_COMPLETE;
                spin_unlock_irqrestore(&task->task_state_lock, flags);
                goto out_done;
        case TF_TMF_NO_TAG + 0xFF00:
        case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
        case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
                res = TMF_RESP_FUNC_COMPLETE;
                goto out_done;
        case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
                res = TMF_RESP_FUNC_ESUPP;
                goto out;
        }
out_done:
        if (res == TMF_RESP_FUNC_COMPLETE) {
                task->lldd_task = NULL;
                mb();
                asd_ascb_free(tascb);
        }
out:
        asd_ascb_free(ascb);
        ASD_DPRINTK("task 0x%p aborted, res: 0x%x\n", task, res);
        return res;
}
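
/*
 * Illustrative caller-side sketch, not taken from this driver or from
 * libsas; it only restates the contract documented above asd_abort_task().
 * A caller would mark the task aborted under the task state lock before
 * issuing the TMF, then check the DONE bit as well as the return code:
 *
 *      spin_lock_irqsave(&task->task_state_lock, flags);
 *      if (!(task->task_state_flags & SAS_TASK_STATE_DONE))
 *              task->task_state_flags |= SAS_TASK_STATE_ABORTED;
 *      spin_unlock_irqrestore(&task->task_state_lock, flags);
 *
 *      res = asd_abort_task(task);
 *      if ((task->task_state_flags & SAS_TASK_STATE_DONE) ||
 *          res == TMF_RESP_FUNC_COMPLETE)
 *              finish or free the task, per the caller's framework;
 *      else
 *              nothing can be assumed; the caller may wish to retry.
 */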

/**
 * asd_initiate_ssp_tmf -- send a TMF to an I_T_L or I_T_L_Q nexus
 * @dev: pointer to struct domain_device of interest
 * @lun: pointer to u8[8] which is the LUN
 * @tmf: the TMF to be performed (see sas_task.h or the SAS spec)
 * @index: the transaction context of the task to be queried if QT TMF
 *
 * This function is used to send ABORT TASK SET, CLEAR ACA,
 * CLEAR TASK SET, LU RESET and QUERY TASK TMFs.
 *
 * No SCBs should be queued to the I_T_L nexus when this SCB is
 * pending.
 *
 * Returns: TMF response code (see sas_task.h or the SAS spec)
 */
static int asd_initiate_ssp_tmf(struct domain_device *dev, u8 *lun,
                                int tmf, int index)
{
        struct asd_ha_struct *asd_ha = dev->port->ha->lldd_ha;
        struct asd_ascb *ascb;
        int res = 1;
        struct scb *scb;

        if (!(dev->tproto & SAS_PROTOCOL_SSP))
                return TMF_RESP_FUNC_ESUPP;

        ascb = asd_ascb_alloc_list(asd_ha, &res, GFP_KERNEL);
        if (!ascb)
                return -ENOMEM;
        scb = ascb->scb;

        if (tmf == TMF_QUERY_TASK)
                scb->header.opcode = QUERY_SSP_TASK;
        else
                scb->header.opcode = INITIATE_SSP_TMF;

        scb->ssp_tmf.proto_conn_rate = (1 << 4); /* SSP */
        scb->ssp_tmf.proto_conn_rate |= dev->linkrate;
        /* SSP frame header */
        scb->ssp_tmf.ssp_frame.frame_type = SSP_TASK;
        memcpy(scb->ssp_tmf.ssp_frame.hashed_dest_addr,
               dev->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        memcpy(scb->ssp_tmf.ssp_frame.hashed_src_addr,
               dev->port->ha->hashed_sas_addr, HASHED_SAS_ADDR_SIZE);
        scb->ssp_tmf.ssp_frame.tptt = cpu_to_be16(0xFFFF);
        /* SSP Task IU */
        memcpy(scb->ssp_tmf.ssp_task.lun, lun, 8);
        scb->ssp_tmf.ssp_task.tmf = tmf;

        scb->ssp_tmf.sister_scb = cpu_to_le16(0xFFFF);
        scb->ssp_tmf.conn_handle = cpu_to_le16((u16)(unsigned long)
                                               dev->lldd_dev);
        scb->ssp_tmf.retry_count = 1;
        scb->ssp_tmf.itnl_to = cpu_to_le16(ITNL_TIMEOUT_CONST);
        if (tmf == TMF_QUERY_TASK)
                scb->ssp_tmf.index = cpu_to_le16(index);

        res = asd_enqueue_internal(ascb, asd_tmf_tasklet_complete,
                                   asd_tmf_timedout);
        if (res)
                goto out_err;
        wait_for_completion(&ascb->completion);
        res = (int) (unsigned long) ascb->uldd_task;

        switch (res) {
        case TC_NO_ERROR + 0xFF00:
                res = TMF_RESP_FUNC_COMPLETE;
                break;
        case TF_NAK_RECV + 0xFF00:
                res = TMF_RESP_INVALID_FRAME;
                break;
        case TF_TMF_TASK_DONE + 0xFF00:
                res = TMF_RESP_FUNC_FAILED;
                break;
        case TF_TMF_NO_TAG + 0xFF00:
        case TF_TMF_TAG_FREE + 0xFF00: /* the tag is in the free list */
        case TF_TMF_NO_CONN_HANDLE + 0xFF00: /* no such device */
                res = TMF_RESP_FUNC_COMPLETE;
                break;
        case TF_TMF_NO_CTX + 0xFF00: /* not in seq, or proto != SSP */
                res = TMF_RESP_FUNC_ESUPP;
                break;
        default:
                /* Allow TMF response codes to propagate upwards */
                break;
        }
out_err:
        asd_ascb_free(ascb);
        return res;
}
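
/*
 * The exported TMF entry points below are thin wrappers around
 * asd_initiate_ssp_tmf(): each issues its TMF against the I_T_L nexus
 * and, on TMF_RESP_FUNC_COMPLETE, also clears that nexus in the
 * sequencer via asd_clear_nexus_I_T_L().
 */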

int asd_abort_task_set(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_ABORT_TASK_SET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_clear_aca(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_ACA, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_clear_task_set(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_CLEAR_TASK_SET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

int asd_lu_reset(struct domain_device *dev, u8 *lun)
{
        int res = asd_initiate_ssp_tmf(dev, lun, TMF_LU_RESET, 0);

        if (res == TMF_RESP_FUNC_COMPLETE)
                asd_clear_nexus_I_T_L(dev, lun);
        return res;
}

/**
 * asd_query_task -- send a QUERY TASK TMF to an I_T_L_Q nexus
 * @task: pointer to sas_task struct of interest
 *
 * Returns: TMF_RESP_FUNC_COMPLETE if the task is not in the task set,
 * or TMF_RESP_FUNC_SUCC if the task is in the task set.
 *
 * Normally the management layer sets the task to aborted state,
 * and then calls query task and then abort task.
 */
int asd_query_task(struct sas_task *task)
{
        struct asd_ascb *ascb = task->lldd_task;
        int index;

        if (ascb) {
                index = ascb->tc_index;
                return asd_initiate_ssp_tmf(task->dev, task->ssp_task.LUN,
                                            TMF_QUERY_TASK, index);
        }
        return TMF_RESP_FUNC_COMPLETE;
}