/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "isci.h"
#include "task.h"
#include "request.h"
#include "sata.h"
#include "scu_completion_codes.h"
#include "scu_event_codes.h"
#include "sas.h"

/**
 * This method returns the sgl element pair for the specified sgl_pair index.
 * @sci_req: This parameter specifies the IO request for which to retrieve
 *    the Scatter-Gather List element pair.
 * @sgl_pair_index: This parameter specifies the index into the SGL element
 *    pair to be retrieved.
 *
 * This method returns a pointer to a struct scu_sgl_element_pair.
 */
static struct scu_sgl_element_pair *scic_sds_request_get_sgl_element_pair(
	struct scic_sds_request *sci_req,
	u32 sgl_pair_index
	) {
	struct scu_task_context *task_context;

	task_context = (struct scu_task_context *)sci_req->task_context_buffer;

	if (sgl_pair_index == 0) {
		return &task_context->sgl_pair_ab;
	} else if (sgl_pair_index == 1) {
		return &task_context->sgl_pair_cd;
	}

	return &sci_req->sg_table[sgl_pair_index - 2];
}

/**
 * This function will build the SGL list for an IO request.
 * @sds_request: This parameter specifies the IO request for which to build
 *    the Scatter-Gather List.
 *
 */
static void scic_sds_request_build_sgl(struct scic_sds_request *sds_request)
{
	struct isci_request *isci_request = sci_req_to_ireq(sds_request);
	struct isci_host *isci_host = isci_request->isci_host;
	struct sas_task *task = isci_request_access_task(isci_request);
	struct scatterlist *sg = NULL;
	dma_addr_t dma_addr;
	u32 sg_idx = 0;
	struct scu_sgl_element_pair *scu_sg = NULL;
	struct scu_sgl_element_pair *prev_sg = NULL;

	if (task->num_scatter > 0) {
		sg = task->scatter;

		while (sg) {
			scu_sg = scic_sds_request_get_sgl_element_pair(
					sds_request,
					sg_idx);

			SCU_SGL_COPY(scu_sg->A, sg);

			sg = sg_next(sg);

			if (sg) {
				SCU_SGL_COPY(scu_sg->B, sg);
				sg = sg_next(sg);
			} else
				SCU_SGL_ZERO(scu_sg->B);

			if (prev_sg) {
				dma_addr =
					scic_io_request_get_dma_addr(
							sds_request,
							scu_sg);

				prev_sg->next_pair_upper =
					upper_32_bits(dma_addr);
				prev_sg->next_pair_lower =
					lower_32_bits(dma_addr);
			}

			prev_sg = scu_sg;
			sg_idx++;
		}
	} else {	/* handle when no sg */
		scu_sg = scic_sds_request_get_sgl_element_pair(sds_request,
							       sg_idx);

		dma_addr = dma_map_single(&isci_host->pdev->dev,
					  task->scatter,
					  task->total_xfer_len,
					  task->data_dir);

		isci_request->zero_scatter_daddr = dma_addr;

		scu_sg->A.length = task->total_xfer_len;
		scu_sg->A.address_upper = upper_32_bits(dma_addr);
		scu_sg->A.address_lower = lower_32_bits(dma_addr);
	}

	if (scu_sg) {
		scu_sg->next_pair_upper = 0;
		scu_sg->next_pair_lower = 0;
	}
}
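
/*
 * A sketch of the chain scic_sds_request_build_sgl() produces (illustrative
 * only; the pair sources follow the index math in
 * scic_sds_request_get_sgl_element_pair()). Five scatterlist entries map as:
 *
 *	pair 0 (task_context->sgl_pair_ab): A = sg0, B = sg1
 *	pair 1 (task_context->sgl_pair_cd): A = sg2, B = sg3
 *	pair 2 (sci_req->sg_table[0]):      A = sg4, B = zeroed
 *
 * Each pair's next_pair_{upper,lower} fields hold the 64-bit DMA address of
 * the next pair; the final pair's next pointers are zeroed above to
 * terminate the chain for the hardware.
 */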

static void scic_sds_io_request_build_ssp_command_iu(struct scic_sds_request *sci_req)
{
	struct ssp_cmd_iu *cmd_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	cmd_iu = &sci_req->ssp.cmd;

	memcpy(cmd_iu->LUN, task->ssp_task.LUN, 8);
	cmd_iu->add_cdb_len = 0;
	cmd_iu->_r_a = 0;
	cmd_iu->_r_b = 0;
	cmd_iu->en_fburst = 0; /* unsupported */
	cmd_iu->task_prio = task->ssp_task.task_prio;
	cmd_iu->task_attr = task->ssp_task.task_attr;
	cmd_iu->_r_c = 0;

	sci_swab32_cpy(&cmd_iu->cdb, task->ssp_task.cdb,
		       sizeof(task->ssp_task.cdb) / sizeof(u32));
}
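
/*
 * An illustrative note on the CDB copy above: sci_swab32_cpy() copies u32
 * words while byte-swapping each one, so a CDB whose first four bytes sit
 * in host memory as c0 c1 c2 c3 lands in the command IU as c3 c2 c1 c0,
 * and likewise for each following word.  The evident intent is to present
 * the CDB bytes in the order the SSP frame carries on the wire; compare
 * the endianness note for task frames below.
 */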

static void scic_sds_task_request_build_ssp_task_iu(struct scic_sds_request *sci_req)
{
	struct ssp_task_iu *task_iu;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	task_iu = &sci_req->ssp.tmf;

	memset(task_iu, 0, sizeof(struct ssp_task_iu));

	memcpy(task_iu->LUN, task->ssp_task.LUN, 8);

	task_iu->task_func = isci_tmf->tmf_code;
	task_iu->task_tag =
		(ireq->ttype == tmf_task) ?
		isci_tmf->io_tag :
		SCI_CONTROLLER_INVALID_IO_TAG;
}

/**
 * This method will fill in the SCU Task Context for any type of SSP request.
 * @sds_request:
 * @task_context:
 *
 */
static void scu_ssp_request_construct_task_context(
	struct scic_sds_request *sds_request,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sds_request);
	target_device = scic_sds_request_get_device(sds_request);
	target_port = scic_sds_request_get_port(sds_request);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SSP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sds_request->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;

	/* task_context->type.ssp.tag = sci_req->io_tag; */
	task_context->task_phase = 0x01;

	if (sds_request->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sds_request->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data
		 *
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi
		 */
		sds_request->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.cmd);

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/*
	 * Copy the physical address for the response buffer to the
	 * SCU Task Context
	 */
	dma_addr = scic_io_request_get_dma_addr(sds_request,
						&sds_request->ssp.rsp);

	task_context->response_iu_upper = upper_32_bits(dma_addr);
	task_context->response_iu_lower = lower_32_bits(dma_addr);
}
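
/*
 * An illustrative decomposition of the post_context word built above (field
 * positions are those implied by the SCU_CONTEXT_COMMAND_*_SHIFT constants):
 *
 *	post_context = SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC
 *		     | (protocol engine group << PROTOCOL_ENGINE_GROUP_SHIFT)
 *		     | (logical port index << LOGICAL_PORT_SHIFT)
 *		     | TCi
 *
 * where the TCi (task context index) is OR'd in here only when the user
 * pre-assigned the tag; otherwise scic_sds_request_start() adds it once a
 * tag has been allocated.
 */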

/**
 * This method will fill in the SCU Task Context for a SSP IO request.
 * @sci_req:
 *
 */
static void scu_ssp_io_request_construct_task_context(
	struct scic_sds_request *sci_req,
	enum dma_data_direction dir,
	u32 len)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->ssp_command_iu_length =
		sizeof(struct ssp_cmd_iu) / sizeof(u32);
	task_context->type.ssp.frame_type = SSP_COMMAND;

	switch (dir) {
	case DMA_FROM_DEVICE:
	case DMA_NONE:
	default:
		task_context->task_type = SCU_TASK_TYPE_IOREAD;
		break;
	case DMA_TO_DEVICE:
		task_context->task_type = SCU_TASK_TYPE_IOWRITE;
		break;
	}

	task_context->transfer_length_bytes = len;

	if (task_context->transfer_length_bytes > 0)
		scic_sds_request_build_sgl(sci_req);
}

/**
 * This method will fill in the SCU Task Context for a SSP Task request.
 * The following important settings are utilized:
 *
 *   -# priority == SCU_TASK_PRIORITY_HIGH.  This ensures that the task
 *      request is issued ahead of other IO destined for the same Remote
 *      Node.
 *   -# task_type == SCU_TASK_TYPE_RAW_FRAME.  A raw frame (rather than a
 *      normal request type) is utilized to perform task management.
 *   -# control_frame == 1.  This ensures that the proper endianness is set
 *      so that the bytes are transmitted in the right order for a task
 *      frame.
 *
 * @sci_req: This parameter specifies the task request object being
 *    constructed.
 *
 */
static void scu_ssp_task_request_construct_task_context(
	struct scic_sds_request *sci_req)
{
	struct scu_task_context *task_context;

	task_context = scic_sds_request_get_task_context(sci_req);

	scu_ssp_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 1;
	task_context->priority = SCU_TASK_PRIORITY_HIGH;
	task_context->task_type = SCU_TASK_TYPE_RAW_FRAME;
	task_context->transfer_length_bytes = 0;
	task_context->type.ssp.frame_type = SSP_TASK;
	task_context->ssp_command_iu_length =
		sizeof(struct ssp_task_iu) / sizeof(u32);
}

/**
 * This method will fill in the SCU Task Context for any type of SATA
 * request.  This is called from the various SATA constructors.
 * @sci_req: The general IO request object which is to be used in
 *    constructing the SCU task context.
 * @task_context: The buffer pointer for the SCU task context which is being
 *    constructed.
 *
 * The general io request construction is complete, and the buffer assignment
 * for the command buffer is complete.  Revisit task context construction to
 * determine what is common for SSP/SMP/STP task context structures.
 */
static void scu_sata_request_construct_task_context(
	struct scic_sds_request *sci_req,
	struct scu_task_context *task_context)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *controller;
	struct scic_sds_remote_device *target_device;
	struct scic_sds_port *target_port;

	controller = scic_sds_request_get_controller(sci_req);
	target_device = scic_sds_request_get_device(sci_req);
	target_port = scic_sds_request_get_port(sci_req);

	/* Fill in the TC with its required data */
	task_context->abort = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->initiator_request = 1;
	task_context->connection_rate = target_device->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(controller);
	task_context->logical_port_index =
		scic_sds_port_get_index(target_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_STP;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	task_context->remote_node_index =
		scic_sds_remote_device_get_index(sci_req->target_device);
	task_context->command_code = 0;

	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 0;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	task_context->address_modifier = 0;
	task_context->task_phase = 0x01;

	task_context->ssp_command_iu_length =
		(sizeof(struct host_to_dev_fis) - sizeof(u32)) / sizeof(u32);

	/* Set the first word of the H2D REG FIS */
	task_context->type.words[0] = *(u32 *)&sci_req->stp.cmd;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(controller) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(target_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address for the command buffer to the SCU Task
	 * Context.  We must offset the command buffer by 4 bytes because the
	 * first 4 bytes are transferred in the body of the TC.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->stp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SATA Requests do not have a response buffer */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}
473
Dan Williamsf1f52e72011-05-10 02:28:45 -0700474
475
476/**
Dan Williams5dec6f42011-05-10 02:28:49 -0700477 * scu_stp_raw_request_construct_task_context -
478 * @sci_req: This parameter specifies the STP request object for which to
479 * construct a RAW command frame task context.
480 * @task_context: This parameter specifies the SCU specific task context buffer
481 * to construct.
Dan Williamsf1f52e72011-05-10 02:28:45 -0700482 *
Dan Williams5dec6f42011-05-10 02:28:49 -0700483 * This method performs the operations common to all SATA/STP requests
484 * utilizing the raw frame method. none
Dan Williamsf1f52e72011-05-10 02:28:45 -0700485 */
static void scu_stp_raw_request_construct_task_context(struct scic_sds_stp_request *stp_req,
						       struct scu_task_context *task_context)
{
	struct scic_sds_request *sci_req = to_sci_req(stp_req);

	scu_sata_request_construct_task_context(sci_req, task_context);

	task_context->control_frame = 0;
	task_context->priority = SCU_TASK_PRIORITY_NORMAL;
	task_context->task_type = SCU_TASK_TYPE_SATA_RAW_FRAME;
	task_context->type.stp.fis_type = FIS_REGH2D;
	task_context->transfer_length_bytes = sizeof(struct host_to_dev_fis) - sizeof(u32);
}

static enum sci_status
scic_sds_stp_pio_request_construct(struct scic_sds_request *sci_req,
				   bool copy_rx_frame)
{
	struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
	struct scic_sds_stp_pio_request *pio = &stp_req->type.pio;

	scu_stp_raw_request_construct_task_context(stp_req,
						   sci_req->task_context_buffer);

	pio->current_transfer_bytes = 0;
	pio->ending_error = 0;
	pio->ending_status = 0;

	pio->request_current.sgl_offset = 0;
	pio->request_current.sgl_set = SCU_SGL_ELEMENT_PAIR_A;

	if (copy_rx_frame) {
		scic_sds_request_build_sgl(sci_req);
		/* Since the IO request copy of the TC contains the same data as
		 * the actual TC this pointer is valid for either.
		 */
		pio->request_current.sgl_pair = &sci_req->task_context_buffer->sgl_pair_ab;
	} else {
		/* The user does not want the data copied to the SGL buffer location */
		pio->request_current.sgl_pair = NULL;
	}

	return SCI_SUCCESS;
}

/**
 *
 * @sci_req: This parameter specifies the request to be constructed as an
 *    optimized request.
 * @optimized_task_type: This parameter specifies whether the request is to be
 *    a UDMA request or an NCQ request.  A value of 0 indicates UDMA; a value
 *    of 1 indicates NCQ.
 *
 * This method will perform request construction common to all types of STP
 * requests that are optimized by the silicon (i.e. UDMA, NCQ).
 */
static void scic_sds_stp_optimized_request_construct(struct scic_sds_request *sci_req,
						     u8 optimized_task_type,
						     u32 len,
						     enum dma_data_direction dir)
{
	struct scu_task_context *task_context = sci_req->task_context_buffer;

	/* Build the STP task context structure */
	scu_sata_request_construct_task_context(sci_req, task_context);

	/* Copy over the SGL elements */
	scic_sds_request_build_sgl(sci_req);

	/* Copy over the number of bytes to be transferred */
	task_context->transfer_length_bytes = len;

	if (dir == DMA_TO_DEVICE) {
		/*
		 * The difference between the DMA IN and DMA OUT request task
		 * type values are consistent with the difference between FPDMA
		 * READ and FPDMA WRITE values.  Add the supplied task type
		 * parameter to this difference to set the task type properly
		 * for this DATA OUT (WRITE) case.
		 */
		task_context->task_type = optimized_task_type + (SCU_TASK_TYPE_DMA_OUT
								 - SCU_TASK_TYPE_DMA_IN);
	} else {
		/*
		 * For the DATA IN (READ) case, simply save the supplied
		 * optimized task type.
		 */
		task_context->task_type = optimized_task_type;
	}
}
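
/*
 * Illustrative arithmetic for the DMA_TO_DEVICE branch above, assuming the
 * task-type encodings keep the read/write spacing that the comment relies
 * on: passing optimized_task_type == SCU_TASK_TYPE_DMA_IN (plain UDMA)
 * yields exactly SCU_TASK_TYPE_DMA_OUT, and passing
 * SCU_TASK_TYPE_FPDMAQ_READ (NCQ) yields the corresponding FPDMA write
 * task type.
 */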
static enum sci_status
scic_io_request_construct_sata(struct scic_sds_request *sci_req,
			       u32 len,
			       enum dma_data_direction dir,
			       bool copy)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
			return SCI_SUCCESS;
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"management protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (!sas_protocol_ata(task->task_proto)) {
		dev_err(scic_to_dev(sci_req->owning_controller),
			"%s: Non-ATA protocol in SATA path: 0x%x\n",
			__func__,
			task->task_proto);
		return SCI_FAILURE;
	}

	/* non data */
	if (task->data_dir == DMA_NONE) {
		scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
							   sci_req->task_context_buffer);
		return SCI_SUCCESS;
	}

	/* NCQ */
	if (task->ata_task.use_ncq) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_FPDMAQ_READ,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* DMA */
	if (task->ata_task.dma_xfer) {
		scic_sds_stp_optimized_request_construct(sci_req,
							 SCU_TASK_TYPE_DMA_IN,
							 len, dir);
		return SCI_SUCCESS;
	}

	/* PIO */
	return scic_sds_stp_pio_request_construct(sci_req, copy);
}

static enum sci_status scic_io_request_construct_basic_ssp(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(ireq);

	sci_req->protocol = SCIC_SSP_PROTOCOL;

	scu_ssp_io_request_construct_task_context(sci_req,
						  task->data_dir,
						  task->total_xfer_len);

	scic_sds_io_request_build_ssp_command_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

enum sci_status scic_task_request_construct_ssp(
	struct scic_sds_request *sci_req)
{
	/* Construct the SSP Task SCU Task Context */
	scu_ssp_task_request_construct_task_context(sci_req);

	/* Fill in the SSP Task IU */
	scic_sds_task_request_build_ssp_task_iu(sci_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return SCI_SUCCESS;
}

static enum sci_status scic_io_request_construct_basic_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status;
	struct scic_sds_stp_request *stp_req;
	bool copy = false;
	struct isci_request *isci_request = sci_req_to_ireq(sci_req);
	struct sas_task *task = isci_request_access_task(isci_request);

	stp_req = &sci_req->stp.req;
	sci_req->protocol = SCIC_STP_PROTOCOL;

	copy = (task->data_dir == DMA_NONE) ? false : true;

	status = scic_io_request_construct_sata(sci_req,
						task->total_xfer_len,
						task->data_dir,
						copy);

	if (status == SCI_SUCCESS)
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req)
{
	enum sci_status status = SCI_SUCCESS;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	/* check for management protocols */
	if (ireq->ttype == tmf_task) {
		struct isci_tmf *tmf = isci_request_access_tmf(ireq);

		if (tmf->tmf_code == isci_tmf_sata_srst_high ||
		    tmf->tmf_code == isci_tmf_sata_srst_low) {
			scu_stp_raw_request_construct_task_context(&sci_req->stp.req,
								   sci_req->task_context_buffer);
		} else {
			dev_err(scic_to_dev(sci_req->owning_controller),
				"%s: Request 0x%p received un-handled SAT "
				"Protocol 0x%x.\n",
				__func__, sci_req, tmf->tmf_code);

			return SCI_FAILURE;
		}
	}

	if (status != SCI_SUCCESS)
		return status;
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	return status;
}

/**
 * sci_req_tx_bytes - bytes transferred when reply underruns request
 * @sci_req: request that was terminated early
 */
#define SCU_TASK_CONTEXT_SRAM 0x200000
static u32 sci_req_tx_bytes(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	u32 ret_val = 0;

	if (readl(&scic->smu_registers->address_modifier) == 0) {
		void __iomem *scu_reg_base = scic->scu_registers;

		/* get the bytes of data from the Address == BAR1 + 20002Ch + (256*TCi) where
		 *   BAR1 is the scu_registers
		 *   0x20002C = 0x200000 + 0x2c
		 *            = start of task context SRAM + offset of (type.ssp.data_offset)
		 *   TCi is the io_tag of struct scic_sds_request
		 */
		ret_val = readl(scu_reg_base +
				(SCU_TASK_CONTEXT_SRAM + offsetof(struct scu_task_context, type.ssp.data_offset)) +
				((sizeof(struct scu_task_context)) * scic_sds_io_tag_get_index(sci_req->io_tag)));
	}

	return ret_val;
}
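
/*
 * Worked example of the SRAM address math above, assuming the 256-byte
 * (0x100) struct scu_task_context implied by the "256*TCi" comment: for
 * TCi == 3 the read targets
 *
 *	scu_reg_base + 0x200000 + 0x2c + (3 * 0x100) = scu_reg_base + 0x20032c
 *
 * i.e. the type.ssp.data_offset field of the fourth task context in SRAM.
 */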

enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req)
{
	struct scic_sds_controller *scic = sci_req->owning_controller;
	struct scu_task_context *task_context;
	enum sci_base_request_states state;

	if (sci_req->device_sequence !=
	    scic_sds_remote_device_get_sequence(sci_req->target_device))
		return SCI_FAILURE;

	state = sci_req->state_machine.current_state_id;
	if (state != SCI_BASE_REQUEST_STATE_CONSTRUCTED) {
		dev_warn(scic_to_dev(scic),
			 "%s: SCIC IO Request requested to start while in wrong "
			 "state %d\n", __func__, state);
		return SCI_FAILURE_INVALID_STATE;
	}

	/* If necessary, allocate a TCi for the io request object, and then,
	 * if necessary, copy the constructed TC data into the actual TC buffer.
	 * If everything is successful the post context field is updated with
	 * the TCi so the controller can post the request to the hardware.
	 */
	if (sci_req->io_tag == SCI_CONTROLLER_INVALID_IO_TAG)
		sci_req->io_tag = scic_controller_allocate_io_tag(scic);

	/* Record the IO Tag in the request */
	if (sci_req->io_tag != SCI_CONTROLLER_INVALID_IO_TAG) {
		task_context = sci_req->task_context_buffer;

		task_context->task_index = scic_sds_io_tag_get_index(sci_req->io_tag);

		switch (task_context->protocol_type) {
		case SCU_TASK_CONTEXT_PROTOCOL_SMP:
		case SCU_TASK_CONTEXT_PROTOCOL_SSP:
			/* SSP/SMP Frame */
			task_context->type.ssp.tag = sci_req->io_tag;
			task_context->type.ssp.target_port_transfer_tag =
				0xFFFF;
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_STP:
			/* STP/SATA Frame
			 * task_context->type.stp.ncq_tag = sci_req->ncq_tag;
			 */
			break;

		case SCU_TASK_CONTEXT_PROTOCOL_NONE:
			/* @todo When do we set no protocol type? */
			break;

		default:
			/* This should never happen since we build the IO
			 * requests */
			break;
		}

		/*
		 * Check to see if we need to copy the task context buffer
		 * or have been building into the task context buffer */
		if (sci_req->was_tag_assigned_by_user == false)
			scic_sds_controller_copy_task_context(scic, sci_req);

		/* Add to the post_context the io tag value */
		sci_req->post_context |= scic_sds_io_tag_get_index(sci_req->io_tag);

		/* Everything is good; go ahead and change state */
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_STARTED);

		return SCI_SUCCESS;
	}

	return SCI_FAILURE_INSUFFICIENT_RESOURCES;
}

enum sci_status
scic_sds_io_request_terminate(struct scic_sds_request *sci_req)
{
	enum sci_base_request_states state;

	state = sci_req->state_machine.current_state_id;

	switch (state) {
	case SCI_BASE_REQUEST_STATE_CONSTRUCTED:
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_STARTED:
	case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION:
	case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE:
	case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION:
	case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE:
	case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_ABORTING);
		return SCI_SUCCESS;
	case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_ABORTING);
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_ABORTING:
		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		return SCI_SUCCESS;
	case SCI_BASE_REQUEST_STATE_COMPLETED:
	default:
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: SCIC IO Request requested to abort while in wrong "
			 "state %d\n",
			 __func__,
			 sci_base_state_machine_get_state(&sci_req->state_machine));
		break;
	}

	return SCI_FAILURE_INVALID_STATE;
}

enum sci_status scic_sds_io_request_event_handler(
	struct scic_sds_request *request,
	u32 event_code)
{
	if (request->state_handlers->event_handler)
		return request->state_handlers->event_handler(request, event_code);

	dev_warn(scic_to_dev(request->owning_controller),
		 "%s: SCIC IO Request given event code notification %x while "
		 "in wrong state %d\n",
		 __func__,
		 event_code,
		 sci_base_state_machine_get_state(&request->state_machine));

	return SCI_FAILURE_INVALID_STATE;
}

/*
 * This function copies response data for requests returning response data
 * instead of sense data.
 * @sci_req: This parameter specifies the request object for which to copy
 *    the response data.
 */
static void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req)
{
	void *resp_buf;
	u32 len;
	struct ssp_response_iu *ssp_response;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct isci_tmf *isci_tmf = isci_request_access_tmf(ireq);

	ssp_response = &sci_req->ssp.rsp;

	resp_buf = &isci_tmf->resp.resp_iu;

	len = min_t(u32,
		    SSP_RESP_IU_MAX_SIZE,
		    be32_to_cpu(ssp_response->response_data_len));

	memcpy(resp_buf, ssp_response->resp_data, len);
}

static enum sci_status request_started_state_tc_event(struct scic_sds_request *sci_req,
						      u32 completion_code)
{
	struct ssp_response_iu *resp_iu;
	u8 datapres;

	/* TODO: Any SDMA return code other than 0 is bad; decode 0x003C0000
	 * to determine SDMA status.
	 */
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EARLY_RESP): {
		/* There are times when the SCU hardware will return an early
		 * response because the io request specified more data than is
		 * returned by the target device (mode pages, inquiry data,
		 * etc.).  We must check the response stats to see if this is
		 * truly a failed request or a good request that just got
		 * completed early.
		 */
		struct ssp_response_iu *resp = &sci_req->ssp.rsp;
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		if (resp->status == 0) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS_IO_DONE_EARLY);
		} else {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		}
		break;
	}
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CHECK_RESPONSE): {
		ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);

		sci_swab32_cpy(&sci_req->ssp.rsp,
			       &sci_req->ssp.rsp,
			       word_cnt);

		scic_sds_request_set_status(sci_req,
					    SCU_TASK_DONE_CHECK_RESPONSE,
					    SCI_FAILURE_IO_RESPONSE_VALID);
		break;
	}

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RESP_LEN_ERR):
		/* TODO With TASK_DONE_RESP_LEN_ERR is the response frame
		 * guaranteed to be received before this completion status is
		 * posted?
		 */
		resp_iu = &sci_req->ssp.rsp;
		datapres = resp_iu->datapres;

		if (datapres == 1 || datapres == 2) {
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_CHECK_RESPONSE,
						    SCI_FAILURE_IO_RESPONSE_VALID);
		} else
			scic_sds_request_set_status(sci_req,
						    SCU_TASK_DONE_GOOD,
						    SCI_SUCCESS);
		break;
	/* only stp device gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_PERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_DATA_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_ABORT_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_WD_LEN):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_RESP):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_SDBFIS):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDB_ERR):
		if (sci_req->protocol == SCIC_STP_PROTOCOL) {
			scic_sds_request_set_status(sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		} else {
			scic_sds_request_set_status(sci_req,
				SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
				SCU_COMPLETION_TL_STATUS_SHIFT,
				SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		}
		break;

	/* both stp/ssp device gets suspended */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LF_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_WRONG_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_BAD_DESTINATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_ZONE_VIOLATION):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED):
		scic_sds_request_set_status(sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED);
		break;

	/* neither ssp nor stp gets suspended. */
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_NAK_CMD_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_XR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_XR_IU_LEN_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SDMA_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OFFSET_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_EXCESS_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_DATA):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_OPEN_FAIL):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_VIIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_IIT_ENTRY_NV):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_RNCNV_OUTBOUND):
	default:
		scic_sds_request_set_status(
			sci_req,
			SCU_GET_COMPLETION_TL_STATUS(completion_code) >>
			SCU_COMPLETION_TL_STATUS_SHIFT,
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
		break;
	}

	/*
	 * TODO: This is probably wrong for ACK/NAK timeout conditions
	 */

	/* In all cases we will treat this as the completion of the IO req. */
	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_COMPLETED);
	return SCI_SUCCESS;
}

/*
 * This method implements the action to be taken when an SCIC_SDS_IO_REQUEST_T
 * object receives a scic_sds_request_complete() request. This method frees up
 * any io request resources that have been allocated and transitions the
 * request to its final state. Consider stopping the state machine instead of
 * transitioning to the final state?  Returns SCI_SUCCESS.
 */
static enum sci_status scic_sds_request_completed_state_complete_handler(
	struct scic_sds_request *request)
{
	if (request->was_tag_assigned_by_user != true) {
		scic_controller_free_io_tag(
			request->owning_controller, request->io_tag);
	}

	if (request->saved_rx_frame_index != SCU_INVALID_FRAME_INDEX) {
		scic_sds_controller_release_frame(
			request->owning_controller, request->saved_rx_frame_index);
	}

	sci_base_state_machine_change_state(&request->state_machine,
					    SCI_BASE_REQUEST_STATE_FINAL);
	return SCI_SUCCESS;
}

static enum sci_status request_aborting_state_tc_event(
	struct scic_sds_request *sci_req,
	u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case (SCU_TASK_DONE_GOOD << SCU_COMPLETION_TL_STATUS_SHIFT):
	case (SCU_TASK_DONE_TASK_ABORT << SCU_COMPLETION_TL_STATUS_SHIFT):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_TASK_ABORT,
					    SCI_FAILURE_IO_TERMINATED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/* Unless we get some strange error, wait for the task abort to
		 * complete.
		 * TODO: Should there be a state change for this completion?
		 */
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status ssp_task_request_await_tc_event(struct scic_sds_request *sci_req,
						       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_ACK_NAK_TO):
		/* Currently, the decision is to simply allow the task request
		 * to time out if the task IU wasn't received successfully.
		 * There is a potential for receiving multiple task responses if
		 * we decide to send the task IU again.
		 */
		dev_warn(scic_to_dev(sci_req->owning_controller),
			 "%s: TaskRequest:0x%p CompletionCode:%x - "
			 "ACK/NAK timeout\n", __func__, sci_req,
			 completion_code);

		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE);
		break;
	default:
		/* All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status smp_request_await_response_tc_event(struct scic_sds_request *sci_req,
							   u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		/* In the AWAIT RESPONSE state, any TC completion is
		 * unexpected, but if the TC has success status, we
		 * complete the IO anyway.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_RESP_TO_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_UFI_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_FRM_TYPE_ERR):
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_SMP_LL_RX_ERR):
		/* These statuses have been seen in a specific LSI
		 * expander, which sometimes is not able to send an smp
		 * response within 2 ms.  This causes our hardware to break
		 * the connection and set the TC completion with one of
		 * these SMP_XXX_XX_ERR statuses.  For this type of error,
		 * we ask the scic user to retry the request.
		 */
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_SMP_RESP_TO_ERR,
					    SCI_FAILURE_RETRY_REQUIRED);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;

	default:
		/* All other completion statuses cause the IO to be complete.
		 * If a NAK was received, then it is up to the user to retry
		 * the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

static enum sci_status smp_request_await_tc_event(struct scic_sds_request *sci_req,
						  u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}

void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *req,
				     u16 ncq_tag)
{
	/**
	 * @note This could be made to return an error to the user if the user
	 * attempts to set the NCQ tag in the wrong state.
	 */
	req->task_context_buffer->type.stp.ncq_tag = ncq_tag;
}

/**
 *
 * @stp_req:
 *
 * Get the next SGL element from the request:
 *   - Check on which SGL element pair we are working.
 *   - If working on element A of the pair, advance to element B.
 *   - Otherwise, check whether there are more SGL element pairs for this IO
 *     request; if there are, advance to the next pair and return its
 *     element A.
 *
 * Returns a struct scu_sgl_element *, or NULL when the list is exhausted.
 */
static struct scu_sgl_element *scic_sds_stp_request_pio_get_next_sgl(struct scic_sds_stp_request *stp_req)
{
	struct scu_sgl_element *current_sgl;
	struct scic_sds_request *sci_req = to_sci_req(stp_req);
	struct scic_sds_request_pio_sgl *pio_sgl = &stp_req->type.pio.request_current;

	if (pio_sgl->sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
		if (pio_sgl->sgl_pair->B.address_lower == 0 &&
		    pio_sgl->sgl_pair->B.address_upper == 0) {
			current_sgl = NULL;
		} else {
			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_B;
			current_sgl = &pio_sgl->sgl_pair->B;
		}
	} else {
		if (pio_sgl->sgl_pair->next_pair_lower == 0 &&
		    pio_sgl->sgl_pair->next_pair_upper == 0) {
			current_sgl = NULL;
		} else {
			u64 phys_addr;

			phys_addr = pio_sgl->sgl_pair->next_pair_upper;
			phys_addr <<= 32;
			phys_addr |= pio_sgl->sgl_pair->next_pair_lower;

			pio_sgl->sgl_pair = scic_request_get_virt_addr(sci_req, phys_addr);
			pio_sgl->sgl_set = SCU_SGL_ELEMENT_PAIR_A;
			current_sgl = &pio_sgl->sgl_pair->A;
		}
	}

	return current_sgl;
}
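
/*
 * Sketch of the traversal above over the chain built by
 * scic_sds_request_build_sgl(): within a pair the walk advances A to B (a
 * zeroed B address means A was the final element), and from B it follows
 * next_pair_{upper,lower}, reassembled into a 64-bit DMA address and mapped
 * back to a virtual address, to the next pair's element A.  A zeroed
 * next-pair address ends the walk with NULL.
 */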

static enum sci_status stp_request_non_data_await_h2d_tc_event(struct scic_sds_request *sci_req,
							       u32 completion_code)
{
	switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
	case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
		scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
					    SCI_SUCCESS);

		sci_base_state_machine_change_state(&sci_req->state_machine,
			SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE);
		break;

	default:
		/* All other completion statuses cause the IO to be
		 * complete.  If a NAK was received, then it is up to
		 * the user to retry the request.
		 */
		scic_sds_request_set_status(sci_req,
			SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
			SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);

		sci_base_state_machine_change_state(&sci_req->state_machine,
						    SCI_BASE_REQUEST_STATE_COMPLETED);
		break;
	}

	return SCI_SUCCESS;
}
1333
Dan Williams5dec6f42011-05-10 02:28:49 -07001334#define SCU_MAX_FRAME_BUFFER_SIZE 0x400 /* 1K is the maximum SCU frame data payload */
1335
 1336/* Transmit a DATA FIS from (current sgl + offset) for the input
 1337 * parameter length. The current sgl and offset are already stored in the IO request.
1338 */
 1339static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data_frame(
1340 struct scic_sds_request *sci_req,
1341 u32 length)
1342{
1343 struct scic_sds_controller *scic = sci_req->owning_controller;
1344 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1345 struct scu_task_context *task_context;
1346 struct scu_sgl_element *current_sgl;
1347
 1348	/* Recycle the TC and reconstruct it to send out a DATA FIS containing
 1349	 * the data from current_sgl + offset for the input length.
1350 */
1351 task_context = scic_sds_controller_get_task_context_buffer(scic,
1352 sci_req->io_tag);
1353
1354 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A)
1355 current_sgl = &stp_req->type.pio.request_current.sgl_pair->A;
1356 else
1357 current_sgl = &stp_req->type.pio.request_current.sgl_pair->B;
1358
1359 /* update the TC */
1360 task_context->command_iu_upper = current_sgl->address_upper;
1361 task_context->command_iu_lower = current_sgl->address_lower;
1362 task_context->transfer_length_bytes = length;
1363 task_context->type.stp.fis_type = FIS_DATA;
1364
1365 /* send the new TC out. */
1366 return scic_controller_continue_io(sci_req);
1367}
1368
1369static enum sci_status scic_sds_stp_request_pio_data_out_transmit_data(struct scic_sds_request *sci_req)
1370{
1371
1372 struct scu_sgl_element *current_sgl;
1373 u32 sgl_offset;
1374 u32 remaining_bytes_in_current_sgl = 0;
1375 enum sci_status status = SCI_SUCCESS;
1376 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1377
1378 sgl_offset = stp_req->type.pio.request_current.sgl_offset;
1379
1380 if (stp_req->type.pio.request_current.sgl_set == SCU_SGL_ELEMENT_PAIR_A) {
1381 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->A);
1382 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->A.length - sgl_offset;
1383 } else {
1384 current_sgl = &(stp_req->type.pio.request_current.sgl_pair->B);
1385 remaining_bytes_in_current_sgl = stp_req->type.pio.request_current.sgl_pair->B.length - sgl_offset;
1386 }
1387
1388
1389 if (stp_req->type.pio.pio_transfer_bytes > 0) {
1390 if (stp_req->type.pio.pio_transfer_bytes >= remaining_bytes_in_current_sgl) {
1391 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = remaining_bytes_in_current_sgl */
 1392			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, remaining_bytes_in_current_sgl);
1393 if (status == SCI_SUCCESS) {
1394 stp_req->type.pio.pio_transfer_bytes -= remaining_bytes_in_current_sgl;
1395
1396 /* update the current sgl, sgl_offset and save for future */
1397 current_sgl = scic_sds_stp_request_pio_get_next_sgl(stp_req);
1398 sgl_offset = 0;
1399 }
1400 } else if (stp_req->type.pio.pio_transfer_bytes < remaining_bytes_in_current_sgl) {
1401 /* recycle the TC and send the H2D Data FIS from (current sgl + sgl_offset) and length = type.pio.pio_transfer_bytes */
 1402			status = scic_sds_stp_request_pio_data_out_transmit_data_frame(sci_req, stp_req->type.pio.pio_transfer_bytes);
1403
1404 if (status == SCI_SUCCESS) {
1405 /* Sgl offset will be adjusted and saved for future */
1406 sgl_offset += stp_req->type.pio.pio_transfer_bytes;
1407 current_sgl->address_lower += stp_req->type.pio.pio_transfer_bytes;
1408 stp_req->type.pio.pio_transfer_bytes = 0;
1409 }
1410 }
1411 }
1412
1413 if (status == SCI_SUCCESS) {
1414 stp_req->type.pio.request_current.sgl_offset = sgl_offset;
1415 }
1416
1417 return status;
1418}
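/*
 * Numeric illustration of the two branches above (example values
 * only): with remaining_bytes_in_current_sgl == 4096 and
 * pio_transfer_bytes == 6144, the first branch sends a 4096-byte DATA
 * FIS, leaves 2048 bytes pending, and advances to the next SGL element
 * with sgl_offset reset to 0.  With pio_transfer_bytes == 2048 and
 * 4096 bytes remaining in the element, the second branch sends 2048
 * bytes and advances both sgl_offset and the element's DMA address by
 * 2048 so a later frame can resume mid-element.
 */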
1419
 1420/**
 1421 * scic_sds_stp_request_pio_data_in_copy_data_buffer() - copy into the SGL
 1422 * @stp_req: The request that is used for the SGL processing.
 1423 * @data_buf: The buffer of data to be copied.
 1424 * @len: The length of the data transfer.
 1425 *
 1426 * Copy the data from the buffer for the length specified to the IO request
 1427 * SGL specified data region.  Returns an enum sci_status.
 1428 */
1429static enum sci_status
1430scic_sds_stp_request_pio_data_in_copy_data_buffer(struct scic_sds_stp_request *stp_req,
1431 u8 *data_buf, u32 len)
1432{
1433 struct scic_sds_request *sci_req;
1434 struct isci_request *ireq;
1435 u8 *src_addr;
1436 int copy_len;
1437 struct sas_task *task;
1438 struct scatterlist *sg;
1439 void *kaddr;
1440 int total_len = len;
1441
1442 sci_req = to_sci_req(stp_req);
1443 ireq = sci_req_to_ireq(sci_req);
1444 task = isci_request_access_task(ireq);
1445 src_addr = data_buf;
1446
1447 if (task->num_scatter > 0) {
1448 sg = task->scatter;
1449
1450 while (total_len > 0) {
1451 struct page *page = sg_page(sg);
1452
1453 copy_len = min_t(int, total_len, sg_dma_len(sg));
1454 kaddr = kmap_atomic(page, KM_IRQ0);
1455 memcpy(kaddr + sg->offset, src_addr, copy_len);
1456 kunmap_atomic(kaddr, KM_IRQ0);
1457 total_len -= copy_len;
1458 src_addr += copy_len;
1459 sg = sg_next(sg);
1460 }
1461 } else {
1462 BUG_ON(task->total_xfer_len < total_len);
1463 memcpy(task->scatter, src_addr, total_len);
1464 }
1465
1466 return SCI_SUCCESS;
1467}
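/*
 * Note on the copy loop above: it is an open-coded variant of the
 * scatterlist helper already used elsewhere in this file for SMP
 * responses.  A condensed sketch of that alternative would be
 *
 *	sg_copy_from_buffer(task->scatter, task->num_scatter,
 *			    data_buf, len);
 *
 * but it is illustrative only -- the helper bounds each chunk by
 * sg->length rather than sg_dma_len(), so it is not a drop-in
 * replacement here.
 */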
1468
1469/**
1470 *
1471 * @sci_req: The PIO DATA IN request that is to receive the data.
1472 * @data_buffer: The buffer to copy from.
1473 *
1474 * Copy the data buffer to the io request data region. enum sci_status
1475 */
1476static enum sci_status scic_sds_stp_request_pio_data_in_copy_data(
1477 struct scic_sds_stp_request *sci_req,
1478 u8 *data_buffer)
1479{
1480 enum sci_status status;
1481
1482 /*
1483 * If there is less than 1K remaining in the transfer request
1484 * copy just the data for the transfer */
1485 if (sci_req->type.pio.pio_transfer_bytes < SCU_MAX_FRAME_BUFFER_SIZE) {
1486 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1487 sci_req, data_buffer, sci_req->type.pio.pio_transfer_bytes);
1488
1489 if (status == SCI_SUCCESS)
1490 sci_req->type.pio.pio_transfer_bytes = 0;
1491 } else {
 1492		/* We are transferring the whole frame so copy */
1493 status = scic_sds_stp_request_pio_data_in_copy_data_buffer(
1494 sci_req, data_buffer, SCU_MAX_FRAME_BUFFER_SIZE);
1495
1496 if (status == SCI_SUCCESS)
1497 sci_req->type.pio.pio_transfer_bytes -= SCU_MAX_FRAME_BUFFER_SIZE;
1498 }
1499
1500 return status;
1501}
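/*
 * Worked example of the chunking above (illustrative values): for a
 * PIO read with pio_transfer_bytes == 2600, the SCU delivers the data
 * in frames of at most SCU_MAX_FRAME_BUFFER_SIZE (1024) bytes, so
 * successive calls copy 1024, 1024, and then 552 bytes, decrementing
 * pio_transfer_bytes to 1576, 552, and finally 0, at which point the
 * request either completes or waits for the next PIO Setup FIS.
 */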
1502
Dan Williamsa7e255a2011-05-11 08:27:47 -07001503static enum sci_status stp_request_pio_await_h2d_completion_tc_event(struct scic_sds_request *sci_req,
1504 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07001505{
1506 enum sci_status status = SCI_SUCCESS;
1507
1508 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1509 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
Dan Williamsa7e255a2011-05-11 08:27:47 -07001510 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
Dan Williams5dec6f42011-05-10 02:28:49 -07001511
Dan Williamsa7e255a2011-05-11 08:27:47 -07001512 sci_base_state_machine_change_state(&sci_req->state_machine,
1513 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
Dan Williams5dec6f42011-05-10 02:28:49 -07001514 break;
1515
1516 default:
Dan Williamsa7e255a2011-05-11 08:27:47 -07001517 /* All other completion status cause the IO to be
1518 * complete. If a NAK was received, then it is up to
1519 * the user to retry the request.
1520 */
1521 scic_sds_request_set_status(sci_req,
1522 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1523 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07001524
Dan Williamsa7e255a2011-05-11 08:27:47 -07001525 sci_base_state_machine_change_state(&sci_req->state_machine,
1526 SCI_BASE_REQUEST_STATE_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07001527 break;
1528 }
1529
1530 return status;
1531}
1532
Dan Williamsa7e255a2011-05-11 08:27:47 -07001533static enum sci_status pio_data_out_tx_done_tc_event(struct scic_sds_request *sci_req,
1534 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07001535{
1536 enum sci_status status = SCI_SUCCESS;
1537 bool all_frames_transferred = false;
1538 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1539
1540 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
1541 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
1542 /* Transmit data */
1543 if (stp_req->type.pio.pio_transfer_bytes != 0) {
1544 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1545 if (status == SCI_SUCCESS) {
1546 if (stp_req->type.pio.pio_transfer_bytes == 0)
1547 all_frames_transferred = true;
1548 }
1549 } else if (stp_req->type.pio.pio_transfer_bytes == 0) {
1550 /*
 1551			 * this will happen if all the data is written on the
 1552			 * first pass after the PIO Setup FIS is received
1553 */
1554 all_frames_transferred = true;
1555 }
1556
1557 /* all data transferred. */
1558 if (all_frames_transferred) {
1559 /*
 1560			 * Change the state to SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
 1561			 * and wait for a PIO Setup FIS or a D2H Register FIS. */
1562 sci_base_state_machine_change_state(
1563 &sci_req->state_machine,
1564 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1565 );
1566 }
1567 break;
Dan Williams5dec6f42011-05-10 02:28:49 -07001568 default:
1569 /*
1570 * All other completion status cause the IO to be complete. If a NAK
1571 * was received, then it is up to the user to retry the request. */
1572 scic_sds_request_set_status(
1573 sci_req,
1574 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
1575 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR
1576 );
1577
1578 sci_base_state_machine_change_state(
1579 &sci_req->state_machine,
1580 SCI_BASE_REQUEST_STATE_COMPLETED
1581 );
1582 break;
1583 }
1584
1585 return status;
1586}
1587
 1588/**
 1589 * scic_sds_stp_request_pio_data_in_await_data_event_handler()
 1590 * @request: This is the request which is receiving the event.
 1591 * @event_code: This is the event code on which the request is expected to
 1592 *    take action.
 1593 *
 1594 * This method handles any link layer events that arrive while waiting for
 1595 * the data frame.  Returns SCI_SUCCESS or SCI_FAILURE.
 1596 */
1597static enum sci_status scic_sds_stp_request_pio_data_in_await_data_event_handler(
1598 struct scic_sds_request *request,
1599 u32 event_code)
1600{
1601 enum sci_status status;
1602
1603 switch (scu_get_event_specifier(event_code)) {
1604 case SCU_TASK_DONE_CRC_ERR << SCU_EVENT_SPECIFIC_CODE_SHIFT:
1605 /*
1606 * We are waiting for data and the SCU has R_ERR the data frame.
1607 * Go back to waiting for the D2H Register FIS */
1608 sci_base_state_machine_change_state(
1609 &request->state_machine,
1610 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
1611 );
1612
1613 status = SCI_SUCCESS;
1614 break;
1615
1616 default:
1617 dev_err(scic_to_dev(request->owning_controller),
1618 "%s: SCIC PIO Request 0x%p received unexpected "
1619 "event 0x%08x\n",
1620 __func__, request, event_code);
1621
 1622		/* @todo Should we fail the PIO request when we get an unexpected event? */
1623 status = SCI_FAILURE;
1624 break;
1625 }
1626
1627 return status;
1628}
1629
1630static void scic_sds_stp_request_udma_complete_request(
1631 struct scic_sds_request *request,
1632 u32 scu_status,
1633 enum sci_status sci_status)
1634{
1635 scic_sds_request_set_status(request, scu_status, sci_status);
1636 sci_base_state_machine_change_state(&request->state_machine,
1637 SCI_BASE_REQUEST_STATE_COMPLETED);
1638}
1639
1640static enum sci_status scic_sds_stp_request_udma_general_frame_handler(struct scic_sds_request *sci_req,
1641 u32 frame_index)
1642{
1643 struct scic_sds_controller *scic = sci_req->owning_controller;
1644 struct dev_to_host_fis *frame_header;
1645 enum sci_status status;
1646 u32 *frame_buffer;
1647
1648 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1649 frame_index,
1650 (void **)&frame_header);
1651
1652 if ((status == SCI_SUCCESS) &&
1653 (frame_header->fis_type == FIS_REGD2H)) {
1654 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1655 frame_index,
1656 (void **)&frame_buffer);
1657
1658 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1659 frame_header,
1660 frame_buffer);
1661 }
1662
1663 scic_sds_controller_release_frame(scic, frame_index);
1664
1665 return status;
1666}
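/*
 * The helper above shows the canonical unsolicited-frame sequence used
 * throughout this file: fetch the frame header, optionally fetch the
 * payload and copy the SATA response, then always release the frame.
 * A condensed sketch of the pattern (pseudocode, for illustration;
 * frame_is_interesting() is a placeholder):
 *
 *	status = ..._get_header(&scic->uf_control, frame_index, &hdr);
 *	if (status == SCI_SUCCESS && frame_is_interesting(hdr))
 *		..._get_buffer(&scic->uf_control, frame_index, &buf);
 *	scic_sds_controller_release_frame(scic, frame_index);
 *
 * Skipping the release would leak one of the controller's finite
 * unsolicited frame buffers.
 */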
1667
Dan Williamsd1c637c32011-05-11 08:27:47 -07001668enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
1669 u32 frame_index)
1670{
1671 struct scic_sds_controller *scic = sci_req->owning_controller;
1672 struct scic_sds_stp_request *stp_req = &sci_req->stp.req;
1673 enum sci_base_request_states state;
1674 enum sci_status status;
1675 ssize_t word_cnt;
1676
1677 state = sci_req->state_machine.current_state_id;
1678 switch (state) {
1679 case SCI_BASE_REQUEST_STATE_STARTED: {
1680 struct ssp_frame_hdr ssp_hdr;
1681 void *frame_header;
1682
1683 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1684 frame_index,
1685 &frame_header);
1686
1687 word_cnt = sizeof(struct ssp_frame_hdr) / sizeof(u32);
1688 sci_swab32_cpy(&ssp_hdr, frame_header, word_cnt);
1689
1690 if (ssp_hdr.frame_type == SSP_RESPONSE) {
1691 struct ssp_response_iu *resp_iu;
1692 ssize_t word_cnt = SSP_RESP_IU_MAX_SIZE / sizeof(u32);
1693
1694 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1695 frame_index,
1696 (void **)&resp_iu);
1697
1698 sci_swab32_cpy(&sci_req->ssp.rsp, resp_iu, word_cnt);
1699
1700 resp_iu = &sci_req->ssp.rsp;
1701
1702 if (resp_iu->datapres == 0x01 ||
1703 resp_iu->datapres == 0x02) {
1704 scic_sds_request_set_status(sci_req,
1705 SCU_TASK_DONE_CHECK_RESPONSE,
1706 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1707 } else
1708 scic_sds_request_set_status(sci_req,
1709 SCU_TASK_DONE_GOOD,
1710 SCI_SUCCESS);
1711 } else {
1712 /* not a response frame, why did it get forwarded? */
1713 dev_err(scic_to_dev(scic),
1714 "%s: SCIC IO Request 0x%p received unexpected "
1715 "frame %d type 0x%02x\n", __func__, sci_req,
1716 frame_index, ssp_hdr.frame_type);
1717 }
1718
1719 /*
1720 * In any case we are done with this frame buffer return it to the
1721 * controller
1722 */
1723 scic_sds_controller_release_frame(scic, frame_index);
1724
1725 return SCI_SUCCESS;
1726 }
1727 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE:
1728 scic_sds_io_request_copy_response(sci_req);
1729 sci_base_state_machine_change_state(&sci_req->state_machine,
1730 SCI_BASE_REQUEST_STATE_COMPLETED);
 1731		scic_sds_controller_release_frame(scic, frame_index);
1732 return SCI_SUCCESS;
1733 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE: {
1734 struct smp_resp *rsp_hdr = &sci_req->smp.rsp;
1735 void *frame_header;
1736
1737 scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1738 frame_index,
1739 &frame_header);
1740
1741 /* byte swap the header. */
1742 word_cnt = SMP_RESP_HDR_SZ / sizeof(u32);
1743 sci_swab32_cpy(rsp_hdr, frame_header, word_cnt);
1744
1745 if (rsp_hdr->frame_type == SMP_RESPONSE) {
1746 void *smp_resp;
1747
1748 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1749 frame_index,
1750 &smp_resp);
1751
1752 word_cnt = (sizeof(struct smp_req) - SMP_RESP_HDR_SZ) /
1753 sizeof(u32);
1754
1755 sci_swab32_cpy(((u8 *) rsp_hdr) + SMP_RESP_HDR_SZ,
1756 smp_resp, word_cnt);
1757
1758 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
1759 SCI_SUCCESS);
1760
1761 sci_base_state_machine_change_state(&sci_req->state_machine,
1762 SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION);
1763 } else {
1764 /* This was not a response frame why did it get forwarded? */
1765 dev_err(scic_to_dev(scic),
1766 "%s: SCIC SMP Request 0x%p received unexpected frame "
1767 "%d type 0x%02x\n", __func__, sci_req,
1768 frame_index, rsp_hdr->frame_type);
1769
1770 scic_sds_request_set_status(sci_req,
1771 SCU_TASK_DONE_SMP_FRM_TYPE_ERR,
1772 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
1773
1774 sci_base_state_machine_change_state(&sci_req->state_machine,
1775 SCI_BASE_REQUEST_STATE_COMPLETED);
1776 }
1777
1778 scic_sds_controller_release_frame(scic, frame_index);
1779
1780 return SCI_SUCCESS;
1781 }
1782 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
1783 return scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1784 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE:
 1785		/* Use the general frame handler to copy the response data */
1786 status = scic_sds_stp_request_udma_general_frame_handler(sci_req, frame_index);
1787
1788 if (status != SCI_SUCCESS)
1789 return status;
1790
1791 scic_sds_stp_request_udma_complete_request(sci_req,
1792 SCU_TASK_DONE_CHECK_RESPONSE,
1793 SCI_FAILURE_IO_RESPONSE_VALID);
1794 return SCI_SUCCESS;
1795 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE: {
1796 struct dev_to_host_fis *frame_header;
1797 u32 *frame_buffer;
1798
1799 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1800 frame_index,
1801 (void **)&frame_header);
1802
1803 if (status != SCI_SUCCESS) {
1804 dev_err(scic_to_dev(scic),
1805 "%s: SCIC IO Request 0x%p could not get frame header "
1806 "for frame index %d, status %x\n",
1807 __func__, stp_req, frame_index, status);
1808
1809 return status;
1810 }
1811
1812 switch (frame_header->fis_type) {
1813 case FIS_REGD2H:
1814 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1815 frame_index,
1816 (void **)&frame_buffer);
1817
1818 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1819 frame_header,
1820 frame_buffer);
1821
1822 /* The command has completed with error */
1823 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_CHECK_RESPONSE,
1824 SCI_FAILURE_IO_RESPONSE_VALID);
1825 break;
1826
1827 default:
1828 dev_warn(scic_to_dev(scic),
1829 "%s: IO Request:0x%p Frame Id:%d protocol "
1830 "violation occurred\n", __func__, stp_req,
1831 frame_index);
1832
1833 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
1834 SCI_FAILURE_PROTOCOL_VIOLATION);
1835 break;
1836 }
1837
1838 sci_base_state_machine_change_state(&sci_req->state_machine,
1839 SCI_BASE_REQUEST_STATE_COMPLETED);
1840
1841 /* Frame has been decoded return it to the controller */
1842 scic_sds_controller_release_frame(scic, frame_index);
1843
1844 return status;
1845 }
1846 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE: {
1847 struct isci_request *ireq = sci_req_to_ireq(sci_req);
1848 struct sas_task *task = isci_request_access_task(ireq);
1849 struct dev_to_host_fis *frame_header;
1850 u32 *frame_buffer;
1851
1852 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1853 frame_index,
1854 (void **)&frame_header);
1855
1856 if (status != SCI_SUCCESS) {
1857 dev_err(scic_to_dev(scic),
1858 "%s: SCIC IO Request 0x%p could not get frame header "
1859 "for frame index %d, status %x\n",
1860 __func__, stp_req, frame_index, status);
1861 return status;
1862 }
1863
1864 switch (frame_header->fis_type) {
1865 case FIS_PIO_SETUP:
1866 /* Get from the frame buffer the PIO Setup Data */
1867 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1868 frame_index,
1869 (void **)&frame_buffer);
1870
 1871			/* Get the data from the PIO Setup. The SCU hardware returns
 1872			 * the first word of the FIS in the frame_header and the rest
 1873			 * of the data in the frame buffer, so the buffer starts one dword into the FIS
1874 */
1875
1876 /* transfer_count: first 16bits in the 4th dword */
1877 stp_req->type.pio.pio_transfer_bytes = frame_buffer[3] & 0xffff;
1878
1879 /* ending_status: 4th byte in the 3rd dword */
1880 stp_req->type.pio.ending_status = (frame_buffer[2] >> 24) & 0xff;
1881
1882 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1883 frame_header,
1884 frame_buffer);
1885
1886 sci_req->stp.rsp.status = stp_req->type.pio.ending_status;
1887
1888 /* The next state is dependent on whether the
1889 * request was PIO Data-in or Data out
1890 */
1891 if (task->data_dir == DMA_FROM_DEVICE) {
1892 sci_base_state_machine_change_state(&sci_req->state_machine,
1893 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE);
1894 } else if (task->data_dir == DMA_TO_DEVICE) {
1895 /* Transmit data */
1896 status = scic_sds_stp_request_pio_data_out_transmit_data(sci_req);
1897 if (status != SCI_SUCCESS)
1898 break;
1899 sci_base_state_machine_change_state(&sci_req->state_machine,
1900 SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE);
1901 }
1902 break;
1903 case FIS_SETDEVBITS:
1904 sci_base_state_machine_change_state(&sci_req->state_machine,
1905 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
1906 break;
1907 case FIS_REGD2H:
1908 if (frame_header->status & ATA_BUSY) {
1909 /* Now why is the drive sending a D2H Register FIS when
1910 * it is still busy? Do nothing since we are still in
1911 * the right state.
1912 */
1913 dev_dbg(scic_to_dev(scic),
1914 "%s: SCIC PIO Request 0x%p received "
1915 "D2H Register FIS with BSY status "
1916 "0x%x\n", __func__, stp_req,
1917 frame_header->status);
1918 break;
1919 }
1920
1921 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1922 frame_index,
1923 (void **)&frame_buffer);
1924
 1925			scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
1926 frame_header,
1927 frame_buffer);
1928
1929 scic_sds_request_set_status(sci_req,
1930 SCU_TASK_DONE_CHECK_RESPONSE,
1931 SCI_FAILURE_IO_RESPONSE_VALID);
1932
1933 sci_base_state_machine_change_state(&sci_req->state_machine,
1934 SCI_BASE_REQUEST_STATE_COMPLETED);
1935 break;
1936 default:
1937 /* FIXME: what do we do here? */
1938 break;
1939 }
1940
1941 /* Frame is decoded return it to the controller */
1942 scic_sds_controller_release_frame(scic, frame_index);
1943
1944 return status;
1945 }
1946 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE: {
1947 struct dev_to_host_fis *frame_header;
1948 struct sata_fis_data *frame_buffer;
1949
1950 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
1951 frame_index,
1952 (void **)&frame_header);
1953
1954 if (status != SCI_SUCCESS) {
1955 dev_err(scic_to_dev(scic),
1956 "%s: SCIC IO Request 0x%p could not get frame header "
1957 "for frame index %d, status %x\n",
1958 __func__, stp_req, frame_index, status);
1959 return status;
1960 }
1961
1962 if (frame_header->fis_type != FIS_DATA) {
1963 dev_err(scic_to_dev(scic),
1964 "%s: SCIC PIO Request 0x%p received frame %d "
1965 "with fis type 0x%02x when expecting a data "
1966 "fis.\n", __func__, stp_req, frame_index,
1967 frame_header->fis_type);
1968
1969 scic_sds_request_set_status(sci_req,
1970 SCU_TASK_DONE_GOOD,
1971 SCI_FAILURE_IO_REQUIRES_SCSI_ABORT);
1972
1973 sci_base_state_machine_change_state(&sci_req->state_machine,
1974 SCI_BASE_REQUEST_STATE_COMPLETED);
1975
1976 /* Frame is decoded return it to the controller */
1977 scic_sds_controller_release_frame(scic, frame_index);
1978 return status;
1979 }
1980
1981 if (stp_req->type.pio.request_current.sgl_pair == NULL) {
1982 sci_req->saved_rx_frame_index = frame_index;
1983 stp_req->type.pio.pio_transfer_bytes = 0;
1984 } else {
1985 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
1986 frame_index,
1987 (void **)&frame_buffer);
1988
1989 status = scic_sds_stp_request_pio_data_in_copy_data(stp_req,
1990 (u8 *)frame_buffer);
1991
1992 /* Frame is decoded return it to the controller */
1993 scic_sds_controller_release_frame(scic, frame_index);
1994 }
1995
1996 /* Check for the end of the transfer, are there more
1997 * bytes remaining for this data transfer
1998 */
1999 if (status != SCI_SUCCESS ||
2000 stp_req->type.pio.pio_transfer_bytes != 0)
2001 return status;
2002
2003 if ((stp_req->type.pio.ending_status & ATA_BUSY) == 0) {
2004 scic_sds_request_set_status(sci_req,
2005 SCU_TASK_DONE_CHECK_RESPONSE,
2006 SCI_FAILURE_IO_RESPONSE_VALID);
2007
2008 sci_base_state_machine_change_state(&sci_req->state_machine,
2009 SCI_BASE_REQUEST_STATE_COMPLETED);
2010 } else {
2011 sci_base_state_machine_change_state(&sci_req->state_machine,
2012 SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE);
2013 }
2014 return status;
2015 }
2016 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE: {
2017 struct dev_to_host_fis *frame_header;
2018 u32 *frame_buffer;
2019
2020 status = scic_sds_unsolicited_frame_control_get_header(&scic->uf_control,
2021 frame_index,
2022 (void **)&frame_header);
2023 if (status != SCI_SUCCESS) {
2024 dev_err(scic_to_dev(scic),
2025 "%s: SCIC IO Request 0x%p could not get frame header "
2026 "for frame index %d, status %x\n",
2027 __func__, stp_req, frame_index, status);
2028 return status;
2029 }
2030
2031 switch (frame_header->fis_type) {
2032 case FIS_REGD2H:
2033 scic_sds_unsolicited_frame_control_get_buffer(&scic->uf_control,
2034 frame_index,
2035 (void **)&frame_buffer);
2036
2037 scic_sds_controller_copy_sata_response(&sci_req->stp.rsp,
2038 frame_header,
2039 frame_buffer);
2040
2041 /* The command has completed with error */
2042 scic_sds_request_set_status(sci_req,
2043 SCU_TASK_DONE_CHECK_RESPONSE,
2044 SCI_FAILURE_IO_RESPONSE_VALID);
2045 break;
2046 default:
2047 dev_warn(scic_to_dev(scic),
2048 "%s: IO Request:0x%p Frame Id:%d protocol "
2049 "violation occurred\n", __func__, stp_req,
2050 frame_index);
2051
2052 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_UNEXP_FIS,
2053 SCI_FAILURE_PROTOCOL_VIOLATION);
2054 break;
2055 }
2056
2057 sci_base_state_machine_change_state(&sci_req->state_machine,
2058 SCI_BASE_REQUEST_STATE_COMPLETED);
2059
2060 /* Frame has been decoded return it to the controller */
2061 scic_sds_controller_release_frame(scic, frame_index);
2062
2063 return status;
2064 }
2065 case SCI_BASE_REQUEST_STATE_ABORTING:
2066 /* TODO: Is it even possible to get an unsolicited frame in the
2067 * aborting state?
2068 */
2069 scic_sds_controller_release_frame(scic, frame_index);
2070 return SCI_SUCCESS;
2071 default:
2072 dev_warn(scic_to_dev(scic),
2073 "%s: SCIC IO Request given unexpected frame %x while in "
2074 "state %d\n", __func__, frame_index, state);
2075
2076 scic_sds_controller_release_frame(scic, frame_index);
2077 return SCI_FAILURE_INVALID_STATE;
2078 }
2079}
2080
Dan Williamsa7e255a2011-05-11 08:27:47 -07002081static enum sci_status stp_request_udma_await_tc_event(struct scic_sds_request *sci_req,
2082 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07002083{
2084 enum sci_status status = SCI_SUCCESS;
2085
2086 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2087 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2088 scic_sds_stp_request_udma_complete_request(sci_req,
2089 SCU_TASK_DONE_GOOD,
2090 SCI_SUCCESS);
2091 break;
2092 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_UNEXP_FIS):
2093 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_REG_ERR):
Dan Williamsa7e255a2011-05-11 08:27:47 -07002094		/* We must check the response buffer to see if the D2H
2095 * Register FIS was received before we got the TC
2096 * completion.
2097 */
Dan Williams5dec6f42011-05-10 02:28:49 -07002098 if (sci_req->stp.rsp.fis_type == FIS_REGD2H) {
2099 scic_sds_remote_device_suspend(sci_req->target_device,
2100 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2101
2102 scic_sds_stp_request_udma_complete_request(sci_req,
2103 SCU_TASK_DONE_CHECK_RESPONSE,
2104 SCI_FAILURE_IO_RESPONSE_VALID);
2105 } else {
Dan Williamsa7e255a2011-05-11 08:27:47 -07002106 /* If we have an error completion status for the
2107 * TC then we can expect a D2H register FIS from
2108 * the device so we must change state to wait
2109 * for it
2110 */
Dan Williams5dec6f42011-05-10 02:28:49 -07002111 sci_base_state_machine_change_state(&sci_req->state_machine,
2112 SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE);
2113 }
2114 break;
2115
Dan Williamsa7e255a2011-05-11 08:27:47 -07002116 /* TODO Check to see if any of these completion status need to
2117 * wait for the device to host register fis.
2118 */
2119 /* TODO We can retry the command for SCU_TASK_DONE_CMD_LL_R_ERR
2120 * - this comes only for B0
2121 */
Dan Williams5dec6f42011-05-10 02:28:49 -07002122 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_INV_FIS_LEN):
2123 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_MAX_PLD_ERR):
2124 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_LL_R_ERR):
2125 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CMD_LL_R_ERR):
2126 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_CRC_ERR):
2127 scic_sds_remote_device_suspend(sci_req->target_device,
2128 SCU_EVENT_SPECIFIC(SCU_NORMALIZE_COMPLETION_STATUS(completion_code)));
2129 /* Fall through to the default case */
2130 default:
2131 /* All other completion status cause the IO to be complete. */
2132 scic_sds_stp_request_udma_complete_request(sci_req,
2133 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
2134 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
2135 break;
2136 }
2137
2138 return status;
2139}
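/*
 * Ordering note for the UNEXP_FIS/REG_ERR cases above: the D2H
 * Register FIS and the TC completion can arrive in either order.  If
 * sci_req->stp.rsp.fis_type is already FIS_REGD2H, the FIS arrived
 * first and the request can be completed immediately with the copied
 * response; otherwise the handler parks the request in the
 * AWAIT_D2H_REG_FIS substate and the frame handler finishes it when
 * the FIS is delivered.
 */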
2140
Dan Williamsa7e255a2011-05-11 08:27:47 -07002141static enum sci_status stp_request_soft_reset_await_h2d_asserted_tc_event(struct scic_sds_request *sci_req,
2142 u32 completion_code)
Dan Williams5dec6f42011-05-10 02:28:49 -07002143{
2144 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2145 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
Dan Williamsa7e255a2011-05-11 08:27:47 -07002146 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2147 SCI_SUCCESS);
Dan Williams5dec6f42011-05-10 02:28:49 -07002148
Dan Williamsa7e255a2011-05-11 08:27:47 -07002149 sci_base_state_machine_change_state(&sci_req->state_machine,
2150 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE);
Dan Williams5dec6f42011-05-10 02:28:49 -07002151 break;
2152
2153 default:
2154 /*
2155 * All other completion status cause the IO to be complete. If a NAK
2156 * was received, then it is up to the user to retry the request. */
Dan Williamsa7e255a2011-05-11 08:27:47 -07002157 scic_sds_request_set_status(sci_req,
Dan Williams5dec6f42011-05-10 02:28:49 -07002158 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
Dan Williamsa7e255a2011-05-11 08:27:47 -07002159 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07002160
Dan Williamsa7e255a2011-05-11 08:27:47 -07002161 sci_base_state_machine_change_state(&sci_req->state_machine,
2162 SCI_BASE_REQUEST_STATE_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07002163 break;
2164 }
2165
2166 return SCI_SUCCESS;
2167}
2168
Dan Williamsa7e255a2011-05-11 08:27:47 -07002169static enum sci_status stp_request_soft_reset_await_h2d_diagnostic_tc_event(
Dan Williams5dec6f42011-05-10 02:28:49 -07002170 struct scic_sds_request *sci_req,
2171 u32 completion_code)
2172{
2173 switch (SCU_GET_COMPLETION_TL_STATUS(completion_code)) {
2174 case SCU_MAKE_COMPLETION_STATUS(SCU_TASK_DONE_GOOD):
2175 scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD,
2176 SCI_SUCCESS);
2177
2178 sci_base_state_machine_change_state(&sci_req->state_machine,
2179 SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE);
2180 break;
2181
2182 default:
Dan Williamsa7e255a2011-05-11 08:27:47 -07002183 /* All other completion status cause the IO to be complete. If
2184 * a NAK was received, then it is up to the user to retry the
2185 * request.
2186 */
2187 scic_sds_request_set_status(sci_req,
Dan Williams5dec6f42011-05-10 02:28:49 -07002188 SCU_NORMALIZE_COMPLETION_STATUS(completion_code),
Dan Williamsa7e255a2011-05-11 08:27:47 -07002189 SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR);
Dan Williams5dec6f42011-05-10 02:28:49 -07002190
2191 sci_base_state_machine_change_state(&sci_req->state_machine,
Dan Williamsa7e255a2011-05-11 08:27:47 -07002192 SCI_BASE_REQUEST_STATE_COMPLETED);
Dan Williams5dec6f42011-05-10 02:28:49 -07002193 break;
2194 }
2195
2196 return SCI_SUCCESS;
2197}
2198
Dan Williamsa7e255a2011-05-11 08:27:47 -07002199enum sci_status
2200scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 completion_code)
2201{
2202 enum sci_base_request_states state;
2203 struct scic_sds_controller *scic = sci_req->owning_controller;
2204
2205 state = sci_req->state_machine.current_state_id;
2206
2207 switch (state) {
2208 case SCI_BASE_REQUEST_STATE_STARTED:
2209 return request_started_state_tc_event(sci_req, completion_code);
2210 case SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION:
2211 return ssp_task_request_await_tc_event(sci_req, completion_code);
2212 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE:
2213 return smp_request_await_response_tc_event(sci_req, completion_code);
2214 case SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION:
2215 return smp_request_await_tc_event(sci_req, completion_code);
2216 case SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE:
2217 return stp_request_udma_await_tc_event(sci_req, completion_code);
2218 case SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE:
2219 return stp_request_non_data_await_h2d_tc_event(sci_req, completion_code);
2220 case SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE:
2221 return stp_request_pio_await_h2d_completion_tc_event(sci_req, completion_code);
2222 case SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE:
2223 return pio_data_out_tx_done_tc_event(sci_req, completion_code);
2224 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE:
2225 return stp_request_soft_reset_await_h2d_asserted_tc_event(sci_req, completion_code);
2226 case SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE:
2227 return stp_request_soft_reset_await_h2d_diagnostic_tc_event(sci_req, completion_code);
2228 case SCI_BASE_REQUEST_STATE_ABORTING:
2229 return request_aborting_state_tc_event(sci_req, completion_code);
2230 default:
2231 dev_warn(scic_to_dev(scic),
2232 "%s: SCIC IO Request given task completion notification %x "
2233 "while in wrong state %d\n", __func__, completion_code,
2234 state);
2235 return SCI_FAILURE_INVALID_STATE;
2236 }
2237}
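/*
 * The switch above acts as the single dispatch point for task-context
 * completion events.  As a hedged sketch, supporting a new started
 * substate would mean one more case here plus its handler, e.g.
 *
 *	case SCIC_SDS_STP_REQUEST_STARTED_NEW_SUBSTATE:
 *		return stp_request_new_substate_tc_event(sci_req,
 *							 completion_code);
 *
 * (the state and handler names above are hypothetical, shown only to
 * illustrate the pattern).
 */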
 2238
Dan Williamsf1f52e72011-05-10 02:28:45 -07002241static const struct scic_sds_io_request_state_handler scic_sds_request_state_handler_table[] = {
Piotr Sawickif4636a72011-05-10 23:50:32 +00002242 [SCI_BASE_REQUEST_STATE_INITIAL] = {},
2243 [SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {},
Dan Williamsa7e255a2011-05-11 08:27:47 -07002244 [SCI_BASE_REQUEST_STATE_STARTED] = { },
2245 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = { },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002246 [SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = { },
Dan Williamsa7e255a2011-05-11 08:27:47 -07002247 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = { },
2248 [SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = { },
2249 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = { },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002250 [SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = { },
Dan Williamsa7e255a2011-05-11 08:27:47 -07002251 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = { },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002252 [SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = { },
Dan Williamsa7e255a2011-05-11 08:27:47 -07002253 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = { },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002254 [SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = { },
Dan Williams5dec6f42011-05-10 02:28:49 -07002255 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
Dan Williams5dec6f42011-05-10 02:28:49 -07002256 .event_handler = scic_sds_stp_request_pio_data_in_await_data_event_handler,
Dan Williams5dec6f42011-05-10 02:28:49 -07002257 },
Dan Williamsa7e255a2011-05-11 08:27:47 -07002258 [SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = { },
2259 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = { },
2260 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = { },
Dan Williamsd1c637c32011-05-11 08:27:47 -07002261 [SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = { },
Dan Williamsf1f52e72011-05-10 02:28:45 -07002262 [SCI_BASE_REQUEST_STATE_COMPLETED] = {
2263 .complete_handler = scic_sds_request_completed_state_complete_handler,
2264 },
Dan Williamsa7e255a2011-05-11 08:27:47 -07002265 [SCI_BASE_REQUEST_STATE_ABORTING] = { },
Dan Williamsf1393032011-05-10 02:28:47 -07002266 [SCI_BASE_REQUEST_STATE_FINAL] = { },
Dan Williamsf1f52e72011-05-10 02:28:45 -07002267};
2268
Dan Williams6f231dd2011-07-02 22:56:22 -07002269
2270/**
2271 * isci_request_process_response_iu() - This function sets the status and
2272 * response iu, in the task struct, from the request object for the upper
2273 * layer driver.
 2274 * @task: This parameter is the task struct from the upper layer driver.
2275 * @resp_iu: This parameter points to the response iu of the completed request.
2276 * @dev: This parameter specifies the linux device struct.
2277 *
2278 * none.
2279 */
2280static void isci_request_process_response_iu(
2281 struct sas_task *task,
2282 struct ssp_response_iu *resp_iu,
2283 struct device *dev)
2284{
2285 dev_dbg(dev,
2286 "%s: resp_iu = %p "
2287 "resp_iu->status = 0x%x,\nresp_iu->datapres = %d "
2288 "resp_iu->response_data_len = %x, "
 2289		"resp_iu->sense_data_len = %x\nresponse data: ",
2290 __func__,
2291 resp_iu,
2292 resp_iu->status,
2293 resp_iu->datapres,
2294 resp_iu->response_data_len,
2295 resp_iu->sense_data_len);
2296
2297 task->task_status.stat = resp_iu->status;
2298
2299 /* libsas updates the task status fields based on the response iu. */
2300 sas_ssp_task_response(dev, task, resp_iu);
2301}
2302
2303/**
2304 * isci_request_set_open_reject_status() - This function prepares the I/O
2305 * completion for OPEN_REJECT conditions.
2306 * @request: This parameter is the completed isci_request object.
2307 * @response_ptr: This parameter specifies the service response for the I/O.
2308 * @status_ptr: This parameter specifies the exec status for the I/O.
2309 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2310 * the LLDD with respect to completing this request or forcing an abort
2311 * condition on the I/O.
2312 * @open_rej_reason: This parameter specifies the encoded reason for the
2313 * abandon-class reject.
2314 *
2315 * none.
2316 */
2317static void isci_request_set_open_reject_status(
2318 struct isci_request *request,
2319 struct sas_task *task,
2320 enum service_response *response_ptr,
2321 enum exec_status *status_ptr,
2322 enum isci_completion_selection *complete_to_host_ptr,
2323 enum sas_open_rej_reason open_rej_reason)
2324{
2325 /* Task in the target is done. */
2326 request->complete_in_target = true;
2327 *response_ptr = SAS_TASK_UNDELIVERED;
2328 *status_ptr = SAS_OPEN_REJECT;
2329 *complete_to_host_ptr = isci_perform_normal_io_completion;
2330 task->task_status.open_rej_reason = open_rej_reason;
2331}
2332
2333/**
2334 * isci_request_handle_controller_specific_errors() - This function decodes
2335 * controller-specific I/O completion error conditions.
2336 * @request: This parameter is the completed isci_request object.
2337 * @response_ptr: This parameter specifies the service response for the I/O.
2338 * @status_ptr: This parameter specifies the exec status for the I/O.
2339 * @complete_to_host_ptr: This parameter specifies the action to be taken by
2340 * the LLDD with respect to completing this request or forcing an abort
2341 * condition on the I/O.
2342 *
2343 * none.
2344 */
2345static void isci_request_handle_controller_specific_errors(
2346 struct isci_remote_device *isci_device,
2347 struct isci_request *request,
2348 struct sas_task *task,
2349 enum service_response *response_ptr,
2350 enum exec_status *status_ptr,
2351 enum isci_completion_selection *complete_to_host_ptr)
2352{
2353 unsigned int cstatus;
2354
Dan Williamsf1f52e72011-05-10 02:28:45 -07002355 cstatus = request->sci.scu_status;
Dan Williams6f231dd2011-07-02 22:56:22 -07002356
2357 dev_dbg(&request->isci_host->pdev->dev,
2358 "%s: %p SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR "
2359 "- controller status = 0x%x\n",
2360 __func__, request, cstatus);
2361
2362 /* Decode the controller-specific errors; most
2363 * important is to recognize those conditions in which
2364 * the target may still have a task outstanding that
2365 * must be aborted.
2366 *
2367 * Note that there are SCU completion codes being
2368 * named in the decode below for which SCIC has already
2369 * done work to handle them in a way other than as
2370 * a controller-specific completion code; these are left
2371 * in the decode below for completeness sake.
2372 */
2373 switch (cstatus) {
2374 case SCU_TASK_DONE_DMASETUP_DIRERR:
2375 /* Also SCU_TASK_DONE_SMP_FRM_TYPE_ERR: */
2376 case SCU_TASK_DONE_XFERCNT_ERR:
2377 /* Also SCU_TASK_DONE_SMP_UFI_ERR: */
2378 if (task->task_proto == SAS_PROTOCOL_SMP) {
2379 /* SCU_TASK_DONE_SMP_UFI_ERR == Task Done. */
2380 *response_ptr = SAS_TASK_COMPLETE;
2381
2382 /* See if the device has been/is being stopped. Note
2383 * that we ignore the quiesce state, since we are
2384 * concerned about the actual device state.
2385 */
2386 if ((isci_device->status == isci_stopping) ||
2387 (isci_device->status == isci_stopped))
2388 *status_ptr = SAS_DEVICE_UNKNOWN;
2389 else
2390 *status_ptr = SAS_ABORTED_TASK;
2391
2392 request->complete_in_target = true;
2393
2394 *complete_to_host_ptr =
2395 isci_perform_normal_io_completion;
2396 } else {
2397 /* Task in the target is not done. */
2398 *response_ptr = SAS_TASK_UNDELIVERED;
2399
2400 if ((isci_device->status == isci_stopping) ||
2401 (isci_device->status == isci_stopped))
2402 *status_ptr = SAS_DEVICE_UNKNOWN;
2403 else
2404 *status_ptr = SAM_STAT_TASK_ABORTED;
2405
2406 request->complete_in_target = false;
2407
2408 *complete_to_host_ptr =
2409 isci_perform_error_io_completion;
2410 }
2411
2412 break;
2413
2414 case SCU_TASK_DONE_CRC_ERR:
2415 case SCU_TASK_DONE_NAK_CMD_ERR:
2416 case SCU_TASK_DONE_EXCESS_DATA:
2417 case SCU_TASK_DONE_UNEXP_FIS:
2418 /* Also SCU_TASK_DONE_UNEXP_RESP: */
2419 case SCU_TASK_DONE_VIIT_ENTRY_NV: /* TODO - conditions? */
2420 case SCU_TASK_DONE_IIT_ENTRY_NV: /* TODO - conditions? */
2421 case SCU_TASK_DONE_RNCNV_OUTBOUND: /* TODO - conditions? */
2422 /* These are conditions in which the target
2423 * has completed the task, so that no cleanup
2424 * is necessary.
2425 */
2426 *response_ptr = SAS_TASK_COMPLETE;
2427
2428 /* See if the device has been/is being stopped. Note
2429 * that we ignore the quiesce state, since we are
2430 * concerned about the actual device state.
2431 */
2432 if ((isci_device->status == isci_stopping) ||
2433 (isci_device->status == isci_stopped))
2434 *status_ptr = SAS_DEVICE_UNKNOWN;
2435 else
2436 *status_ptr = SAS_ABORTED_TASK;
2437
2438 request->complete_in_target = true;
2439
2440 *complete_to_host_ptr = isci_perform_normal_io_completion;
2441 break;
2442
2443
2444 /* Note that the only open reject completion codes seen here will be
2445 * abandon-class codes; all others are automatically retried in the SCU.
2446 */
2447 case SCU_TASK_OPEN_REJECT_WRONG_DESTINATION:
2448
2449 isci_request_set_open_reject_status(
2450 request, task, response_ptr, status_ptr,
2451 complete_to_host_ptr, SAS_OREJ_WRONG_DEST);
2452 break;
2453
2454 case SCU_TASK_OPEN_REJECT_ZONE_VIOLATION:
2455
2456 /* Note - the return of AB0 will change when
2457 * libsas implements detection of zone violations.
2458 */
2459 isci_request_set_open_reject_status(
2460 request, task, response_ptr, status_ptr,
2461 complete_to_host_ptr, SAS_OREJ_RESV_AB0);
2462 break;
2463
2464 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_1:
2465
2466 isci_request_set_open_reject_status(
2467 request, task, response_ptr, status_ptr,
2468 complete_to_host_ptr, SAS_OREJ_RESV_AB1);
2469 break;
2470
2471 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_2:
2472
2473 isci_request_set_open_reject_status(
2474 request, task, response_ptr, status_ptr,
2475 complete_to_host_ptr, SAS_OREJ_RESV_AB2);
2476 break;
2477
2478 case SCU_TASK_OPEN_REJECT_RESERVED_ABANDON_3:
2479
2480 isci_request_set_open_reject_status(
2481 request, task, response_ptr, status_ptr,
2482 complete_to_host_ptr, SAS_OREJ_RESV_AB3);
2483 break;
2484
2485 case SCU_TASK_OPEN_REJECT_BAD_DESTINATION:
2486
2487 isci_request_set_open_reject_status(
2488 request, task, response_ptr, status_ptr,
2489 complete_to_host_ptr, SAS_OREJ_BAD_DEST);
2490 break;
2491
2492 case SCU_TASK_OPEN_REJECT_STP_RESOURCES_BUSY:
2493
2494 isci_request_set_open_reject_status(
2495 request, task, response_ptr, status_ptr,
2496 complete_to_host_ptr, SAS_OREJ_STP_NORES);
2497 break;
2498
2499 case SCU_TASK_OPEN_REJECT_PROTOCOL_NOT_SUPPORTED:
2500
2501 isci_request_set_open_reject_status(
2502 request, task, response_ptr, status_ptr,
2503 complete_to_host_ptr, SAS_OREJ_EPROTO);
2504 break;
2505
2506 case SCU_TASK_OPEN_REJECT_CONNECTION_RATE_NOT_SUPPORTED:
2507
2508 isci_request_set_open_reject_status(
2509 request, task, response_ptr, status_ptr,
2510 complete_to_host_ptr, SAS_OREJ_CONN_RATE);
2511 break;
2512
2513 case SCU_TASK_DONE_LL_R_ERR:
2514 /* Also SCU_TASK_DONE_ACK_NAK_TO: */
2515 case SCU_TASK_DONE_LL_PERR:
2516 case SCU_TASK_DONE_LL_SY_TERM:
2517 /* Also SCU_TASK_DONE_NAK_ERR:*/
2518 case SCU_TASK_DONE_LL_LF_TERM:
2519 /* Also SCU_TASK_DONE_DATA_LEN_ERR: */
2520 case SCU_TASK_DONE_LL_ABORT_ERR:
2521 case SCU_TASK_DONE_SEQ_INV_TYPE:
2522 /* Also SCU_TASK_DONE_UNEXP_XR: */
2523 case SCU_TASK_DONE_XR_IU_LEN_ERR:
2524 case SCU_TASK_DONE_INV_FIS_LEN:
2525 /* Also SCU_TASK_DONE_XR_WD_LEN: */
2526 case SCU_TASK_DONE_SDMA_ERR:
2527 case SCU_TASK_DONE_OFFSET_ERR:
2528 case SCU_TASK_DONE_MAX_PLD_ERR:
2529 case SCU_TASK_DONE_LF_ERR:
2530 case SCU_TASK_DONE_SMP_RESP_TO_ERR: /* Escalate to dev reset? */
2531 case SCU_TASK_DONE_SMP_LL_RX_ERR:
2532 case SCU_TASK_DONE_UNEXP_DATA:
2533 case SCU_TASK_DONE_UNEXP_SDBFIS:
2534 case SCU_TASK_DONE_REG_ERR:
2535 case SCU_TASK_DONE_SDB_ERR:
2536 case SCU_TASK_DONE_TASK_ABORT:
2537 default:
2538 /* Task in the target is not done. */
2539 *response_ptr = SAS_TASK_UNDELIVERED;
2540 *status_ptr = SAM_STAT_TASK_ABORTED;
2541 request->complete_in_target = false;
2542
2543 *complete_to_host_ptr = isci_perform_error_io_completion;
2544 break;
2545 }
2546}
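/*
 * Every decode path above resolves to the same three outputs: a
 * service response, an exec status, and a completion-routing choice
 * for the LLDD.  For example, an abandon-class open reject such as
 * SCU_TASK_OPEN_REJECT_WRONG_DESTINATION ends up, via
 * isci_request_set_open_reject_status(), as
 *
 *	*response_ptr = SAS_TASK_UNDELIVERED;
 *	*status_ptr = SAS_OPEN_REJECT;
 *	task->task_status.open_rej_reason = SAS_OREJ_WRONG_DEST;
 *
 * with normal completion routing, since the target holds no
 * outstanding task for abandoned opens.
 */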
2547
2548/**
2549 * isci_task_save_for_upper_layer_completion() - This function saves the
2550 * request for later completion to the upper layer driver.
 2551 * @host: This parameter is a pointer to the host on which the request
2552 * should be queued (either as an error or success).
2553 * @request: This parameter is the completed request.
2554 * @response: This parameter is the response code for the completed task.
2555 * @status: This parameter is the status code for the completed task.
2556 *
2557 * none.
2558 */
2559static void isci_task_save_for_upper_layer_completion(
2560 struct isci_host *host,
2561 struct isci_request *request,
2562 enum service_response response,
2563 enum exec_status status,
2564 enum isci_completion_selection task_notification_selection)
2565{
2566 struct sas_task *task = isci_request_access_task(request);
2567
Jeff Skirvinec6c9632011-03-04 14:06:44 -08002568 task_notification_selection
2569 = isci_task_set_completion_status(task, response, status,
2570 task_notification_selection);
Dan Williams6f231dd2011-07-02 22:56:22 -07002571
2572 /* Tasks aborted specifically by a call to the lldd_abort_task
2573 * function should not be completed to the host in the regular path.
2574 */
2575 switch (task_notification_selection) {
2576
2577 case isci_perform_normal_io_completion:
2578
2579 /* Normal notification (task_done) */
2580 dev_dbg(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002581 "%s: Normal - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002582 __func__,
2583 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002584 task->task_status.resp, response,
2585 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07002586 /* Add to the completed list. */
2587 list_add(&request->completed_node,
2588 &host->requests_to_complete);
Jeff Skirvinec6c9632011-03-04 14:06:44 -08002589
2590 /* Take the request off the device's pending request list. */
2591 list_del_init(&request->dev_node);
Dan Williams6f231dd2011-07-02 22:56:22 -07002592 break;
2593
2594 case isci_perform_aborted_io_completion:
Jeff Skirvina5fde222011-03-04 14:06:42 -08002595 /* No notification to libsas because this request is
2596 * already in the abort path.
Dan Williams6f231dd2011-07-02 22:56:22 -07002597 */
2598 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002599 "%s: Aborted - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002600 __func__,
2601 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002602 task->task_status.resp, response,
2603 task->task_status.stat, status);
Jeff Skirvina5fde222011-03-04 14:06:42 -08002604
2605 /* Wake up whatever process was waiting for this
2606 * request to complete.
2607 */
2608 WARN_ON(request->io_request_completion == NULL);
2609
2610 if (request->io_request_completion != NULL) {
2611
2612 /* Signal whoever is waiting that this
2613 * request is complete.
2614 */
2615 complete(request->io_request_completion);
2616 }
Dan Williams6f231dd2011-07-02 22:56:22 -07002617 break;
2618
2619 case isci_perform_error_io_completion:
2620 /* Use sas_task_abort */
2621 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002622 "%s: Error - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002623 __func__,
2624 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002625 task->task_status.resp, response,
2626 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07002627 /* Add to the aborted list. */
2628 list_add(&request->completed_node,
Jeff Skirvin11b00c12011-03-04 14:06:40 -08002629 &host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07002630 break;
2631
2632 default:
2633 dev_warn(&host->pdev->dev,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002634 "%s: Unknown - task = %p, response=%d (%d), status=%d (%d)\n",
Dan Williams6f231dd2011-07-02 22:56:22 -07002635 __func__,
2636 task,
Jeff Skirvinaa145102011-03-07 16:40:47 -07002637 task->task_status.resp, response,
2638 task->task_status.stat, status);
Dan Williams6f231dd2011-07-02 22:56:22 -07002639
Jeff Skirvina5fde222011-03-04 14:06:42 -08002640 /* Add to the error to libsas list. */
Dan Williams6f231dd2011-07-02 22:56:22 -07002641 list_add(&request->completed_node,
Jeff Skirvin11b00c12011-03-04 14:06:40 -08002642 &host->requests_to_errorback);
Dan Williams6f231dd2011-07-02 22:56:22 -07002643 break;
2644 }
2645}
2646
Dan Williamsf1f52e72011-05-10 02:28:45 -07002647static void isci_request_io_request_complete(struct isci_host *isci_host,
2648 struct isci_request *request,
2649 enum sci_io_status completion_status)
Dan Williams6f231dd2011-07-02 22:56:22 -07002650{
2651 struct sas_task *task = isci_request_access_task(request);
2652 struct ssp_response_iu *resp_iu;
2653 void *resp_buf;
2654 unsigned long task_flags;
Dan Williams6f231dd2011-07-02 22:56:22 -07002655 struct isci_remote_device *isci_device = request->isci_device;
2656 enum service_response response = SAS_TASK_UNDELIVERED;
2657 enum exec_status status = SAS_ABORTED_TASK;
2658 enum isci_request_status request_status;
2659 enum isci_completion_selection complete_to_host
2660 = isci_perform_normal_io_completion;
2661
2662 dev_dbg(&isci_host->pdev->dev,
2663 "%s: request = %p, task = %p,\n"
2664 "task->data_dir = %d completion_status = 0x%x\n",
2665 __func__,
2666 request,
2667 task,
2668 task->data_dir,
2669 completion_status);
2670
Jeff Skirvina5fde222011-03-04 14:06:42 -08002671 spin_lock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002672 request_status = isci_request_get_state(request);
Dan Williams6f231dd2011-07-02 22:56:22 -07002673
2674 /* Decode the request status. Note that if the request has been
2675 * aborted by a task management function, we don't care
2676 * what the status is.
2677 */
2678 switch (request_status) {
2679
2680 case aborted:
2681 /* "aborted" indicates that the request was aborted by a task
2682 * management function, since once a task management request is
 2683	 * performed by the device, the request only completes because
2684 * of the subsequent driver terminate.
2685 *
2686 * Aborted also means an external thread is explicitly managing
2687 * this request, so that we do not complete it up the stack.
2688 *
2689 * The target is still there (since the TMF was successful).
2690 */
2691 request->complete_in_target = true;
2692 response = SAS_TASK_COMPLETE;
2693
2694 /* See if the device has been/is being stopped. Note
2695 * that we ignore the quiesce state, since we are
2696 * concerned about the actual device state.
2697 */
2698 if ((isci_device->status == isci_stopping)
2699 || (isci_device->status == isci_stopped)
2700 )
2701 status = SAS_DEVICE_UNKNOWN;
2702 else
2703 status = SAS_ABORTED_TASK;
2704
2705 complete_to_host = isci_perform_aborted_io_completion;
2706 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002707
2708 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002709 break;
2710
2711 case aborting:
2712 /* aborting means that the task management function tried and
2713 * failed to abort the request. We need to note the request
2714 * as SAS_TASK_UNDELIVERED, so that the scsi mid layer marks the
2715 * target as down.
2716 *
2717 * Aborting also means an external thread is explicitly managing
2718 * this request, so that we do not complete it up the stack.
2719 */
2720 request->complete_in_target = true;
2721 response = SAS_TASK_UNDELIVERED;
2722
2723 if ((isci_device->status == isci_stopping) ||
2724 (isci_device->status == isci_stopped))
2725 /* The device has been /is being stopped. Note that
2726 * we ignore the quiesce state, since we are
2727 * concerned about the actual device state.
2728 */
2729 status = SAS_DEVICE_UNKNOWN;
2730 else
2731 status = SAS_PHY_DOWN;
2732
2733 complete_to_host = isci_perform_aborted_io_completion;
2734
2735 /* This was an aborted request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002736
2737 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002738 break;
2739
2740 case terminating:
2741
 2742	/* This was a terminated request. This happens when
2743 * the I/O is being terminated because of an action on
2744 * the device (reset, tear down, etc.), and the I/O needs
2745 * to be completed up the stack.
2746 */
2747 request->complete_in_target = true;
2748 response = SAS_TASK_UNDELIVERED;
2749
2750 /* See if the device has been/is being stopped. Note
2751 * that we ignore the quiesce state, since we are
2752 * concerned about the actual device state.
2753 */
2754 if ((isci_device->status == isci_stopping) ||
2755 (isci_device->status == isci_stopped))
2756 status = SAS_DEVICE_UNKNOWN;
2757 else
2758 status = SAS_ABORTED_TASK;
2759
Jeff Skirvina5fde222011-03-04 14:06:42 -08002760 complete_to_host = isci_perform_aborted_io_completion;
Dan Williams6f231dd2011-07-02 22:56:22 -07002761
2762 /* This was a terminated request. */
Jeff Skirvina5fde222011-03-04 14:06:42 -08002763
2764 spin_unlock(&request->state_lock);
Dan Williams6f231dd2011-07-02 22:56:22 -07002765 break;
2766
2767 default:
2768
Jeff Skirvina5fde222011-03-04 14:06:42 -08002769 /* The request is done from an SCU HW perspective. */
2770 request->status = completed;
2771
2772 spin_unlock(&request->state_lock);
2773
Dan Williams6f231dd2011-07-02 22:56:22 -07002774 /* This is an active request being completed from the core. */
2775 switch (completion_status) {
2776
2777 case SCI_IO_FAILURE_RESPONSE_VALID:
2778 dev_dbg(&isci_host->pdev->dev,
2779 "%s: SCI_IO_FAILURE_RESPONSE_VALID (%p/%p)\n",
2780 __func__,
2781 request,
2782 task);
2783
2784 if (sas_protocol_ata(task->task_proto)) {
Dan Williams67ea8382011-05-08 11:47:15 -07002785 resp_buf = &request->sci.stp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002786 isci_request_process_stp_response(task,
Dan Williamsb7645812011-05-08 02:35:32 -07002787 resp_buf);
Dan Williams6f231dd2011-07-02 22:56:22 -07002788 } else if (SAS_PROTOCOL_SSP == task->task_proto) {
2789
2790 /* crack the iu response buffer. */
Dan Williams67ea8382011-05-08 11:47:15 -07002791 resp_iu = &request->sci.ssp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002792 isci_request_process_response_iu(task, resp_iu,
Dan Williamsb7645812011-05-08 02:35:32 -07002793 &isci_host->pdev->dev);
Dan Williams6f231dd2011-07-02 22:56:22 -07002794
2795 } else if (SAS_PROTOCOL_SMP == task->task_proto) {
2796
2797 dev_err(&isci_host->pdev->dev,
2798 "%s: SCI_IO_FAILURE_RESPONSE_VALID: "
2799 "SAS_PROTOCOL_SMP protocol\n",
2800 __func__);
2801
2802 } else
2803 dev_err(&isci_host->pdev->dev,
2804 "%s: unknown protocol\n", __func__);
2805
2806 /* use the task status set in the task struct by the
2807 * isci_request_process_response_iu call.
2808 */
2809 request->complete_in_target = true;
2810 response = task->task_status.resp;
2811 status = task->task_status.stat;
2812 break;
2813
2814 case SCI_IO_SUCCESS:
2815 case SCI_IO_SUCCESS_IO_DONE_EARLY:
2816
2817 response = SAS_TASK_COMPLETE;
2818 status = SAM_STAT_GOOD;
2819 request->complete_in_target = true;
2820
2821 if (task->task_proto == SAS_PROTOCOL_SMP) {
Dan Williams67ea8382011-05-08 11:47:15 -07002822 void *rsp = &request->sci.smp.rsp;
Dan Williams6f231dd2011-07-02 22:56:22 -07002823
2824 dev_dbg(&isci_host->pdev->dev,
2825 "%s: SMP protocol completion\n",
2826 __func__);
2827
2828 sg_copy_from_buffer(
2829 &task->smp_task.smp_resp, 1,
Dan Williamsb7645812011-05-08 02:35:32 -07002830 rsp, sizeof(struct smp_resp));
Dan Williams6f231dd2011-07-02 22:56:22 -07002831 } else if (completion_status
2832 == SCI_IO_SUCCESS_IO_DONE_EARLY) {
2833
2834 /* This was an SSP / STP / SATA transfer.
2835 * There is a possibility that less data than
2836 * the maximum was transferred.
2837 */
Dan Williamsf1f52e72011-05-10 02:28:45 -07002838 u32 transferred_length = sci_req_tx_bytes(&request->sci);
Dan Williams6f231dd2011-07-02 22:56:22 -07002839
2840 task->task_status.residual
2841 = task->total_xfer_len - transferred_length;
2842
2843 /* If there were residual bytes, call this an
2844 * underrun.
2845 */
2846 if (task->task_status.residual != 0)
2847 status = SAS_DATA_UNDERRUN;
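				/* Worked example with hypothetical numbers:
				 * for total_xfer_len == 1024 and only 768
				 * bytes actually transferred, residual is
				 * 256 and the I/O is reported to libsas as
				 * SAS_DATA_UNDERRUN.
				 */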
2848
2849 dev_dbg(&isci_host->pdev->dev,
2850 "%s: SCI_IO_SUCCESS_IO_DONE_EARLY %d\n",
2851 __func__,
2852 status);
2853
2854 } else
2855 dev_dbg(&isci_host->pdev->dev,
2856 "%s: SCI_IO_SUCCESS\n",
2857 __func__);
2858
2859 break;
2860
2861 case SCI_IO_FAILURE_TERMINATED:
2862 dev_dbg(&isci_host->pdev->dev,
2863 "%s: SCI_IO_FAILURE_TERMINATED (%p/%p)\n",
2864 __func__,
2865 request,
2866 task);
2867
2868 /* The request was terminated explicitly. No handling
2869 * is needed in the SCSI error handler path.
2870 */
2871 request->complete_in_target = true;
2872 response = SAS_TASK_UNDELIVERED;
2873
2874 /* See if the device has been/is being stopped. Note
2875 * that we ignore the quiesce state, since we are
2876 * concerned about the actual device state.
2877 */
2878 if ((isci_device->status == isci_stopping) ||
2879 (isci_device->status == isci_stopped))
2880 status = SAS_DEVICE_UNKNOWN;
2881 else
2882 status = SAS_ABORTED_TASK;
2883
2884 complete_to_host = isci_perform_normal_io_completion;
2885 break;
2886
2887 case SCI_FAILURE_CONTROLLER_SPECIFIC_IO_ERR:
2888
2889 isci_request_handle_controller_specific_errors(
2890 isci_device, request, task, &response, &status,
2891 &complete_to_host);
2892
2893 break;
2894
2895 case SCI_IO_FAILURE_REMOTE_DEVICE_RESET_REQUIRED:
2896 /* This is a special case, in that the I/O completion
2897 * is telling us that the device needs a reset.
2898 * In order for the device reset condition to be
2899 * noticed, the I/O has to be handled in the error
2900 * handler. Set the reset flag and cause the
2901 * SCSI error thread to be scheduled.
2902 */
2903 spin_lock_irqsave(&task->task_state_lock, task_flags);
2904 task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
2905 spin_unlock_irqrestore(&task->task_state_lock, task_flags);
2906
Jeff Skirvinaa145102011-03-07 16:40:47 -07002907 /* Fail the I/O. */
2908 response = SAS_TASK_UNDELIVERED;
2909 status = SAM_STAT_TASK_ABORTED;
2910
Dan Williams6f231dd2011-07-02 22:56:22 -07002911 complete_to_host = isci_perform_error_io_completion;
2912 request->complete_in_target = false;
2913 break;
2914
2915 default:
2916 /* Catch any otherwise unhandled error codes here. */
2917 dev_warn(&isci_host->pdev->dev,
2918 "%s: invalid completion code: 0x%x - "
2919 "isci_request = %p\n",
2920 __func__, completion_status, request);
2921
2922 response = SAS_TASK_UNDELIVERED;
2923
2924 /* See if the device has been/is being stopped. Note
2925 * that we ignore the quiesce state, since we are
2926 * concerned about the actual device state.
2927 */
2928 if ((isci_device->status == isci_stopping) ||
2929 (isci_device->status == isci_stopped))
2930 status = SAS_DEVICE_UNKNOWN;
2931 else
2932 status = SAS_ABORTED_TASK;
2933
2934 complete_to_host = isci_perform_error_io_completion;
2935 request->complete_in_target = false;
2936 break;
2937 }
2938 break;
2939 }
2940
2941 isci_request_unmap_sgl(request, isci_host->pdev);
2942
2943 /* Put the completed request on the correct list */
2944 isci_task_save_for_upper_layer_completion(isci_host, request, response,
2945 status, complete_to_host
2946 );
2947
2948 /* complete the io request to the core. */
Artur Wojcikcc3dbd02011-05-04 07:58:16 +00002949 scic_controller_complete_io(&isci_host->sci,
Dan Williams57f20f42011-04-21 18:14:45 -07002950 &isci_device->sci,
Dan Williams67ea8382011-05-08 11:47:15 -07002951 &request->sci);
2952 /* set terminated handle so it cannot be completed or
Dan Williams6f231dd2011-07-02 22:56:22 -07002953 * terminated again, and to cause any calls into abort
2954 * task to recognize the already completed case.
2955 */
Dan Williams67ea8382011-05-08 11:47:15 -07002956 request->terminated = true;
Dan Williams6f231dd2011-07-02 22:56:22 -07002957
Dan Williams6f231dd2011-07-02 22:56:22 -07002958 isci_host_can_dequeue(isci_host, 1);
2959}
Dan Williamsf1f52e72011-05-10 02:28:45 -07002960
/**
 * scic_sds_request_initial_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_INITIAL state. This state is entered when the initial
 * base request is constructed. Entry into the initial state sets all handlers
 * for the io request object to their default handlers.
 */
static void scic_sds_request_initial_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_INITIAL
	);
}

/**
 * scic_sds_request_constructed_state_enter() -
 * @object: The io request object that is to enter the constructed state.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_CONSTRUCTED state. The method sets the state handlers
 * for the constructed state.
 */
static void scic_sds_request_constructed_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_CONSTRUCTED
	);
}

static void scic_sds_request_started_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;
	struct sci_base_state_machine *sm = &sci_req->state_machine;
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	struct domain_device *dev = sci_dev_to_domain(sci_req->target_device);
	struct sas_task *task;

	/* XXX as hch said, always creating an internal sas_task for tmf
	 * requests would simplify the driver
	 */
	task = ireq->ttype == io_task ? isci_request_access_task(ireq) : NULL;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_STARTED
	);

	/* all unaccelerated request types (non ssp or ncq) are handled with
	 * substates
	 */
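	/* Dispatch summary for the branches below:
	 *   - task management to an end device: task-mgmt await-TC substate
	 *   - SATA soft-reset TMF: soft-reset await-H2D substate
	 *   - SMP request: SMP await-response substate
	 *   - non-NCQ STP/SATA: non-data, UDMA, or PIO substate by data mode
	 *   - everything else (SSP, NCQ): remains in the STARTED state
	 */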
	if (!task && dev->dev_type == SAS_END_DEV) {
		sci_base_state_machine_change_state(sm,
			SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION);
	} else if (!task &&
		   (isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_high ||
		    isci_request_access_tmf(ireq)->tmf_code == isci_tmf_sata_srst_low)) {
		sci_base_state_machine_change_state(sm,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE);
	} else if (task && task->task_proto == SAS_PROTOCOL_SMP) {
		sci_base_state_machine_change_state(sm,
			SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE);
	} else if (task && sas_protocol_ata(task->task_proto) &&
		   !task->ata_task.use_ncq) {
		u32 state;

		if (task->data_dir == DMA_NONE)
			state = SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE;
		else if (task->ata_task.dma_xfer)
			state = SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE;
		else /* PIO */
			state = SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE;

		sci_base_state_machine_change_state(sm, state);
	}
}

/**
 * scic_sds_request_completed_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
 *    object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_COMPLETED state. This state is entered when the
 * SCIC_SDS_IO_REQUEST has completed. The method will decode the request
 * completion status and convert it to an enum sci_status to return in the
 * completion callback function.
 */
static void scic_sds_request_completed_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scic_sds_controller *scic =
		scic_sds_request_get_controller(sci_req);
	struct isci_host *ihost = scic_to_ihost(scic);
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	SET_STATE_HANDLER(sci_req,
			  scic_sds_request_state_handler_table,
			  SCI_BASE_REQUEST_STATE_COMPLETED);

	/* Tell the SCI_USER that the IO request is complete */
	if (sci_req->is_task_management_request == false)
		isci_request_io_request_complete(ihost, ireq,
						 sci_req->sci_status);
	else
		isci_task_request_complete(ihost, ireq, sci_req->sci_status);
}

/**
 * scic_sds_request_aborting_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This object is cast into a SCIC_SDS_IO_REQUEST
 *    object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_ABORTING state.
 */
static void scic_sds_request_aborting_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	/* Setting the abort bit in the Task Context is required by the
	 * silicon.
	 */
	sci_req->task_context_buffer->abort = 1;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_ABORTING
	);
}

/**
 * scic_sds_request_final_state_enter() -
 * @object: This parameter specifies the base object for which the state
 *    transition is occurring. This is cast into a SCIC_SDS_IO_REQUEST object.
 *
 * This method implements the actions taken when entering the
 * SCI_BASE_REQUEST_STATE_FINAL state. The only action required is to put the
 * state handlers in place.
 */
static void scic_sds_request_final_state_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCI_BASE_REQUEST_STATE_FINAL
	);
}

static void scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION
	);
}

static void scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE
	);
}

static void scic_sds_smp_request_started_await_response_substate_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE
	);
}

static void scic_sds_smp_request_started_await_tc_completion_substate_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION
	);
}

static void scic_sds_stp_request_started_non_data_await_h2d_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req
	);
}

static void scic_sds_stp_request_started_non_data_await_d2h_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE
	);
}

static void scic_sds_stp_request_started_pio_await_h2d_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req);
}

static void scic_sds_stp_request_started_pio_await_frame_enter(void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE
	);
}

static void scic_sds_stp_request_started_pio_data_in_await_data_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE
	);
}

static void scic_sds_stp_request_started_pio_data_out_transmit_data_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE
	);
}

static void scic_sds_stp_request_started_udma_await_tc_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE
	);
}

/**
 * scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter() -
 *
 * This state is entered when there is a TC completion failure. The hardware
 * received an unexpected condition while processing the IO request and now
 * will deliver the D2H register FIS as an unsolicited frame (UF) to complete
 * the IO.
 */
static void scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE
	);
}

static void scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE
	);

	scic_sds_remote_device_set_working_request(
		sci_req->target_device, sci_req
	);
}

static void scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;
	struct scu_task_context *task_context;
	struct host_to_dev_fis *h2d_fis;
	enum sci_status status;

	/* Clear the SRST bit */
	h2d_fis = &sci_req->stp.cmd;
	h2d_fis->control = 0;

	/* Clear the TC control bit */
	task_context = scic_sds_controller_get_task_context_buffer(
		sci_req->owning_controller, sci_req->io_tag);
	task_context->control_frame = 0;

	status = scic_controller_continue_io(sci_req);
	if (status == SCI_SUCCESS) {
		SET_STATE_HANDLER(
			sci_req,
			scic_sds_request_state_handler_table,
			SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE
		);
	}
}

static void scic_sds_stp_request_started_soft_reset_await_d2h_response_enter(
	void *object)
{
	struct scic_sds_request *sci_req = object;

	SET_STATE_HANDLER(
		sci_req,
		scic_sds_request_state_handler_table,
		SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE
	);
}

static const struct sci_base_state scic_sds_request_state_table[] = {
	[SCI_BASE_REQUEST_STATE_INITIAL] = {
		.enter_state = scic_sds_request_initial_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_CONSTRUCTED] = {
		.enter_state = scic_sds_request_constructed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_STARTED] = {
		.enter_state = scic_sds_request_started_state_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_non_data_await_d2h_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_h2d_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_await_frame_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_in_await_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_pio_data_out_transmit_data_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_tc_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_udma_await_d2h_reg_fis_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_asserted_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_h2d_diagnostic_completion_enter,
	},
	[SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE] = {
		.enter_state = scic_sds_stp_request_started_soft_reset_await_d2h_response_enter,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.enter_state = scic_sds_io_request_started_task_mgmt_await_tc_completion_substate_enter,
	},
	[SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE] = {
		.enter_state = scic_sds_io_request_started_task_mgmt_await_task_response_substate_enter,
	},
	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE] = {
		.enter_state = scic_sds_smp_request_started_await_response_substate_enter,
	},
	[SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION] = {
		.enter_state = scic_sds_smp_request_started_await_tc_completion_substate_enter,
	},
	[SCI_BASE_REQUEST_STATE_COMPLETED] = {
		.enter_state = scic_sds_request_completed_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_ABORTING] = {
		.enter_state = scic_sds_request_aborting_state_enter,
	},
	[SCI_BASE_REQUEST_STATE_FINAL] = {
		.enter_state = scic_sds_request_final_state_enter,
	},
};

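/* The enter handlers above are wired into each request's state machine by
 * scic_sds_general_request_construct() below, which constructs and starts
 * the machine in SCI_BASE_REQUEST_STATE_INITIAL.
 */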
static void scic_sds_general_request_construct(struct scic_sds_controller *scic,
					       struct scic_sds_remote_device *sci_dev,
					       u16 io_tag, struct scic_sds_request *sci_req)
{
	sci_base_state_machine_construct(&sci_req->state_machine, sci_req,
			scic_sds_request_state_table, SCI_BASE_REQUEST_STATE_INITIAL);
	sci_base_state_machine_start(&sci_req->state_machine);

	sci_req->io_tag = io_tag;
	sci_req->owning_controller = scic;
	sci_req->target_device = sci_dev;
	sci_req->protocol = SCIC_NO_PROTOCOL;
	sci_req->saved_rx_frame_index = SCU_INVALID_FRAME_INDEX;
	sci_req->device_sequence = scic_sds_remote_device_get_sequence(sci_dev);

	sci_req->sci_status = SCI_SUCCESS;
	sci_req->scu_status = 0;
	sci_req->post_context = 0xFFFFFFFF;

	sci_req->is_task_management_request = false;

	if (io_tag == SCI_CONTROLLER_INVALID_IO_TAG) {
		sci_req->was_tag_assigned_by_user = false;
		sci_req->task_context_buffer = &sci_req->tc;
	} else {
		sci_req->was_tag_assigned_by_user = true;

		sci_req->task_context_buffer =
			scic_sds_controller_get_task_context_buffer(scic, io_tag);
	}
}

static enum sci_status
scic_io_request_construct(struct scic_sds_controller *scic,
			  struct scic_sds_remote_device *sci_dev,
			  u16 io_tag, struct scic_sds_request *sci_req)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);

	if (sci_dev->rnc.remote_node_index == SCIC_SDS_REMOTE_NODE_CONTEXT_INVALID_INDEX)
		return SCI_FAILURE_INVALID_REMOTE_DEVICE;

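	/* Protocol-specific construction: SSP needs no extra setup here;
	 * STP/SATA and SMP requests get their command areas zeroed before
	 * use; anything else is an unsupported protocol.
	 */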
	if (dev->dev_type == SAS_END_DEV)
		/* pass */;
	else if (dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP))
		memset(&sci_req->stp.cmd, 0, sizeof(sci_req->stp.cmd));
	else if (dev_is_expander(dev))
		memset(&sci_req->smp.cmd, 0, sizeof(sci_req->smp.cmd));
	else
		return SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	memset(sci_req->task_context_buffer, 0,
	       offsetof(struct scu_task_context, sgl_pair_ab));

	return status;
}

enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag, struct scic_sds_request *sci_req)
{
	struct domain_device *dev = sci_dev_to_domain(sci_dev);
	enum sci_status status = SCI_SUCCESS;

	/* Build the common part of the request */
	scic_sds_general_request_construct(scic, sci_dev, io_tag, sci_req);

	if (dev->dev_type == SAS_END_DEV ||
	    dev->dev_type == SATA_DEV || (dev->tproto & SAS_PROTOCOL_STP)) {
		sci_req->is_task_management_request = true;
		memset(sci_req->task_context_buffer, 0, sizeof(struct scu_task_context));
	} else
		status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;

	return status;
}

static enum sci_status isci_request_ssp_request_construct(
	struct isci_request *request)
{
	enum sci_status status;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);
	status = scic_io_request_construct_basic_ssp(&request->sci);
	return status;
}

static enum sci_status isci_request_stp_request_construct(
	struct isci_request *request)
{
	struct sas_task *task = isci_request_access_task(request);
	enum sci_status status;
	struct host_to_dev_fis *register_fis;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p\n",
		__func__,
		request);

	/* Get the host_to_dev_fis from the core and copy
	 * the fis from the task into it.
	 */
	register_fis = isci_sata_task_to_fis_copy(task);

	status = scic_io_request_construct_basic_sata(&request->sci);

	/* Set the ncq tag in the fis, from the queued
	 * command in the task.
	 */
	if (isci_sata_is_task_ncq(task)) {

		isci_sata_set_ncq_tag(
			register_fis,
			task
		);
	}

	return status;
}

/*
 * scu_smp_request_construct_task_context() -
 * This function fills in the SCU Task Context for an SMP request. The
 * following important settings are used:
 *   - task_type == SCU_TASK_TYPE_SMP. This simply indicates that a normal
 *     request type (i.e. non-raw frame) is being used to perform task
 *     management.
 *   - control_frame == 1. This ensures that the proper endianness is set so
 *     that the bytes are transmitted in the right order for an SMP request
 *     frame.
 * @sci_req: This parameter specifies the smp request object being
 *    constructed.
 */
static void
scu_smp_request_construct_task_context(struct scic_sds_request *sci_req,
				       struct smp_req *smp_req)
{
	dma_addr_t dma_addr;
	struct scic_sds_controller *scic;
	struct scic_sds_remote_device *sci_dev;
	struct scic_sds_port *sci_port;
	struct scu_task_context *task_context;
	ssize_t word_cnt = sizeof(struct smp_req) / sizeof(u32);

	/* byte swap the smp request. */
	sci_swab32_cpy(&sci_req->smp.cmd, smp_req,
		       word_cnt);

	task_context = scic_sds_request_get_task_context(sci_req);

	scic = scic_sds_request_get_controller(sci_req);
	sci_dev = scic_sds_request_get_device(sci_req);
	sci_port = scic_sds_request_get_port(sci_req);

	/*
	 * Fill in the TC with its required data
	 * 00h
	 */
	task_context->priority = 0;
	task_context->initiator_request = 1;
	task_context->connection_rate = sci_dev->connection_rate;
	task_context->protocol_engine_index =
		scic_sds_controller_get_protocol_engine_group(scic);
	task_context->logical_port_index = scic_sds_port_get_index(sci_port);
	task_context->protocol_type = SCU_TASK_CONTEXT_PROTOCOL_SMP;
	task_context->abort = 0;
	task_context->valid = SCU_TASK_CONTEXT_VALID;
	task_context->context_type = SCU_TASK_CONTEXT_TYPE;

	/* 04h */
	task_context->remote_node_index = sci_dev->rnc.remote_node_index;
	task_context->command_code = 0;
	task_context->task_type = SCU_TASK_TYPE_SMP_REQUEST;

	/* 08h */
	task_context->link_layer_control = 0;
	task_context->do_not_dma_ssp_good_response = 1;
	task_context->strict_ordering = 0;
	task_context->control_frame = 1;
	task_context->timeout_enable = 0;
	task_context->block_guard_enable = 0;

	/* 0ch */
	task_context->address_modifier = 0;

	/* 10h */
	task_context->ssp_command_iu_length = smp_req->req_len;

	/* 14h */
	task_context->transfer_length_bytes = 0;

	/*
	 * 18h ~ 30h, protocol specific
	 * since the command IU has been built by the framework at this point,
	 * we just copy the first DWord from the command IU to this location.
	 */
	memcpy(&task_context->type.smp, &sci_req->smp.cmd, sizeof(u32));

	/*
	 * 40h
	 * "For SMP you could program it to zero. We would prefer that way
	 * so that done code will be consistent." - Venki
	 */
	task_context->task_phase = 0;

	if (sci_req->was_tag_assigned_by_user) {
		/*
		 * Build the task context now since we have already read
		 * the data
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(scic) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(sci_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT) |
			 scic_sds_io_tag_get_index(sci_req->io_tag));
	} else {
		/*
		 * Build the task context now since we have already read
		 * the data.
		 * I/O tag index is not assigned because we have to wait
		 * until we get a TCi.
		 */
		sci_req->post_context =
			(SCU_CONTEXT_COMMAND_REQUEST_TYPE_POST_TC |
			 (scic_sds_controller_get_protocol_engine_group(scic) <<
			  SCU_CONTEXT_COMMAND_PROTOCOL_ENGINE_GROUP_SHIFT) |
			 (scic_sds_port_get_index(sci_port) <<
			  SCU_CONTEXT_COMMAND_LOGICAL_PORT_SHIFT));
	}

	/*
	 * Copy the physical address of the command buffer to the SCU Task
	 * Context; the command buffer should not contain the command header.
	 */
	dma_addr = scic_io_request_get_dma_addr(sci_req,
						((char *) &sci_req->smp.cmd) +
						sizeof(u32));

	task_context->command_iu_upper = upper_32_bits(dma_addr);
	task_context->command_iu_lower = lower_32_bits(dma_addr);

	/* SMP response comes as UF, so no need to set response IU address. */
	task_context->response_iu_upper = 0;
	task_context->response_iu_lower = 0;
}

static enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req)
{
	struct smp_req *smp_req = kmalloc(sizeof(*smp_req), GFP_KERNEL);

	if (!smp_req)
		return SCI_FAILURE_INSUFFICIENT_RESOURCES;

	sci_req->protocol = SCIC_SMP_PROTOCOL;

	/* Construct the SMP SCU Task Context */
	memcpy(smp_req, &sci_req->smp.cmd, sizeof(*smp_req));

	/*
	 * Look at the SMP request's header fields; for certain SAS 1.x SMP
	 * functions under SAS 2.0, a zero request length really indicates
	 * a non-zero default length.
	 */
	if (smp_req->req_len == 0) {
		switch (smp_req->func) {
		case SMP_DISCOVER:
		case SMP_REPORT_PHY_ERR_LOG:
		case SMP_REPORT_PHY_SATA:
		case SMP_REPORT_ROUTE_INFO:
			smp_req->req_len = 2;
			break;
		case SMP_CONF_ROUTE_INFO:
		case SMP_PHY_CONTROL:
		case SMP_PHY_TEST_FUNCTION:
			smp_req->req_len = 9;
			break;
		/* Default - zero is a valid default for 2.0. */
		}
	}
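	/* For example (hypothetical frame): a DISCOVER request arriving with
	 * req_len == 0 is sent to the hardware with req_len == 2, the SAS
	 * 1.x-compatible request length for that function.
	 */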

	scu_smp_request_construct_task_context(sci_req, smp_req);

	sci_base_state_machine_change_state(&sci_req->state_machine,
					    SCI_BASE_REQUEST_STATE_CONSTRUCTED);

	kfree(smp_req);

	return SCI_SUCCESS;
}

/*
 * isci_smp_request_build() - This function builds the smp request.
 * @ireq: This parameter points to the isci_request allocated in the
 *    request construct function.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_smp_request_build(struct isci_request *ireq)
{
	enum sci_status status = SCI_FAILURE;
	struct sas_task *task = isci_request_access_task(ireq);
	struct scic_sds_request *sci_req = &ireq->sci;

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: request = %p\n", __func__, ireq);

	dev_dbg(&ireq->isci_host->pdev->dev,
		"%s: smp_req len = %d\n",
		__func__,
		task->smp_task.smp_req.length);

	/* copy the smp request command into the request's command IU. */
	sg_copy_to_buffer(&task->smp_task.smp_req, 1,
			  &sci_req->smp.cmd,
			  sizeof(struct smp_req));

	status = scic_io_request_construct_smp(sci_req);
	if (status != SCI_SUCCESS)
		dev_warn(&ireq->isci_host->pdev->dev,
			 "%s: failed with status = %d\n",
			 __func__,
			 status);

	return status;
}

/**
 * isci_io_request_build() - This function builds the io request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @request: This parameter points to the isci_request object allocated in the
 *    request construct function.
 * @isci_device: This parameter is the handle for the sci core's remote device
 *    object that is the destination for this request.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
static enum sci_status isci_io_request_build(
	struct isci_host *isci_host,
	struct isci_request *request,
	struct isci_remote_device *isci_device)
{
	enum sci_status status = SCI_SUCCESS;
	struct sas_task *task = isci_request_access_task(request);
	struct scic_sds_remote_device *sci_device = &isci_device->sci;

	dev_dbg(&isci_host->pdev->dev,
		"%s: isci_device = 0x%p; request = %p, "
		"num_scatter = %d\n",
		__func__,
		isci_device,
		request,
		task->num_scatter);

	/* map the sgl addresses, if present.
	 * libata does the mapping for sata devices
	 * before we get the request.
	 */
	if (task->num_scatter &&
	    !sas_protocol_ata(task->task_proto) &&
	    !(SAS_PROTOCOL_SMP & task->task_proto)) {

		request->num_sg_entries = dma_map_sg(
			&isci_host->pdev->dev,
			task->scatter,
			task->num_scatter,
			task->data_dir
			);

		if (request->num_sg_entries == 0)
			return SCI_FAILURE_INSUFFICIENT_RESOURCES;
	}

	/* build the common request object. For now,
	 * we will let the core allocate the IO tag.
	 */
	status = scic_io_request_construct(&isci_host->sci, sci_device,
					   SCI_CONTROLLER_INVALID_IO_TAG,
					   &request->sci);

	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request construct\n",
			 __func__);
		return SCI_FAILURE;
	}

	switch (task->task_proto) {
	case SAS_PROTOCOL_SMP:
		status = isci_smp_request_build(request);
		break;
	case SAS_PROTOCOL_SSP:
		status = isci_request_ssp_request_construct(request);
		break;
	case SAS_PROTOCOL_SATA:
	case SAS_PROTOCOL_STP:
	case SAS_PROTOCOL_SATA | SAS_PROTOCOL_STP:
		status = isci_request_stp_request_construct(request);
		break;
	default:
		dev_warn(&isci_host->pdev->dev,
			 "%s: unknown protocol\n", __func__);
		return SCI_FAILURE;
	}

	return SCI_SUCCESS;
}

/**
 * isci_request_alloc_core() - This function gets the request object from the
 *    isci_host dma cache.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter will contain the pointer to the new
 *    isci_request object.
 * @isci_device: This parameter is the pointer to the isci remote device object
 *    that is the destination for this request.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * Zero on successful completion, or a negative error code.
 */
static int isci_request_alloc_core(
	struct isci_host *isci_host,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int ret = 0;
	dma_addr_t handle;
	struct isci_request *request;

	/* get pointer to dma memory. This actually points
	 * to both the isci_request object and the embedded
	 * sci request object; the isci object is at the
	 * beginning of the memory allocated here.
	 */
	request = dma_pool_alloc(isci_host->dma_pool, gfp_flags, &handle);
	if (!request) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: dma_pool_alloc returned NULL\n", __func__);
		return -ENOMEM;
	}

	/* initialize the request object. */
	spin_lock_init(&request->state_lock);
	request->request_daddr = handle;
	request->isci_host = isci_host;
	request->isci_device = isci_device;
	request->io_request_completion = NULL;
	request->terminated = false;

	request->num_sg_entries = 0;

	request->complete_in_target = false;

	INIT_LIST_HEAD(&request->completed_node);
	INIT_LIST_HEAD(&request->dev_node);

	*isci_request = request;
	isci_request_change_state(request, allocated);

	return ret;
}

static int isci_request_alloc_io(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int retval = isci_request_alloc_core(isci_host, isci_request,
					     isci_device, gfp_flags);

	if (!retval) {
		(*isci_request)->ttype_ptr.io_task_ptr = task;
		(*isci_request)->ttype = io_task;

		task->lldd_task = *isci_request;
	}
	return retval;
}

/**
 * isci_request_alloc_tmf() - This function gets the request object from the
 *    isci_host dma cache and initializes the relevant fields for task
 *    management.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_tmf: This parameter is the task management struct to be built into
 *    the request.
 * @isci_request: This parameter will contain the pointer to the new
 *    isci_request object.
 * @isci_device: This parameter is the pointer to the isci remote device object
 *    that is the destination for this request.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * Zero on successful completion, or a negative error code.
 */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags)
{
	int retval = isci_request_alloc_core(isci_host, isci_request,
					     isci_device, gfp_flags);

	if (!retval) {

		(*isci_request)->ttype_ptr.tmf_task_ptr = isci_tmf;
		(*isci_request)->ttype = tmf_task;
	}
	return retval;
}

/**
 * isci_request_execute() - This function allocates the isci_request object,
 *    fills in some common fields, and starts the I/O in the core.
 * @isci_host: This parameter specifies the ISCI host object
 * @task: This parameter is the task struct from the upper layer driver.
 * @isci_request: This parameter will contain the pointer to the new
 *    isci_request object.
 * @gfp_flags: This parameter specifies the os allocation flags.
 *
 * SCI_SUCCESS on successful completion, or specific failure code.
 */
int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **isci_request,
	gfp_t gfp_flags)
{
	int ret = 0;
	struct scic_sds_remote_device *sci_device;
	enum sci_status status = SCI_FAILURE_UNSUPPORTED_PROTOCOL;
	struct isci_remote_device *isci_device;
	struct isci_request *request;
	unsigned long flags;

	isci_device = task->dev->lldd_dev;
	sci_device = &isci_device->sci;

	/* do common allocation and init of request object. */
	ret = isci_request_alloc_io(
		isci_host,
		task,
		&request,
		isci_device,
		gfp_flags
		);

	if (ret)
		goto out;

	status = isci_io_request_build(isci_host, request, isci_device);
	if (status != SCI_SUCCESS) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: request_construct failed - status = 0x%x\n",
			 __func__,
			 status);
		goto out;
	}

	spin_lock_irqsave(&isci_host->scic_lock, flags);

	/* send the request, let the core assign the IO TAG. */
	status = scic_controller_start_io(&isci_host->sci, sci_device,
					  &request->sci,
					  SCI_CONTROLLER_INVALID_IO_TAG);
	if (status != SCI_SUCCESS &&
	    status != SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		dev_warn(&isci_host->pdev->dev,
			 "%s: failed request start (0x%x)\n",
			 __func__, status);
		spin_unlock_irqrestore(&isci_host->scic_lock, flags);
		goto out;
	}

	/* Either I/O started OK, or the core has signaled that
	 * the device needs a target reset.
	 *
	 * In either case, hold onto the I/O for later.
	 *
	 * Update its status and add it to the list in the
	 * remote device object.
	 */
	isci_request_change_state(request, started);
	list_add(&request->dev_node, &isci_device->reqs_in_process);

	if (status == SCI_SUCCESS) {
		/* Save the tag for possible task mgmt later. */
		request->io_tag = request->sci.io_tag;
	} else {
		/* The request did not really start in the
		 * hardware, so clear the request handle
		 * here so no terminations will be done.
		 */
		request->terminated = true;
	}
	spin_unlock_irqrestore(&isci_host->scic_lock, flags);

	if (status ==
	    SCI_FAILURE_REMOTE_DEVICE_RESET_REQUIRED) {
		/* Signal libsas that we need the SCSI error
		 * handler thread to work on this I/O and that
		 * we want a device reset.
		 */
		spin_lock_irqsave(&task->task_state_lock, flags);
		task->task_state_flags |= SAS_TASK_NEED_DEV_RESET;
		spin_unlock_irqrestore(&task->task_state_lock, flags);

		/* Cause this task to be scheduled in the SCSI error
		 * handler thread.
		 */
		isci_execpath_callback(isci_host, task,
				       sas_task_abort);

		/* Change the status, since we are holding
		 * the I/O until it is managed by the SCSI
		 * error handler.
		 */
		status = SCI_SUCCESS;
	}

 out:
	if (status != SCI_SUCCESS) {
		/* release dma memory on failure. */
		isci_request_free(isci_host, request);
		request = NULL;
		ret = SCI_FAILURE;
	}

	*isci_request = request;
	return ret;
}
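
/* A hedged usage sketch (not taken verbatim from this driver): the libsas
 * execute path is expected to call isci_request_execute() roughly like
 *
 *	struct isci_request *ireq;
 *	int ret = isci_request_execute(ihost, task, &ireq, gfp_flags);
 *	if (ret != SCI_SUCCESS)
 *		... the request never started; fail the task upstream ...
 *
 * On success the request is tracked on the remote device's reqs_in_process
 * list until isci_request_io_request_complete() runs.
 */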