/*
 * This file is provided under a dual BSD/GPLv2 license. When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ISCI_REQUEST_H_
#define _ISCI_REQUEST_H_

#include "isci.h"
#include "host.h"
#include "scu_task_context.h"

/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *	request.
 */
enum isci_request_status {
	unallocated = 0x00,
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};

enum task_type {
	io_task = 0,
	tmf_task = 1
};

enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
}; /* XXX remove me, use sas_task.{dev|task_proto} instead */

struct scic_sds_stp_request {
	union {
		u32 ncq;

		u32 udma;

		struct scic_sds_stp_pio_request {
			/*
			 * Total transfer for the entire PIO request recorded
			 * at request construction time.
			 *
			 * @todo Should we just decrement this value for each
			 * byte of data transmitted or received to eliminate
			 * the current_transfer_bytes field?
			 */
			u32 total_transfer_bytes;

			/*
			 * Total number of bytes received/transmitted in data
			 * frames since the start of the IO request. At the
			 * end of the IO request this should equal the
			 * total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/*
			 * The number of bytes requested in the PIO setup.
			 */
			u32 pio_transfer_bytes;

			/*
			 * PIO Setup ending status value to tell us if we need
			 * to wait for another FIS or if the transfer is
			 * complete. On the receipt of a D2H FIS this will be
			 * the status field of that FIS.
			 */
			u8 ending_status;

			/*
			 * On receipt of a D2H FIS this will be the ending
			 * error field if the ending_status has the
			 * SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair;
				u8 sgl_set;
				u32 sgl_offset;
			} request_current;
		} pio;

		struct {
			/*
			 * The number of bytes requested in the PIO setup
			 * before CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};

struct scic_sds_request {
	/*
	 * This field contains the information for the base request state
	 * machine.
	 */
	struct sci_base_state_machine sm;

	/*
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/*
	 * This field simply points to the remote device to which this IO
	 * request is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/*
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/*
	 * This field indicates the IO tag for this request. The IO tag is
	 * comprised of the task_index and a sequence count. The sequence count
	 * is utilized to help identify tasks from one life to another.
	 */
	u16 io_tag;

	/*
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/*
	 * This field indicates the completion status taken from the SCU's
	 * completion code. It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/*
	 * This field indicates the completion status returned to the SCI user.
	 * It indicates the user's view of the io request completion.
	 */
	u32 sci_status;

	/*
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCI_MAX_SCATTER_GATHER_ELEMENTS + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/*
	 * This field indicates if this request is a task management request or
	 * normal IO request.
	 */
	bool is_task_management_request;

	/*
	 * This field is the index of the stored rx frame data. It is used in
	 * STP internal requests and SMP response frames. If this index is
	 * valid, the saved frame must be released on IO request completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/*
	 * This field is the recorded device sequence for the io request.
	 * This is recorded during the build operation and is compared in the
	 * start operation. If the sequence is different then there was a
	 * change of devices from the build to start operations.
	 */
	u8 device_sequence;

	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};

};

static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
{
	struct scic_sds_request *sci_req;

	sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
	return sci_req;
}

struct isci_request {
	enum isci_request_status status;
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task  */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/* Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;
};

static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);

	return ireq;
}

/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *	the common request state machine.
 */
enum sci_base_request_states {
	/*
	 * Simply the initial state for the base request state machine.
	 */
	SCI_REQ_INIT,

	/*
	 * This state indicates that the request has been constructed.
	 * This state is entered from the INITIAL state.
	 */
	SCI_REQ_CONSTRUCTED,

	/*
	 * This state indicates that the request has been started. This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_REQ_STARTED,

	SCI_REQ_STP_UDMA_WAIT_TC_COMP,
	SCI_REQ_STP_UDMA_WAIT_D2H,

	SCI_REQ_STP_NON_DATA_WAIT_H2D,
	SCI_REQ_STP_NON_DATA_WAIT_D2H,

	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_ASSERTED,
	SCI_REQ_STP_SOFT_RESET_WAIT_H2D_DIAG,
	SCI_REQ_STP_SOFT_RESET_WAIT_D2H,

	/*
	 * While in this state the IO request object is waiting for the TC
	 * completion notification for the H2D Register FIS.
	 */
	SCI_REQ_STP_PIO_WAIT_H2D,

	/*
	 * While in this state the IO request object is waiting for either a
	 * PIO Setup FIS or a D2H register FIS. The type of frame received is
	 * based on the result of the prior frame and line conditions.
	 */
	SCI_REQ_STP_PIO_WAIT_FRAME,

	/*
	 * While in this state the IO request object is waiting for a DATA
	 * frame from the device.
	 */
	SCI_REQ_STP_PIO_DATA_IN,

	/*
	 * While in this state the IO request object is waiting to transmit
	 * the next data frame to the device.
	 */
	SCI_REQ_STP_PIO_DATA_OUT,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCI_REQ_TASK_WAIT_TC_COMP,

	/*
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCI_REQ_TASK_WAIT_TC_RESP,

	/*
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCI_REQ_SMP_WAIT_RESP,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCI_REQ_SMP_WAIT_TC_COMP,

	/*
	 * This state indicates that the request has completed.
	 * This state is entered from the STARTED and ABORTING states.
	 */
	SCI_REQ_COMPLETED,

	/*
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.
	 * This state is entered from the CONSTRUCTED state.
	 * This state is entered from the STARTED state.
	 */
	SCI_REQ_ABORTING,

	/*
	 * Simply the final state for the base request state machine.
	 */
	SCI_REQ_FINAL,
};

/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the task context buffer for this request
 * object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)

/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	{ \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	}

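/*
 * Illustrative use only (the status codes shown are typical completion
 * values and will differ per completion path):
 *
 *	scic_sds_request_set_status(sci_req,
 *				    SCU_TASK_DONE_GOOD,
 *				    SCI_SUCCESS);
 */
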
/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data
 */
#define SCU_SGL_ZERO(scu_sge) \
	{ \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	}

/**
 * SCU_SGL_COPY() -
 *
 * This macro copies the SGL element data from the host OS scatterlist
 * entry (os_sge) to the hardware SGL element data
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	{ \
		(scu_sge).length = sg_dma_len(os_sge); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_modifier = 0; \
	}

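/*
 * Illustrative sketch only (not taken verbatim from a caller, and assuming
 * the scu_sgl_element_pair halves are named A and B): the copy macro is
 * typically applied while walking the OS scatterlist and filling both
 * halves of each hardware element pair, zeroing the unused half:
 *
 *	struct scatterlist *sg = task->scatter;
 *	struct scu_sgl_element_pair *scu_sg = &sci_req->sg_table[0];
 *
 *	while (sg) {
 *		SCU_SGL_COPY(scu_sg->A, sg);
 *		sg = sg_next(sg);
 *		if (sg) {
 *			SCU_SGL_COPY(scu_sg->B, sg);
 *			sg = sg_next(sg);
 *		} else {
 *			SCU_SGL_ZERO(scu_sg->B);
 *		}
 *		scu_sg++;
 *	}
 */
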
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status
scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
				  u32 event_code);
enum sci_status
scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
				  u32 frame_index);
enum sci_status
scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
extern enum sci_status
scic_sds_request_complete(struct scic_sds_request *sci_req);
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);

/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
					       dma_addr_t phys_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	dma_addr_t offset;

	BUG_ON(phys_addr < ireq->request_daddr);

	offset = phys_addr - ireq->request_daddr;

	BUG_ON(offset >= sizeof(*ireq));

	return (char *)ireq + offset;
}

/* XXX open code in caller */
static inline dma_addr_t
scic_io_request_get_dma_addr(struct scic_sds_request *sci_req, void *virt_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	char *requested_addr = (char *)virt_addr;
	char *base_addr = (char *)ireq;

	BUG_ON(requested_addr < base_addr);
	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));

	return ireq->request_daddr + (requested_addr - base_addr);
}

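/*
 * Both helpers assume the entire isci_request (including the embedded
 * scic_sds_request and its SGL/task-context storage) lives in a single
 * contiguous allocation whose bus address is request_daddr, so a field's
 * bus address is simply its byte offset within the request plus
 * request_daddr. Illustrative sketch only:
 *
 *	dma_addr_t sgl_dma =
 *		scic_io_request_get_dma_addr(sci_req, &sci_req->sg_table[0]);
 */
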
/**
 * isci_request_get_state() - This function gets the status of the request
 *	object.
 * @isci_request: This parameter points to the isci_request object
 *
 * Returns the status of the object as an isci_request_status enum.
 */
static inline enum isci_request_status
isci_request_get_state(struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}

/**
 * isci_request_change_state() - This function sets the status of the request
 *	object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new status of the object
 *
 * Returns the state of the object prior to the change.
 */
static inline enum isci_request_status
isci_request_change_state(struct isci_request *isci_request,
			  enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	BUG_ON(isci_request == NULL);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}

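/*
 * Illustrative only ('ireq' is a placeholder request pointer): a request
 * is typically moved to "started" before it is handed to the hardware,
 * with the prior state returned for the caller to inspect:
 *
 *	enum isci_request_status old_state;
 *
 *	old_state = isci_request_change_state(ireq, started);
 */
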
/**
 * isci_request_change_started_to_newstate() - This function sets the status of
 *	the request object if it is currently started or aborting.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *	signalled when the old request completes.
 * @newstate: This parameter is the new status of the object
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_started_to_newstate(struct isci_request *isci_request,
					struct completion *completion_ptr,
					enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}

	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}

/**
 * isci_request_change_started_to_aborted() - This function sets the status of
 *	the request object to "aborted".
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *	signalled when the old request completes.
 *
 * Returns the state previous to any change.
 */
static inline enum isci_request_status
isci_request_change_started_to_aborted(struct isci_request *isci_request,
				       struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(isci_request,
						       completion_ptr,
						       aborted);
}

/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object
 */
static inline void isci_request_free(struct isci_host *isci_host,
				     struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* release the dma memory if we fail. */
	dma_pool_free(isci_host->dma_pool,
		      isci_request,
		      isci_request->request_daddr);
}

#define isci_request_access_task(req) ((req)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(req) ((req)->ttype_ptr.tmf_task_ptr)

struct isci_request *isci_request_alloc_tmf(struct isci_host *ihost,
					    struct isci_tmf *isci_tmf,
					    gfp_t gfp_flags);

int isci_request_execute(struct isci_host *ihost, struct isci_remote_device *idev,
			 struct sas_task *task, gfp_t gfp_flags);

/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *	sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_dev struct for the controller
 */
static inline void
isci_request_unmap_sgl(struct isci_request *request, struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n ",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir
				);

		else /* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir
				);
	}
}

/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *	core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *	core for this request.
 *
 * Returns a pointer to the next sge for the specified request.
 */
static inline void *
isci_request_io_request_get_next_sge(struct isci_request *request,
				     void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through.. */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */
		ret = NULL;		/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}

void
isci_terminate_pending_requests(struct isci_host *ihost,
				struct isci_remote_device *idev);
enum sci_status
scic_task_request_construct(struct scic_sds_controller *scic,
			    struct scic_sds_remote_device *sci_dev,
			    u16 io_tag,
			    struct scic_sds_request *sci_req);
enum sci_status
scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status
scic_task_request_construct_sata(struct scic_sds_request *sci_req);
void
scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
#endif /* !defined(_ISCI_REQUEST_H_) */