blob: e13ca3f7c8d7d08364aa191fb04b3f610729099f [file] [log] [blame]
Dan Williams6f231dd2011-07-02 22:56:22 -07001/*
2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
4 *
5 * GPL LICENSE SUMMARY
6 *
7 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
8 *
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
12 *
13 * This program is distributed in the hope that it will be useful, but
14 * WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
16 * General Public License for more details.
17 *
18 * You should have received a copy of the GNU General Public License
19 * along with this program; if not, write to the Free Software
20 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
21 * The full GNU General Public License is included in this distribution
22 * in the file called LICENSE.GPL.
23 *
24 * BSD LICENSE
25 *
26 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
27 * All rights reserved.
28 *
29 * Redistribution and use in source and binary forms, with or without
30 * modification, are permitted provided that the following conditions
31 * are met:
32 *
33 * * Redistributions of source code must retain the above copyright
34 * notice, this list of conditions and the following disclaimer.
35 * * Redistributions in binary form must reproduce the above copyright
36 * notice, this list of conditions and the following disclaimer in
37 * the documentation and/or other materials provided with the
38 * distribution.
39 * * Neither the name of Intel Corporation nor the names of its
40 * contributors may be used to endorse or promote products derived
41 * from this software without specific prior written permission.
42 *
43 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
44 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
45 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
46 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
47 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
48 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
49 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
50 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
51 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
52 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
53 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
54 */
55
Dan Williams0d843662011-05-08 01:56:57 -070056#ifndef _ISCI_REQUEST_H_
Dan Williams6f231dd2011-07-02 22:56:22 -070057#define _ISCI_REQUEST_H_
58
59#include "isci.h"
Dan Williamsce2b3262011-05-08 15:49:15 -070060#include "host.h"
Dan Williamsf1f52e72011-05-10 02:28:45 -070061#include "scu_task_context.h"
Dan Williams6f231dd2011-07-02 22:56:22 -070062
/**
 * enum isci_request_status - lifecycle states of an I/O request as tracked
 * by the OS-facing (isci) layer; guarded by isci_request.state_lock.
 */
enum isci_request_status {
	unallocated = 0x00,	/* request object is not in use */
	allocated = 0x01,	/* object allocated but not yet started */
	started = 0x02,		/* request submitted and in flight */
	completed = 0x03,	/* request finished normally */
	aborting = 0x04,	/* abort requested, still in flight */
	aborted = 0x05,		/* abort has finished */
	terminating = 0x06,	/* terminate requested, still in flight */
	dead = 0x07		/* presumably unrecoverable - confirm semantics with users */
};
79
/* Discriminator for isci_request.ttype_ptr: normal i/o vs. task management. */
enum task_type {
	io_task = 0,	/* ttype_ptr.io_task_ptr is valid */
	tmf_task = 1	/* ttype_ptr.tmf_task_ptr is valid */
};
84
/*
 * Transport protocol carried by a request.
 * XXX remove me, use sas_task.{dev|task_proto} instead
 *
 * Note: the stray ';' that followed the closing brace (an empty
 * file-scope declaration, rejected under -pedantic) has been removed.
 */
enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
};
Dan Williamsf1f52e72011-05-10 02:28:45 -070091
/**
 * struct scic_sds_stp_request - STP/SATA protocol-specific request state,
 * embedded in struct scic_sds_request for STP i/o. Only the union member
 * matching the command's transfer mode (NCQ, UDMA, PIO or packet) is
 * meaningful for a given request.
 */
struct scic_sds_stp_request {
	union {
		u32 ncq;

		u32 udma;

		struct scic_sds_stp_pio_request {
			/*
			 * Total transfer for the entire PIO request,
			 * recorded at request construction time.
			 *
			 * TODO: should we just decrement this value for each
			 * byte of data transmitted or received to eliminate
			 * the current_transfer_bytes field?
			 */
			u32 total_transfer_bytes;

			/*
			 * Total number of bytes received/transmitted in data
			 * frames since the start of the IO request.  At the
			 * end of the IO request this should equal
			 * total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/* The number of bytes requested in the PIO setup. */
			u32 pio_transfer_bytes;

			/*
			 * PIO Setup ending status value: tells us if we need
			 * to wait for another FIS or if the transfer is
			 * complete.  On receipt of a D2H FIS this is the
			 * status field of that FIS.
			 */
			u8 ending_status;

			/*
			 * On receipt of a D2H FIS this is the ending error
			 * field, valid when ending_status has the
			 * SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			/* Walk state over the request's SGL during PIO. */
			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair;
				u8 sgl_set;
				u32 sgl_offset;
			} request_current;
		} pio;

		struct {
			/*
			 * The number of bytes requested in the PIO setup
			 * before the CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};
148
/**
 * struct scic_sds_request - core (SCU-facing) representation of an i/o or
 * task-management request.  Embedded inside struct isci_request (see its
 * 'sci' member and sci_req_to_ireq()).
 */
struct scic_sds_request {
	/* Base request state machine (see enum sci_base_request_states). */
	struct sci_base_state_machine state_machine;

	/* The controller to which this IO request is associated. */
	struct scic_sds_controller *owning_controller;

	/* The remote device to which this IO request is associated. */
	struct scic_sds_remote_device *target_device;

	/*
	 * True when the SCI user is managing the IO tag for this request,
	 * false when the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/*
	 * The IO tag for this request: a task_index plus a sequence count.
	 * The sequence count helps identify tasks from one life to another.
	 */
	u16 io_tag;

	/* The protocol being utilized for this IO request. */
	enum sci_request_protocol protocol;

	/*
	 * Completion status taken from the SCU's completion code; the
	 * completion result for the SCU hardware.
	 */
	u32 scu_status;

	/*
	 * Completion status returned to the SCI user; the user's view of
	 * the io request completion.
	 */
	u32 sci_status;

	/*
	 * Value to be utilized when posting (e.g. Post_TC, Post_TC_Abort)
	 * this request to the silicon.
	 */
	u32 post_context;

	/* Presumably points at 'tc' below once constructed - confirm in request.c. */
	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/* True for task-management requests, false for normal IO. */
	bool is_task_management_request;

	/*
	 * Stored rx frame index, used by STP internal requests and SMP
	 * response frames.  When a frame is saved here it must be released
	 * on IO request completion.
	 *
	 * TODO: in the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/*
	 * The current state handlers in place for this IO Request object;
	 * updated each time the request changes state.
	 */
	const struct scic_sds_io_request_state_handler *state_handlers;

	/*
	 * The recorded device sequence for the io request, recorded during
	 * the build operation and compared in the start operation.  If the
	 * sequence differs there was a change of devices between build and
	 * start.
	 */
	u8 device_sequence;

	/* Protocol-specific command and response areas. */
	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};

};
266
267static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
268{
269 struct scic_sds_request *sci_req;
270
271 sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
272 return sci_req;
273}
274
/**
 * struct isci_request - OS-facing wrapper for an i/o or task-management
 * request; embeds the core request ('sci') as its last member.
 */
struct isci_request {
	enum isci_request_status status;	/* lifecycle state, guarded by state_lock */
	enum task_type ttype;			/* selects which ttype_ptr member is valid */
	unsigned short io_tag;
	bool complete_in_target;	/* the target has completed this i/o */
	bool terminated;		/* core terminate has finished */

	/* Interpreted according to 'ttype' above. */
	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;		/* protects 'status' */
	dma_addr_t request_daddr;	/* dma address of this object itself */
	dma_addr_t zero_scatter_daddr;	/* single mapping used when num_scatter == 0 */

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;
};
310
Dan Williams67ea8382011-05-08 11:47:15 -0700311static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
312{
313 struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);
314
315 return ireq;
316}
317
/**
 * enum sci_base_request_states - all the states for the common request
 * state machine, including the protocol-specific (STP/SSP/SMP) started
 * sub-states.
 */
enum sci_base_request_states {
	/* Simply the initial state for the base request state machine. */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/* The request has been constructed (entered from INITIAL). */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/* The request has been started (entered from CONSTRUCTED). */
	SCI_BASE_REQUEST_STATE_STARTED,

	/* STP UDMA: awaiting TC completion, then the D2H register FIS. */
	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,

	/* STP non-data: awaiting H2D completion, then the D2H response. */
	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,

	/* STP soft-reset sequence sub-states. */
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,

	/*
	 * While in this state the IO request object is waiting for the TC
	 * completion notification for the H2D Register FIS.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,

	/*
	 * While in this state the IO request object is waiting for either a
	 * PIO Setup FIS or a D2H register FIS.  The type of frame received
	 * is based on the result of the prior frame and line conditions.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,

	/*
	 * While in this state the IO request object is waiting for a DATA
	 * frame from the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,

	/*
	 * While in this state the IO request object is waiting to transmit
	 * the next data frame to the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/*
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,

	/*
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/*
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,

	/*
	 * The request has completed (entered from STARTED or from ABORTING).
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/*
	 * The request is in the process of being terminated/aborted
	 * (entered from CONSTRUCTED or from STARTED).
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/* Simply the final state for the base request state machine. */
	SCI_BASE_REQUEST_STATE_FINAL,
};
424
/* Handler invoked with just the request (e.g. completion). */
typedef enum sci_status (*scic_sds_io_request_handler_t)
	(struct scic_sds_request *request);
/* Handler invoked with the request plus a hardware event code. */
typedef enum sci_status (*scic_sds_io_request_event_handler_t)
	(struct scic_sds_request *req, u32 event);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700429
/**
 * struct scic_sds_io_request_state_handler - the SDS core's per-state
 * handler table; the request's state_handlers pointer is switched to the
 * table for its current state.
 */
struct scic_sds_io_request_state_handler {
	/*
	 * The complete_handler specifies the method invoked when a user
	 * attempts to complete a request.
	 */
	scic_sds_io_request_handler_t complete_handler;

	/* Invoked to deliver a hardware event code to the request. */
	scic_sds_io_request_event_handler_t event_handler;
};
445
/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the os handle for this request object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)

/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.  Wrapped in do/while(0) so the multi-statement
 * expansion is safe inside un-braced if/else bodies.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	do { \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	} while (0)

/* Invoke the request's current complete_handler. */
#define scic_sds_request_complete(a_request) \
	((a_request)->state_handlers->complete_handler(a_request))
500
501
/* Process a task-context completion notification from the SCU hardware. */
extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code);
504
/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data.  Wrapped in do/while(0)
 * so the multi-statement expansion is safe inside un-braced if/else bodies.
 */
#define SCU_SGL_ZERO(scu_sge) \
	do { \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	} while (0)
517
/**
 * SCU_SGL_COPY() -
 *
 * This macro copies one OS sg entry into a hardware SGL element.
 * Wrapped in do/while(0) so the multi-statement expansion is safe inside
 * un-braced if/else bodies.
 *
 * NOTE(review): despite taking an @os_sge parameter, this macro reads the
 * caller-scope variable 'sg' (historical quirk); callers currently pass
 * 'sg' as @os_sge.  Kept as-is to avoid changing behavior for any caller
 * relying on that capture - confirm against request.c before cleaning up.
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	do { \
		(scu_sge).length = sg_dma_len(sg); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_modifier = 0; \
	} while (0)
533
/* Start a previously constructed request in the core. */
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
/* Terminate an in-flight io request. */
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
/* Dispatch a hardware event code to the request's state handlers. */
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code);
/* Dispatch an unsolicited frame (by index) to the request. */
enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index);
/* Terminate a task-management request. */
enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
Dan Williamsf1f52e72011-05-10 02:28:45 -0700541
Dan Williamsf1f52e72011-05-10 02:28:45 -0700542/* XXX open code in caller */
543static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
544 dma_addr_t phys_addr)
545{
546 struct isci_request *ireq = sci_req_to_ireq(sci_req);
547 dma_addr_t offset;
548
549 BUG_ON(phys_addr < ireq->request_daddr);
550
551 offset = phys_addr - ireq->request_daddr;
552
553 BUG_ON(offset >= sizeof(*ireq));
554
555 return (char *)ireq + offset;
556}
557
558/* XXX open code in caller */
559static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
560 void *virt_addr)
561{
562 struct isci_request *ireq = sci_req_to_ireq(sci_req);
563
564 char *requested_addr = (char *)virt_addr;
565 char *base_addr = (char *)ireq;
566
567 BUG_ON(requested_addr < base_addr);
568 BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));
569
570 return ireq->request_daddr + (requested_addr - base_addr);
571}
572
/**
 * isci_request_get_state() - get the status of the request object.
 * @isci_request: This parameter points to the isci_request object;
 *	must not be NULL.
 *
 * Return: status of the object as an isci_request_status enum.  Warns
 * when the request is still 'unallocated', which indicates a stale or
 * never-initialized object.
 */
static inline
enum isci_request_status isci_request_get_state(
	struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* An unallocated request should never be queried - probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}
593
594
595/**
596 * isci_request_change_state() - This function sets the status of the request
597 * object.
598 * @request: This parameter points to the isci_request object
599 * @status: This Parameter is the new status of the object
600 *
601 */
602static inline enum isci_request_status isci_request_change_state(
603 struct isci_request *isci_request,
604 enum isci_request_status status)
605{
606 enum isci_request_status old_state;
607 unsigned long flags;
608
609 dev_dbg(&isci_request->isci_host->pdev->dev,
610 "%s: isci_request = %p, state = 0x%x\n",
611 __func__,
612 isci_request,
613 status);
614
615 BUG_ON(isci_request == NULL);
616
617 spin_lock_irqsave(&isci_request->state_lock, flags);
618 old_state = isci_request->status;
619 isci_request->status = status;
620 spin_unlock_irqrestore(&isci_request->state_lock, flags);
621
622 return old_state;
623}
624
/**
 * isci_request_change_started_to_newstate() - atomically move a request
 * from 'started' (or 'aborting') to @newstate.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: completion to be signalled when the request finishes;
 *	only installed if the state change is actually performed
 * @newstate: This Parameter is the new status of the object
 *
 * The transition (and the installation of @completion_ptr) only happens
 * when the request is currently 'started' or 'aborting'; otherwise the
 * request is left untouched.
 *
 * Return: state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_newstate(
	struct isci_request *isci_request,
	struct completion *completion_ptr,
	enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		/* No completion may already be pending on this request. */
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}
661
662/**
663 * isci_request_change_started_to_aborted() - This function sets the status of
664 * the request object.
665 * @request: This parameter points to the isci_request object
666 * @completion_ptr: This parameter is saved as the kernel completion structure
667 * signalled when the old request completes.
668 *
669 * state previous to any change.
670 */
671static inline enum isci_request_status isci_request_change_started_to_aborted(
672 struct isci_request *isci_request,
673 struct completion *completion_ptr)
674{
675 return isci_request_change_started_to_newstate(
676 isci_request, completion_ptr, aborted
677 );
678}
/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object;
 *	NULL is silently ignored.
 */
static inline void isci_request_free(
	struct isci_host *isci_host,
	struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* Return the dma-pool block backing this request to the pool. */
	dma_pool_free(isci_host->dma_pool, isci_request,
		      isci_request->request_daddr);
}
696
697
/*
 * ISCI_REQUEST_VALIDATE_ACCESS: when defined, the accessors below BUG when
 * the request's ttype does not match the union member being read.
 */
/* #define ISCI_REQUEST_VALIDATE_ACCESS
 */

#ifdef ISCI_REQUEST_VALIDATE_ACCESS

/* Checked accessor: only valid when ttype == io_task. */
static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

/* Checked accessor: only valid when ttype == tmf_task. */
static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else /* not ISCI_REQUEST_VALIDATE_ACCESS */

/* Unchecked accessors; the caller must know the request's ttype. */
#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */
726
727
/* Allocate and construct a request object for a task-management function. */
int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);

/* Build and start an i/o request for a libsas sas_task. */
int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);
741
/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 * sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This Parameter is the pci_device struct for the controller
 *
 * No-op for ATA protocols and for requests with no data direction.
 */
static inline void isci_request_unmap_sgl(
	struct isci_request *request,
	struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n ",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	/* NOTE(review): data_dir is an enum dma_data_direction but is
	 * compared against PCI_DMA_NONE; the numeric values match, though
	 * DMA_NONE would be the type-correct constant - confirm before
	 * changing. */
	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir
				);

		else	/* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir
				);
	}
}
784
/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 * core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *	core for this request, or NULL on the first call.
 *
 * Return: pointer to the next sge for the specified request, or NULL when
 * the list is exhausted.
 *
 * NOTE(review): the sge travels through a void * here but is handed to
 * sg_next(), so it is presumably a struct scatterlist * - confirm against
 * the sci-core caller.
 */
static inline void *isci_request_io_request_get_next_sge(
	struct isci_request *request,
	void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through.. */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0)	/* Next element, if num_scatter == 0 */
		ret = NULL;			/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}
826
/* Move a device's pending requests to @new_request_state and terminate
 * them in the core. */
void isci_terminate_pending_requests(struct isci_host *isci_host,
				     struct isci_remote_device *isci_device,
				     enum isci_request_status new_request_state);
/* Core constructors for task-management requests. */
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag,
					    struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
/* Record the NCQ tag for an STP request. */
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
/* Copy a received SMP response frame into the request's response buffer. */
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
Dan Williams6f231dd2011-07-02 22:56:22 -0700838#endif /* !defined(_ISCI_REQUEST_H_) */