/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ISCI_REQUEST_H_
#define _ISCI_REQUEST_H_

#include "isci.h"
#include "host.h"
#include "scu_task_context.h"

/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.
 */
enum isci_request_status {
	unallocated = 0x00,
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};

enum task_type {
	io_task = 0,
	tmf_task = 1
};

enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
}; /* XXX remove me, use sas_task.{dev|task_proto} instead */

struct scic_sds_stp_request {
	union {
		u32 ncq;

		u32 udma;

		struct scic_sds_stp_pio_request {
			/**
			 * Total transfer for the entire PIO request recorded at
			 * request construction time.
			 *
			 * @todo Should we just decrement this value for each byte of
			 * data transmitted or received to eliminate the
			 * current_transfer_bytes field?
			 */
			u32 total_transfer_bytes;

			/**
			 * Total number of bytes received/transmitted in data frames
			 * since the start of the IO request.  At the end of the IO
			 * request this should equal the total_transfer_bytes.
			 */
			u32 current_transfer_bytes;

			/**
			 * The number of bytes requested in the PIO setup.
			 */
			u32 pio_transfer_bytes;

			/**
			 * PIO Setup ending status value to tell us if we need to wait
			 * for another FIS or if the transfer is complete.  On the
			 * receipt of a D2H FIS this will be the status field of that
			 * FIS.
			 */
			u8 ending_status;

			/**
			 * On receipt of a D2H FIS this will be the ending error field
			 * if the ending_status has the SATA_STATUS_ERR bit set.
			 */
			u8 ending_error;

			struct scic_sds_request_pio_sgl {
				struct scu_sgl_element_pair *sgl_pair;
				u8 sgl_set;
				u32 sgl_offset;
			} request_current;
		} pio;

		struct {
			/**
			 * The number of bytes requested in the PIO setup before the
			 * CDB data frame.
			 */
			u32 device_preferred_cdb_length;
		} packet;
	} type;
};

struct scic_sds_request {
	/**
	 * This field contains the information for the base request state machine.
	 */
	struct sci_base_state_machine state_machine;

	/**
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/**
	 * This field simply points to the remote device to which this IO request
	 * is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/**
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/**
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence count
	 * is utilized to help identify tasks from one life to another (see the
	 * illustrative sketch following this structure).
	 */
	u16 io_tag;

	/**
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/**
	 * This field indicates the completion status taken from the SCU's
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/**
	 * This field indicates the completion status returned to the SCI user.
	 * It indicates the user's view of the IO request completion.
	 */
	u32 sci_status;

	/**
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/**
	 * This field indicates if this request is a task management request or
	 * a normal IO request.
	 */
	bool is_task_management_request;

	/**
	 * This field is a pointer to the stored rx frame data.  It is used in
	 * STP internal requests and SMP response frames.  If this field is
	 * non-NULL the saved frame must be released on IO request completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/**
	 * This field is the recorded device sequence for the IO request.  It is
	 * recorded during the build operation and is compared in the start
	 * operation.  If the sequence is different then there was a change of
	 * devices from the build to the start operation.
	 */
	u8 device_sequence;

	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};

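/*
 * Illustrative sketch only (not the driver's actual helpers): the io_tag
 * above packs a task context index together with a sequence count.  Assuming
 * a hypothetical 12-bit index / 4-bit sequence layout, pack/unpack helpers
 * would look roughly like:
 *
 *	#define EXAMPLE_IO_TAG(seq, index)	((u16)(((seq) << 12) | (index)))
 *	#define EXAMPLE_IO_TAG_SEQ(tag)		(((tag) >> 12) & 0xf)
 *	#define EXAMPLE_IO_TAG_INDEX(tag)	((tag) & 0xfff)
 *
 * A request whose sequence no longer matches the core's bookkeeping can then
 * be recognized as belonging to a prior "life" of the same task index.
 */
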
static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
{
	struct scic_sds_request *sci_req;

	sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
	return sci_req;
}

struct isci_request {
	enum isci_request_status status;
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task  */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;
};

static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);

	return ireq;
}

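/*
 * Layout note (derived from the structures above): an isci_request embeds the
 * core scic_sds_request in its 'sci' member, which in turn embeds the
 * scic_sds_stp_request in 'stp.req'.  The two container_of() helpers simply
 * walk back up that containment chain, e.g.:
 *
 *	struct isci_request *ireq = sci_req_to_ireq(to_sci_req(stp_req));
 */
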
/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 */
enum sci_base_request_states {
	/**
	 * Simply the initial state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/**
	 * This state indicates that the request has been constructed.  This
	 * state is entered from the INITIAL state.
	 */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/**
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_BASE_REQUEST_STATE_STARTED,

	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_TC_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_UDMA_AWAIT_D2H_REG_FIS_SUBSTATE,

	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_H2D_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_NON_DATA_AWAIT_D2H_SUBSTATE,

	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_ASSERTED_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_H2D_DIAGNOSTIC_COMPLETION_SUBSTATE,
	SCIC_SDS_STP_REQUEST_STARTED_SOFT_RESET_AWAIT_D2H_RESPONSE_FRAME_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for the TC
	 * completion notification for the H2D Register FIS.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_H2D_COMPLETION_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for either a
	 * PIO Setup FIS or a D2H Register FIS.  The type of frame received is
	 * based on the result of the prior frame and line conditions.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_AWAIT_FRAME_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting for a DATA
	 * frame from the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_IN_AWAIT_DATA_SUBSTATE,

	/**
	 * While in this state the IO request object is waiting to transmit the
	 * next data frame to the device.
	 */
	SCIC_SDS_STP_REQUEST_STARTED_PIO_DATA_OUT_TRANSMIT_DATA_SUBSTATE,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/**
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,

	/**
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. the response frame).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,

	/**
	 * This state indicates that the request has completed.
	 * This state is entered from the STARTED state.  This state is entered
	 * from the ABORTING state.
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/**
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.
	 * This state is entered from the CONSTRUCTED state.
	 * This state is entered from the STARTED state.
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/**
	 * Simply the final state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_FINAL,
};

/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the task context buffer for this request
 * object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)

/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	{ \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	}

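/*
 * Illustrative (hypothetical) use of scic_sds_request_set_status() from a
 * completion path, recording both the raw hardware result and the status
 * reported back to the SCI user:
 *
 *	scic_sds_request_set_status(sci_req, SCU_TASK_DONE_GOOD, SCI_SUCCESS);
 */
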
/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data
 */
#define SCU_SGL_ZERO(scu_sge) \
	{ \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	}

/**
 * SCU_SGL_COPY() -
 *
 * This macro copies the SGL element data from the host OS to the hardware
 * SGL element data
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	{ \
		(scu_sge).length = sg_dma_len(sg); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(sg)); \
		(scu_sge).address_modifier = 0; \
	}

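/*
 * Illustrative sketch of how a hardware SGL pair might be filled (assuming
 * the A/B members of struct scu_sgl_element_pair).  Note that SCU_SGL_COPY()
 * reads a local scatterlist pointer named 'sg' rather than its 'os_sge'
 * argument, so the caller must keep such a variable in scope:
 *
 *	struct scatterlist *sg = task->scatter;
 *
 *	SCU_SGL_COPY(scu_sg->A, sg);
 *	sg = sg_next(sg);
 *	if (sg)
 *		SCU_SGL_COPY(scu_sg->B, sg);
 *	else
 *		SCU_SGL_ZERO(scu_sg->B);
 */
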
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code);
enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index);
enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
extern enum sci_status scic_sds_request_complete(struct scic_sds_request *sci_req);
extern enum sci_status scic_sds_io_request_tc_completion(struct scic_sds_request *sci_req, u32 code);

/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
					       dma_addr_t phys_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	dma_addr_t offset;

	BUG_ON(phys_addr < ireq->request_daddr);

	offset = phys_addr - ireq->request_daddr;

	BUG_ON(offset >= sizeof(*ireq));

	return (char *)ireq + offset;
}

/* XXX open code in caller */
static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
						       void *virt_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	char *requested_addr = (char *)virt_addr;
	char *base_addr = (char *)ireq;

	BUG_ON(requested_addr < base_addr);
	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));

	return ireq->request_daddr + (requested_addr - base_addr);
}

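/*
 * Illustrative (hypothetical) call site: since the isci_request comes from a
 * DMA pool, the bus address of any member embedded in it can be derived by
 * offsetting from request_daddr, e.g. to hand the SSP command IU to the
 * hardware:
 *
 *	dma_addr_t cmd_dma =
 *		scic_io_request_get_dma_addr(sci_req, &sci_req->ssp.cmd);
 */
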
/**
 * isci_request_get_state() - This function gets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 *
 * status of the object as an isci_request_status enum.
 */
static inline
enum isci_request_status isci_request_get_state(
	struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}

/**
 * isci_request_change_state() - This function sets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new status of the object
 *
 */
static inline enum isci_request_status isci_request_change_state(
	struct isci_request *isci_request,
	enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	BUG_ON(isci_request == NULL);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}

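/*
 * Illustrative (hypothetical) call site: mark a request as started once it is
 * handed to the hardware, remembering the state it was in beforehand:
 *
 *	enum isci_request_status old = isci_request_change_state(ireq, started);
 */
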
/**
 * isci_request_change_started_to_newstate() - This function sets the status
 *    of the request object if it is currently started or aborting.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the request's completion
 *    structure if the state change is performed.
 * @newstate: This parameter is the new status of the object
 *
 * state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_newstate(
	struct isci_request *isci_request,
	struct completion *completion_ptr,
	enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}

/**
 * isci_request_change_started_to_aborted() - This function sets the status of
 *    the request object.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 *
 * state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_aborted(
	struct isci_request *isci_request,
	struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(
		isci_request, completion_ptr, aborted
		);
}
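
/*
 * Illustrative (hypothetical) aborter flow tying the helper above to the
 * io_request_completion note in struct isci_request: the aborting thread
 * stores a completion in the request and then blocks until it is signalled:
 *
 *	struct completion done;
 *
 *	init_completion(&done);
 *	if (isci_request_change_started_to_aborted(ireq, &done) == started) {
 *		// ...post the terminate/abort to the hardware...
 *		wait_for_completion(&done);
 *	}
 */
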
/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object
 *
 */
static inline void isci_request_free(
	struct isci_host *isci_host,
	struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* release the dma memory. */
	dma_pool_free(isci_host->dma_pool, isci_request,
		      isci_request->request_daddr);
}


/* #define ISCI_REQUEST_VALIDATE_ACCESS
 */

#ifdef ISCI_REQUEST_VALIDATE_ACCESS

static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else /* not ISCI_REQUEST_VALIDATE_ACCESS */

#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */


int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);


int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);

/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *    sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_dev struct for the controller
 *
 */
static inline void isci_request_unmap_sgl(
	struct isci_request *request,
	struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n ",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir
				);

		else /* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir
				);
	}
}

/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *    core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *    core for this request.
 *
 * pointer to the next sge for specified request.
 */
static inline void *isci_request_io_request_get_next_sge(
	struct isci_request *request,
	void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through.. */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0) /* Next element, if num_scatter == 0 */
		ret = NULL;		 /* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}

void isci_terminate_pending_requests(struct isci_host *isci_host,
				     struct isci_remote_device *isci_device,
				     enum isci_request_status new_request_state);
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag,
					    struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
#endif /* !defined(_ISCI_REQUEST_H_) */