/*
 * This file is provided under a dual BSD/GPLv2 license.  When using or
 * redistributing this file, you may do so under either license.
 *
 * GPL LICENSE SUMMARY
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 * The full GNU General Public License is included in this distribution
 * in the file called LICENSE.GPL.
 *
 * BSD LICENSE
 *
 * Copyright(c) 2008 - 2011 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _ISCI_REQUEST_H_
#define _ISCI_REQUEST_H_

#include "isci.h"
#include "host.h"
#include "scu_task_context.h"
#include "stp_request.h"

/**
 * enum isci_request_status - This enum defines the possible states of an I/O
 *    request.
 */
enum isci_request_status {
	unallocated = 0x00,
	allocated = 0x01,
	started = 0x02,
	completed = 0x03,
	aborting = 0x04,
	aborted = 0x05,
	terminating = 0x06,
	dead = 0x07
};

enum task_type {
	io_task = 0,
	tmf_task = 1
};

enum sci_request_protocol {
	SCIC_NO_PROTOCOL,
	SCIC_SMP_PROTOCOL,
	SCIC_SSP_PROTOCOL,
	SCIC_STP_PROTOCOL
}; /* XXX remove me, use sas_task.dev instead */

struct scic_sds_request {
	/**
	 * This field contains the information for the base request state machine.
	 */
	struct sci_base_state_machine state_machine;

	/**
	 * This field simply points to the controller to which this IO request
	 * is associated.
	 */
	struct scic_sds_controller *owning_controller;

	/**
	 * This field simply points to the remote device to which this IO request
	 * is associated.
	 */
	struct scic_sds_remote_device *target_device;

	/**
	 * This field is utilized to determine if the SCI user is managing
	 * the IO tag for this request or if the core is managing it.
	 */
	bool was_tag_assigned_by_user;

	/**
	 * This field indicates the IO tag for this request.  The IO tag is
	 * comprised of the task_index and a sequence count.  The sequence count
	 * is utilized to help identify tasks from one life to another.
	 */
	u16 io_tag;

	/**
	 * This field specifies the protocol being utilized for this
	 * IO request.
	 */
	enum sci_request_protocol protocol;

	/**
	 * This field indicates the completion status taken from the SCU's
	 * completion code.  It indicates the completion result for the SCU
	 * hardware.
	 */
	u32 scu_status;

	/**
	 * This field indicates the completion status returned to the SCI user.
	 * It indicates the user's view of the IO request completion.
	 */
	u32 sci_status;

	/**
	 * This field contains the value to be utilized when posting
	 * (e.g. Post_TC, Post_TC_Abort) this request to the silicon.
	 */
	u32 post_context;

	struct scu_task_context *task_context_buffer;
	struct scu_task_context tc ____cacheline_aligned;

	/* could be larger with sg chaining */
	#define SCU_SGL_SIZE ((SCU_IO_REQUEST_SGE_COUNT + 1) / 2)
	struct scu_sgl_element_pair sg_table[SCU_SGL_SIZE] __attribute__ ((aligned(32)));

	/**
	 * This field indicates if this request is a task management request or
	 * a normal IO request.
	 */
	bool is_task_management_request;

	/**
	 * This field indicates that this request contains an initialized started
	 * substate machine.
	 */
	bool has_started_substate_machine;

	/**
	 * This field is the saved rx frame index.  It is used by STP internal
	 * requests and SMP response frames.  If a frame has been saved here, it
	 * must be released on IO request completion.
	 *
	 * @todo In the future do we want to keep a list of RX frame buffers?
	 */
	u32 saved_rx_frame_index;

	/**
	 * This field specifies the data necessary to manage the sub-state
	 * machine executed while in the SCI_BASE_REQUEST_STATE_STARTED state.
	 */
	struct sci_base_state_machine started_substate_machine;

	/**
	 * This field specifies the current state handlers in place for this
	 * IO Request object.  This field is updated each time the request
	 * changes state.
	 */
	const struct scic_sds_io_request_state_handler *state_handlers;

	/**
	 * This field is the recorded device sequence for the IO request.  It is
	 * recorded during the build operation and is compared in the start
	 * operation.  If the sequence is different then there was a change of
	 * devices from the build to the start operation.
	 */
	u8 device_sequence;

	union {
		struct {
			union {
				struct ssp_cmd_iu cmd;
				struct ssp_task_iu tmf;
			};
			union {
				struct ssp_response_iu rsp;
				u8 rsp_buf[SSP_RESP_IU_MAX_SIZE];
			};
		} ssp;

		struct {
			struct smp_req cmd;
			struct smp_resp rsp;
		} smp;

		struct {
			struct scic_sds_stp_request req;
			struct host_to_dev_fis cmd;
			struct dev_to_host_fis rsp;
		} stp;
	};
};

static inline struct scic_sds_request *to_sci_req(struct scic_sds_stp_request *stp_req)
{
	struct scic_sds_request *sci_req;

	sci_req = container_of(stp_req, typeof(*sci_req), stp.req);
	return sci_req;
}

struct isci_request {
	enum isci_request_status status;
	enum task_type ttype;
	unsigned short io_tag;
	bool complete_in_target;
	bool terminated;

	union ttype_ptr_union {
		struct sas_task *io_task_ptr;	/* When ttype==io_task  */
		struct isci_tmf *tmf_task_ptr;	/* When ttype==tmf_task */
	} ttype_ptr;
	struct isci_host *isci_host;
	struct isci_remote_device *isci_device;
	/* For use in the requests_to_{complete|abort} lists: */
	struct list_head completed_node;
	/* For use in the reqs_in_process list: */
	struct list_head dev_node;
	spinlock_t state_lock;
	dma_addr_t request_daddr;
	dma_addr_t zero_scatter_daddr;

	unsigned int num_sg_entries;	/* returned by pci_alloc_sg */

	/** Note: "io_request_completion" is completed in two different ways
	 * depending on whether this is a TMF or regular request.
	 * - TMF requests are completed in the thread that started them;
	 * - regular requests are completed in the request completion callback
	 *   function.
	 * This difference in operation allows the aborter of a TMF request
	 * to be sure that once the TMF request completes, the I/O that the
	 * TMF was aborting is guaranteed to have completed.
	 */
	struct completion *io_request_completion;
	struct scic_sds_request sci;
};


static inline struct isci_request *sci_req_to_ireq(struct scic_sds_request *sci_req)
{
	struct isci_request *ireq = container_of(sci_req, typeof(*ireq), sci);

	return ireq;
}

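/*
 * Note on object layering (illustrative only, based on the definitions
 * above): an isci_request embeds its scic_sds_request as ->sci, which in
 * turn embeds the STP-specific state as ->stp.req.  The container_of()
 * helpers walk back up the chain without storing extra pointers, e.g.:
 *
 *	struct scic_sds_stp_request *stp_req = &ireq->sci.stp.req;
 *	struct scic_sds_request *sci_req = to_sci_req(stp_req);
 *	struct isci_request *again = sci_req_to_ireq(sci_req);
 *	// again == ireq
 */
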
/**
 * enum sci_base_request_states - This enumeration depicts all the states for
 *    the common request state machine.
 */
enum sci_base_request_states {
	/**
	 * Simply the initial state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_INITIAL,

	/**
	 * This state indicates that the request has been constructed.  This
	 * state is entered from the INITIAL state.
	 */
	SCI_BASE_REQUEST_STATE_CONSTRUCTED,

	/**
	 * This state indicates that the request has been started.  This state
	 * is entered from the CONSTRUCTED state.
	 */
	SCI_BASE_REQUEST_STATE_STARTED,

	/**
	 * This state indicates that the request has completed.  This state is
	 * entered from the STARTED state and from the ABORTING state.
	 */
	SCI_BASE_REQUEST_STATE_COMPLETED,

	/**
	 * This state indicates that the request is in the process of being
	 * terminated/aborted.  This state is entered from the CONSTRUCTED
	 * state and from the STARTED state.
	 */
	SCI_BASE_REQUEST_STATE_ABORTING,

	/**
	 * Simply the final state for the base request state machine.
	 */
	SCI_BASE_REQUEST_STATE_FINAL,
};

typedef enum sci_status (*scic_sds_io_request_handler_t)
	(struct scic_sds_request *request);
typedef enum sci_status (*scic_sds_io_request_frame_handler_t)
	(struct scic_sds_request *req, u32 frame);
typedef enum sci_status (*scic_sds_io_request_event_handler_t)
	(struct scic_sds_request *req, u32 event);
typedef enum sci_status (*scic_sds_io_request_task_completion_handler_t)
	(struct scic_sds_request *req, u32 completion_code);

/**
 * struct scic_sds_io_request_state_handler - This is the SDS core definition
 *    of the state handlers.
 */
struct scic_sds_io_request_state_handler {
	/**
	 * The start_handler specifies the method invoked when a user attempts to
	 * start a request.
	 */
	scic_sds_io_request_handler_t start_handler;

	/**
	 * The abort_handler specifies the method invoked when a user attempts to
	 * abort a request.
	 */
	scic_sds_io_request_handler_t abort_handler;

	/**
	 * The complete_handler specifies the method invoked when a user attempts
	 * to complete a request.
	 */
	scic_sds_io_request_handler_t complete_handler;

	scic_sds_io_request_task_completion_handler_t tc_completion_handler;
	scic_sds_io_request_event_handler_t event_handler;
	scic_sds_io_request_frame_handler_t frame_handler;
};

extern const struct sci_base_state scic_sds_io_request_started_task_mgmt_substate_table[];

/**
 * scic_sds_request_get_controller() -
 *
 * This macro will return the controller for this io request object
 */
#define scic_sds_request_get_controller(sci_req) \
	((sci_req)->owning_controller)

/**
 * scic_sds_request_get_device() -
 *
 * This macro will return the device for this io request object
 */
#define scic_sds_request_get_device(sci_req) \
	((sci_req)->target_device)

/**
 * scic_sds_request_get_port() -
 *
 * This macro will return the port for this io request object
 */
#define scic_sds_request_get_port(sci_req) \
	scic_sds_remote_device_get_port(scic_sds_request_get_device(sci_req))

/**
 * scic_sds_request_get_post_context() -
 *
 * This macro returns the constructed post context result for the io request.
 */
#define scic_sds_request_get_post_context(sci_req) \
	((sci_req)->post_context)

/**
 * scic_sds_request_get_task_context() -
 *
 * This is a helper macro to return the os handle for this request object.
 */
#define scic_sds_request_get_task_context(request) \
	((request)->task_context_buffer)

/**
 * scic_sds_request_set_status() -
 *
 * This macro will set the scu hardware status and sci request completion
 * status for an io request.
 */
#define scic_sds_request_set_status(request, scu_status_code, sci_status_code) \
	{ \
		(request)->scu_status = (scu_status_code); \
		(request)->sci_status = (sci_status_code); \
	}

#define scic_sds_request_complete(a_request) \
	((a_request)->state_handlers->complete_handler(a_request))

extern enum sci_status
scic_sds_io_request_tc_completion(struct scic_sds_request *request, u32 completion_code);

/**
 * SCU_SGL_ZERO() -
 *
 * This macro zeros the hardware SGL element data
 */
#define SCU_SGL_ZERO(scu_sge) \
	{ \
		(scu_sge).length = 0; \
		(scu_sge).address_lower = 0; \
		(scu_sge).address_upper = 0; \
		(scu_sge).address_modifier = 0; \
	}

/**
 * SCU_SGL_COPY() -
 *
 * This macro copies the SGL element data from the host OS to the hardware
 * SGL element data
 */
#define SCU_SGL_COPY(scu_sge, os_sge) \
	{ \
		(scu_sge).length = sg_dma_len(os_sge); \
		(scu_sge).address_upper = \
			upper_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_lower = \
			lower_32_bits(sg_dma_address(os_sge)); \
		(scu_sge).address_modifier = 0; \
	}

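/*
 * Illustrative sketch only (the driver's real SGL construction lives in
 * scic_sds_request_build_sgl()): the macros above are meant to fill the
 * request's sg_table from a DMA-mapped scatterlist, two elements per pair,
 * assuming scu_sgl_element_pair exposes its two elements as A and B:
 *
 *	struct scu_sgl_element_pair *pair = &sci_req->sg_table[0];
 *	struct scatterlist *sg = task->scatter;
 *
 *	while (sg) {
 *		SCU_SGL_COPY(pair->A, sg);
 *		sg = sg_next(sg);
 *		if (sg) {
 *			SCU_SGL_COPY(pair->B, sg);
 *			sg = sg_next(sg);
 *		} else {
 *			SCU_SGL_ZERO(pair->B);
 *		}
 *		pair++;
 *	}
 */
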
void scic_sds_request_build_sgl(struct scic_sds_request *sci_req);
void scic_sds_stp_request_assign_buffers(struct scic_sds_request *sci_req);
void scic_sds_smp_request_assign_buffers(struct scic_sds_request *sci_req);
enum sci_status scic_sds_request_start(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_terminate(struct scic_sds_request *sci_req);
void scic_sds_io_request_copy_response(struct scic_sds_request *sci_req);
enum sci_status scic_sds_io_request_event_handler(struct scic_sds_request *sci_req,
						  u32 event_code);
enum sci_status scic_sds_io_request_frame_handler(struct scic_sds_request *sci_req,
						  u32 frame_index);
enum sci_status scic_sds_task_request_terminate(struct scic_sds_request *sci_req);
enum sci_status scic_sds_request_started_state_abort_handler(struct scic_sds_request *sci_req);

/**
 * enum scic_sds_raw_request_started_task_mgmt_substates - This enumeration
 *    depicts all of the substates for a task management request to be
 *    performed in the STARTED super-state.
 */
enum scic_sds_raw_request_started_task_mgmt_substates {
	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started raw
	 * task management request is waiting for the transmission of the
	 * initial frame (i.e. command, task, etc.).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_COMPLETION,

	/**
	 * This sub-state indicates that the started task management request
	 * is waiting for the reception of an unsolicited frame
	 * (i.e. response IU).
	 */
	SCIC_SDS_IO_REQUEST_STARTED_TASK_MGMT_SUBSTATE_AWAIT_TC_RESPONSE,
};

/**
 * enum scic_sds_smp_request_started_substates - This enumeration depicts all
 *    of the substates for an SMP request to be performed in the STARTED
 *    super-state.
 */
enum scic_sds_smp_request_started_substates {
	/**
	 * This sub-state indicates that the started SMP request is waiting
	 * for the reception of an unsolicited frame (i.e. response IU).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_RESPONSE,

	/**
	 * The AWAIT_TC_COMPLETION sub-state indicates that the started SMP
	 * request is waiting for the transmission of the initial frame
	 * (i.e. command, task, etc.).
	 */
	SCIC_SDS_SMP_REQUEST_STARTED_SUBSTATE_AWAIT_TC_COMPLETION,
};

/* XXX open code in caller */
static inline void *scic_request_get_virt_addr(struct scic_sds_request *sci_req,
					       dma_addr_t phys_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);
	dma_addr_t offset;

	BUG_ON(phys_addr < ireq->request_daddr);

	offset = phys_addr - ireq->request_daddr;

	BUG_ON(offset >= sizeof(*ireq));

	return (char *)ireq + offset;
}

/* XXX open code in caller */
static inline dma_addr_t scic_io_request_get_dma_addr(struct scic_sds_request *sci_req,
						      void *virt_addr)
{
	struct isci_request *ireq = sci_req_to_ireq(sci_req);

	char *requested_addr = (char *)virt_addr;
	char *base_addr = (char *)ireq;

	BUG_ON(requested_addr < base_addr);
	BUG_ON((requested_addr - base_addr) >= sizeof(*ireq));

	return ireq->request_daddr + (requested_addr - base_addr);
}

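/*
 * Both helpers above rely on the whole isci_request (including the embedded
 * scic_sds_request) living in a single DMA-able allocation whose bus address
 * is request_daddr, so a virtual address and its DMA address differ only by
 * a constant offset.  Illustrative round trip using the embedded SSP command
 * IU (illustrative only, not a required usage pattern):
 *
 *	dma_addr_t dma = scic_io_request_get_dma_addr(sci_req, &sci_req->ssp.cmd);
 *	void *virt = scic_request_get_virt_addr(sci_req, dma);
 *	// virt == &sci_req->ssp.cmd
 */
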
/**
 * isci_request_get_state() - This function gets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 *
 * status of the object as an isci_request_status enum.
 */
static inline
enum isci_request_status isci_request_get_state(
	struct isci_request *isci_request)
{
	BUG_ON(isci_request == NULL);

	/* probably a bad sign... */
	if (isci_request->status == unallocated)
		dev_warn(&isci_request->isci_host->pdev->dev,
			 "%s: isci_request->status == unallocated\n",
			 __func__);

	return isci_request->status;
}

/**
 * isci_request_change_state() - This function sets the status of the request
 *    object.
 * @isci_request: This parameter points to the isci_request object
 * @status: This parameter is the new status of the object
 *
 * state previous to any change.
 */
static inline enum isci_request_status isci_request_change_state(
	struct isci_request *isci_request,
	enum isci_request_status status)
{
	enum isci_request_status old_state;
	unsigned long flags;

	BUG_ON(isci_request == NULL);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, state = 0x%x\n",
		__func__,
		isci_request,
		status);

	spin_lock_irqsave(&isci_request->state_lock, flags);
	old_state = isci_request->status;
	isci_request->status = status;
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	return old_state;
}

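/*
 * Hypothetical caller sketch (the real call sites are in the driver's .c
 * files): the returned value lets the caller act on the state the request
 * was in before the transition.
 *
 *	enum isci_request_status old_state;
 *
 *	old_state = isci_request_change_state(isci_request, aborting);
 *	if (old_state == started)
 *		;	// the request was still in flight when aborted
 */
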
/**
 * isci_request_change_started_to_newstate() - This function sets the status
 *    of the request object.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 * @newstate: This parameter is the new status of the object
 *
 * state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_newstate(
	struct isci_request *isci_request,
	struct completion *completion_ptr,
	enum isci_request_status newstate)
{
	enum isci_request_status old_state;
	unsigned long flags;

	spin_lock_irqsave(&isci_request->state_lock, flags);

	old_state = isci_request->status;

	if (old_state == started || old_state == aborting) {
		BUG_ON(isci_request->io_request_completion != NULL);

		isci_request->io_request_completion = completion_ptr;
		isci_request->status = newstate;
	}
	spin_unlock_irqrestore(&isci_request->state_lock, flags);

	dev_dbg(&isci_request->isci_host->pdev->dev,
		"%s: isci_request = %p, old_state = 0x%x\n",
		__func__,
		isci_request,
		old_state);

	return old_state;
}

/**
 * isci_request_change_started_to_aborted() - This function sets the status of
 *    the request object.
 * @isci_request: This parameter points to the isci_request object
 * @completion_ptr: This parameter is saved as the kernel completion structure
 *    signalled when the old request completes.
 *
 * state previous to any change.
 */
static inline enum isci_request_status isci_request_change_started_to_aborted(
	struct isci_request *isci_request,
	struct completion *completion_ptr)
{
	return isci_request_change_started_to_newstate(
		isci_request, completion_ptr, aborted);
}

/**
 * isci_request_free() - This function frees the request object.
 * @isci_host: This parameter specifies the ISCI host object
 * @isci_request: This parameter points to the isci_request object
 *
 */
static inline void isci_request_free(
	struct isci_host *isci_host,
	struct isci_request *isci_request)
{
	if (!isci_request)
		return;

	/* release the dma memory if we fail. */
	dma_pool_free(isci_host->dma_pool, isci_request,
		      isci_request->request_daddr);
}

/* #define ISCI_REQUEST_VALIDATE_ACCESS
 */

#ifdef ISCI_REQUEST_VALIDATE_ACCESS

static inline
struct sas_task *isci_request_access_task(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != io_task);
	return isci_request->ttype_ptr.io_task_ptr;
}

static inline
struct isci_tmf *isci_request_access_tmf(struct isci_request *isci_request)
{
	BUG_ON(isci_request->ttype != tmf_task);
	return isci_request->ttype_ptr.tmf_task_ptr;
}

#else /* not ISCI_REQUEST_VALIDATE_ACCESS */

#define isci_request_access_task(RequestPtr) \
	((RequestPtr)->ttype_ptr.io_task_ptr)

#define isci_request_access_tmf(RequestPtr) \
	((RequestPtr)->ttype_ptr.tmf_task_ptr)

#endif /* not ISCI_REQUEST_VALIDATE_ACCESS */

int isci_request_alloc_tmf(
	struct isci_host *isci_host,
	struct isci_tmf *isci_tmf,
	struct isci_request **isci_request,
	struct isci_remote_device *isci_device,
	gfp_t gfp_flags);

int isci_request_execute(
	struct isci_host *isci_host,
	struct sas_task *task,
	struct isci_request **request,
	gfp_t gfp_flags);

/**
 * isci_request_unmap_sgl() - This function unmaps the DMA address of a given
 *    sgl
 * @request: This parameter points to the isci_request object
 * @pdev: This parameter is the pci_dev struct for the controller
 *
 */
static inline void isci_request_unmap_sgl(
	struct isci_request *request,
	struct pci_dev *pdev)
{
	struct sas_task *task = isci_request_access_task(request);

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, task = %p,\n"
		"task->data_dir = %d, is_sata = %d\n ",
		__func__,
		request,
		task,
		task->data_dir,
		sas_protocol_ata(task->task_proto));

	if ((task->data_dir != PCI_DMA_NONE) &&
	    !sas_protocol_ata(task->task_proto)) {
		if (task->num_scatter == 0)
			/* 0 indicates a single dma address */
			dma_unmap_single(
				&pdev->dev,
				request->zero_scatter_daddr,
				task->total_xfer_len,
				task->data_dir);
		else /* unmap the sgl dma addresses */
			dma_unmap_sg(
				&pdev->dev,
				task->scatter,
				request->num_sg_entries,
				task->data_dir);
	}
}

/**
 * isci_request_io_request_get_next_sge() - This function is called by the sci
 *    core to retrieve the next sge for a given request.
 * @request: This parameter is the isci_request object.
 * @current_sge_address: This parameter is the last sge retrieved by the sci
 *    core for this request.
 *
 * pointer to the next sge for specified request.
 */
static inline void *isci_request_io_request_get_next_sge(
	struct isci_request *request,
	void *current_sge_address)
{
	struct sas_task *task = isci_request_access_task(request);
	void *ret = NULL;

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: request = %p, "
		"current_sge_address = %p, "
		"num_scatter = %d\n",
		__func__,
		request,
		current_sge_address,
		task->num_scatter);

	if (!current_sge_address)	/* First time through... */
		ret = task->scatter;	/* always task->scatter */
	else if (task->num_scatter == 0)	/* Next element, if num_scatter == 0 */
		ret = NULL;			/* there is only one element. */
	else
		ret = sg_next(current_sge_address);	/* sg_next returns NULL
							 * for the last element
							 */

	dev_dbg(&request->isci_host->pdev->dev,
		"%s: next sge address = %p\n",
		__func__,
		ret);

	return ret;
}

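/*
 * Illustrative iteration (hypothetical caller; the sci core drives this from
 * its SGL build path): start with NULL to get the first sge and stop when the
 * helper returns NULL.
 *
 *	void *sge = NULL;
 *
 *	while ((sge = isci_request_io_request_get_next_sge(ireq, sge)) != NULL)
 *		;	// program one hardware SGL element from sge
 */
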
void isci_terminate_pending_requests(struct isci_host *isci_host,
				     struct isci_remote_device *isci_device,
				     enum isci_request_status new_request_state);
enum sci_status scic_task_request_construct(struct scic_sds_controller *scic,
					    struct scic_sds_remote_device *sci_dev,
					    u16 io_tag,
					    struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_ssp(struct scic_sds_request *sci_req);
enum sci_status scic_task_request_construct_sata(struct scic_sds_request *sci_req);
enum sci_status scic_io_request_construct_smp(struct scic_sds_request *sci_req);
void scic_stp_io_request_set_ncq_tag(struct scic_sds_request *sci_req, u16 ncq_tag);
void scic_sds_smp_request_copy_response(struct scic_sds_request *sci_req);
#endif /* !defined(_ISCI_REQUEST_H_) */