/*
 * Copyright (c) 2010 Cisco Systems, Inc.
 *
 * Portions based on tcm_loop_fabric_scsi.c and libfc/fc_fcp.c
 *
 * Copyright (c) 2007 Intel Corporation. All rights reserved.
 * Copyright (c) 2008 Red Hat, Inc. All rights reserved.
 * Copyright (c) 2008 Mike Christie
 * Copyright (c) 2009 Rising Tide, Inc.
 * Copyright (c) 2009 Linux-iSCSI.org
 * Copyright (c) 2009 Nicholas A. Bellinger <nab@linux-iscsi.org>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

/* XXX TBD some includes may be extraneous */

#include <linux/module.h>
#include <linux/moduleparam.h>
#include <linux/version.h>
#include <generated/utsrelease.h>
#include <linux/utsname.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/kthread.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/configfs.h>
#include <linux/ctype.h>
#include <linux/hash.h>
#include <asm/unaligned.h>
#include <scsi/scsi.h>
#include <scsi/scsi_host.h>
#include <scsi/scsi_device.h>
#include <scsi/scsi_cmnd.h>
#include <scsi/libfc.h>
#include <scsi/fc_encode.h>

#include <target/target_core_base.h>
#include <target/target_core_transport.h>
#include <target/target_core_fabric_ops.h>
#include <target/target_core_device.h>
#include <target/target_core_tpg.h>
#include <target/target_core_configfs.h>
#include <target/configfs_macros.h>

#include "tcm_fc.h"

/*
 * Deliver read data back to initiator.
 * XXX TBD handle resource problems later.
 */
int ft_queue_data_in(struct se_cmd *se_cmd)
{
        struct ft_cmd *cmd = container_of(se_cmd, struct ft_cmd, se_cmd);
        struct se_transport_task *task;
        struct fc_frame *fp = NULL;
        struct fc_exch *ep;
        struct fc_lport *lport;
        struct se_mem *mem;
        size_t remaining;
        u32 f_ctl = FC_FC_EX_CTX | FC_FC_REL_OFF;
        u32 mem_off;
        u32 fh_off = 0;
        u32 frame_off = 0;
        size_t frame_len = 0;
        size_t mem_len;
        size_t tlen;
        size_t off_in_page;
        struct page *page;
        int use_sg;
        int error;
        void *page_addr;
        void *from;
        void *to = NULL;

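        /*
         * Resolve the exchange and lport for this command, then start the
         * next sequence on the exchange for the solicited (read) data phase.
         */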
        ep = fc_seq_exch(cmd->seq);
        lport = ep->lp;
        cmd->seq = lport->tt.seq_start_next(cmd->seq);

        task = T_TASK(se_cmd);
        BUG_ON(!task);
        remaining = se_cmd->data_length;

        /*
         * Setup to use first mem list entry if any.
         */
        if (task->t_tasks_se_num) {
                mem = list_first_entry(task->t_mem_list,
                        struct se_mem, se_list);
                mem_len = mem->se_len;
                mem_off = mem->se_off;
                page = mem->se_page;
        } else {
                mem = NULL;
                mem_len = remaining;
                mem_off = 0;
                page = NULL;
        }

        /* no scatter/gather in skb for odd word length due to fc_seq_send() */
        use_sg = !(remaining % 4);

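        /*
         * Build and send FC data frames until all read data has been queued.
         * Each iteration either attaches a page fragment to the frame's skb
         * (scatter/gather path) or memcpy()s into a linear frame payload.
         */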
        while (remaining) {
                if (!mem_len) {
                        BUG_ON(!mem);
                        mem = list_entry(mem->se_list.next,
                                struct se_mem, se_list);
                        mem_len = min((size_t)mem->se_len, remaining);
                        mem_off = mem->se_off;
                        page = mem->se_page;
                }
                if (!frame_len) {
                        /*
                         * If the lport is capable of Large Send Offload (LSO),
                         * allow 'frame_len' to be as large as 'lso_max';
                         * otherwise limit it to the session's max frame size.
                         */
                        frame_len = (lport->seq_offload) ? lport->lso_max :
                                                           cmd->sess->max_frame;
                        frame_len = min(frame_len, remaining);
                        fp = fc_frame_alloc(lport, use_sg ? 0 : frame_len);
                        if (!fp)
                                return -ENOMEM;
                        to = fc_frame_payload_get(fp, 0);
                        fh_off = frame_off;
                        frame_off += frame_len;
                        /*
                         * Set the frame's max payload, which the base driver
                         * uses to tell the HW the maximum frame size, so that
                         * the HW can fragment appropriately based on the
                         * "gso_max_size" of the underlying netdev.
                         */
                        fr_max_payload(fp) = cmd->sess->max_frame;
                }
                tlen = min(mem_len, frame_len);

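                /*
                 * Scatter/gather path: pin the page and attach it to the
                 * frame's skb as a fragment (no copy).  Otherwise copy the
                 * data into the linear payload allocated above.
                 */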
                if (use_sg) {
                        if (!mem) {
                                BUG_ON(!task->t_task_buf);
                                page_addr = task->t_task_buf + mem_off;
                                /*
                                 * In this case, offset is 'offset_in_page' of
                                 * (t_task_buf + mem_off) instead of 'mem_off'.
                                 */
                                off_in_page = offset_in_page(page_addr);
                                page = virt_to_page(page_addr);
                                tlen = min(tlen, PAGE_SIZE - off_in_page);
                        } else
                                off_in_page = mem_off;
                        BUG_ON(!page);
                        get_page(page);
                        skb_fill_page_desc(fp_skb(fp),
                                           skb_shinfo(fp_skb(fp))->nr_frags,
                                           page, off_in_page, tlen);
                        fr_len(fp) += tlen;
                        fp_skb(fp)->data_len += tlen;
                        fp_skb(fp)->truesize +=
                                        PAGE_SIZE << compound_order(page);
                } else if (mem) {
                        BUG_ON(!page);
                        from = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
                                           KM_SOFTIRQ0);
                        page_addr = from;
                        from += mem_off & ~PAGE_MASK;
                        tlen = min(tlen, (size_t)(PAGE_SIZE -
                                        (mem_off & ~PAGE_MASK)));
                        memcpy(to, from, tlen);
                        kunmap_atomic(page_addr, KM_SOFTIRQ0);
                        to += tlen;
                } else {
                        from = task->t_task_buf + mem_off;
                        memcpy(to, from, tlen);
                        to += tlen;
                }

                mem_off += tlen;
                mem_len -= tlen;
                frame_len -= tlen;
                remaining -= tlen;

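                /*
                 * Keep filling the current frame until it is full or the skb
                 * runs out of fragment slots, then send it.  The final frame
                 * of the data carries END_SEQ.
                 */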
                if (frame_len &&
                    (skb_shinfo(fp_skb(fp))->nr_frags < FC_FRAME_SG_LEN))
                        continue;
                if (!remaining)
                        f_ctl |= FC_FC_END_SEQ;
                fc_fill_fc_hdr(fp, FC_RCTL_DD_SOL_DATA, ep->did, ep->sid,
                               FC_TYPE_FCP, f_ctl, fh_off);
                error = lport->tt.seq_send(lport, cmd->seq, fp);
                if (error) {
                        /* XXX For now, initiator will retry */
                        if (printk_ratelimit())
                                printk(KERN_ERR "%s: Failed to send frame %p, "
                                                "xid <0x%x>, remaining %zu, "
                                                "lso_max <0x%x>\n",
                                                __func__, fp, ep->xid,
                                                remaining, lport->lso_max);
                }
        }
        return ft_queue_status(se_cmd);
}

/*
 * Receive write data frame.
 */
void ft_recv_write_data(struct ft_cmd *cmd, struct fc_frame *fp)
{
        struct se_cmd *se_cmd = &cmd->se_cmd;
        struct fc_seq *seq = cmd->seq;
        struct fc_exch *ep;
        struct fc_lport *lport;
        struct se_transport_task *task;
        struct fc_frame_header *fh;
        struct se_mem *mem;
        u32 mem_off;
        u32 rel_off;
        size_t frame_len;
        size_t mem_len;
        size_t tlen;
        struct page *page;
        void *page_addr;
        void *from;
        void *to;
        u32 f_ctl;
        void *buf;

        task = T_TASK(se_cmd);
        BUG_ON(!task);

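        /*
         * Write data must carry a relative offset in the frame header;
         * drop anything that does not.
         */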
        fh = fc_frame_header_get(fp);
        if (!(ntoh24(fh->fh_f_ctl) & FC_FC_REL_OFF))
                goto drop;

        /*
         * When DDP (large receive offload) has been set up, we do not expect
         * even a single byte of payload here: the payload is placed directly
         * into the user buffers by the hardware, hence BUG_ON() if 'buf' is
         * non-NULL.
         */
        buf = fc_frame_payload_get(fp, 1);
        if (cmd->was_ddp_setup && buf) {
                printk(KERN_INFO "%s: DDP was set up, so a frame with a "
                                "payload was not expected; the payload should "
                                "be copied directly to the buffer instead of "
                                "arriving via the legacy receive queues\n",
                                __func__);
                BUG_ON(buf);
        }

        /*
         * If ft_cmd indicated 'ddp_setup', only the last data frame of the
         * sequence should arrive here, and it should have the TSI bit set.
         * A data frame without TSI set indicates an error.  In both cases
         * release the DDP context (ddp_done); in the error case, also
         * initiate the error recovery mechanism.
         */
        ep = fc_seq_exch(seq);
        if (cmd->was_ddp_setup) {
                BUG_ON(!ep);
                lport = ep->lp;
                BUG_ON(!lport);
        }
        if (cmd->was_ddp_setup && ep->xid != FC_XID_UNKNOWN) {
                f_ctl = ntoh24(fh->fh_f_ctl);
                /*
                 * If the TSI bit is set in f_ctl, the last write data frame
                 * was received successfully: the payload was posted directly
                 * to the user buffer and only this last frame's header is
                 * posted to the legacy receive queue.
                 */
                if (f_ctl & FC_FC_SEQ_INIT) { /* TSI bit set in FC frame */
                        cmd->write_data_len = lport->tt.ddp_done(lport,
                                                                 ep->xid);
                        goto last_frame;
                } else {
                        /*
                         * Updating write_data_len may be meaningless at this
                         * point, but do it anyway in case it is needed later
                         * for debugging or any other purpose.
                         */
                        printk(KERN_ERR "%s: Received a data frame without "
                                        "the TSI bit set, dropping the frame, "
                                        "cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
                                        __func__, cmd->sg, cmd->sg_cnt);
                        cmd->write_data_len = lport->tt.ddp_done(lport,
                                                                 ep->xid);
                        lport->tt.seq_exch_abort(cmd->seq, 0);
                        goto drop;
                }
        }

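        /*
         * Non-DDP path: copy the payload out of the frame into the command's
         * buffers at the relative offset carried in the frame header,
         * clamping to the command's data length.
         */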
        rel_off = ntohl(fh->fh_parm_offset);
        frame_len = fr_len(fp);
        if (frame_len <= sizeof(*fh))
                goto drop;
        frame_len -= sizeof(*fh);
        from = fc_frame_payload_get(fp, 0);
        if (rel_off >= se_cmd->data_length)
                goto drop;
        if (frame_len + rel_off > se_cmd->data_length)
                frame_len = se_cmd->data_length - rel_off;

        /*
         * Setup to use first mem list entry if any.
         */
        if (task->t_tasks_se_num) {
                mem = list_first_entry(task->t_mem_list,
                        struct se_mem, se_list);
                mem_len = mem->se_len;
                mem_off = mem->se_off;
                page = mem->se_page;
        } else {
                mem = NULL;
                page = NULL;
                mem_off = 0;
                mem_len = frame_len;
        }

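        /*
         * Walk the se_mem list (or the linear buffer), skipping past
         * 'rel_off', and copy the payload into each region via kmap_atomic.
         */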
        while (frame_len) {
                if (!mem_len) {
                        BUG_ON(!mem);
                        mem = list_entry(mem->se_list.next,
                                struct se_mem, se_list);
                        mem_len = mem->se_len;
                        mem_off = mem->se_off;
                        page = mem->se_page;
                }
                if (rel_off >= mem_len) {
                        rel_off -= mem_len;
                        mem_len = 0;
                        continue;
                }
                mem_off += rel_off;
                mem_len -= rel_off;
                rel_off = 0;

                tlen = min(mem_len, frame_len);

                if (mem) {
                        to = kmap_atomic(page + (mem_off >> PAGE_SHIFT),
                                         KM_SOFTIRQ0);
                        page_addr = to;
                        to += mem_off & ~PAGE_MASK;
                        tlen = min(tlen, (size_t)(PAGE_SIZE -
                                        (mem_off & ~PAGE_MASK)));
                        memcpy(to, from, tlen);
                        kunmap_atomic(page_addr, KM_SOFTIRQ0);
                } else {
                        to = task->t_task_buf + mem_off;
                        memcpy(to, from, tlen);
                }
                from += tlen;
                frame_len -= tlen;
                mem_off += tlen;
                mem_len -= tlen;
                cmd->write_data_len += tlen;
        }
last_frame:
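        /*
         * Once all write data for the command has arrived, hand the command
         * back to the target core for execution.
         */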
        if (cmd->write_data_len == se_cmd->data_length)
                transport_generic_handle_data(se_cmd);
drop:
        fc_frame_free(fp);
}