blob: ddd3398c156135ef3e92e054e0b0b98cb58de902 [file] [log] [blame]
Chris Boota511ce32012-04-14 17:50:35 -07001/*
2 * SBP2 target driver (SCSI over IEEE1394 in target mode)
3 *
4 * Copyright (C) 2011 Chris Boot <bootc@bootc.net>
5 *
6 * This program is free software; you can redistribute it and/or modify
7 * it under the terms of the GNU General Public License as published by
8 * the Free Software Foundation; either version 2 of the License, or
9 * (at your option) any later version.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
17 * along with this program; if not, write to the Free Software Foundation,
18 * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
19 */
20
21#define KMSG_COMPONENT "sbp_target"
22#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt
23
24#include <linux/kernel.h>
25#include <linux/module.h>
26#include <linux/init.h>
27#include <linux/types.h>
28#include <linux/string.h>
29#include <linux/configfs.h>
30#include <linux/ctype.h>
31#include <linux/firewire.h>
32#include <linux/firewire-constants.h>
Bart Van Asscheba929992015-05-08 10:11:12 +020033#include <scsi/scsi_proto.h>
Chris Boota511ce32012-04-14 17:50:35 -070034#include <scsi/scsi_tcq.h>
35#include <target/target_core_base.h>
36#include <target/target_core_backend.h>
37#include <target/target_core_fabric.h>
Chris Boota511ce32012-04-14 17:50:35 -070038#include <asm/unaligned.h>
39
40#include "sbp_target.h"
41
Chris Boota511ce32012-04-14 17:50:35 -070042/* FireWire address region for management and command block address handlers */
43static const struct fw_address_region sbp_register_region = {
44 .start = CSR_REGISTER_BASE + 0x10000,
45 .end = 0x1000000000000ULL,
46};
47
48static const u32 sbp_unit_directory_template[] = {
49 0x1200609e, /* unit_specifier_id: NCITS/T10 */
50 0x13010483, /* unit_sw_version: 1155D Rev 4 */
51 0x3800609e, /* command_set_specifier_id: NCITS/T10 */
52 0x390104d8, /* command_set: SPC-2 */
53 0x3b000000, /* command_set_revision: 0 */
54 0x3c000001, /* firmware_revision: 1 */
55};
56
57#define SESSION_MAINTENANCE_INTERVAL HZ
58
59static atomic_t login_id = ATOMIC_INIT(0);
60
61static void session_maintenance_work(struct work_struct *);
62static int sbp_run_transaction(struct fw_card *, int, int, int, int,
63 unsigned long long, void *, size_t);
64
65static int read_peer_guid(u64 *guid, const struct sbp_management_request *req)
66{
67 int ret;
68 __be32 high, low;
69
70 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
71 req->node_addr, req->generation, req->speed,
72 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 3 * 4,
73 &high, sizeof(high));
74 if (ret != RCODE_COMPLETE)
75 return ret;
76
77 ret = sbp_run_transaction(req->card, TCODE_READ_QUADLET_REQUEST,
78 req->node_addr, req->generation, req->speed,
79 (CSR_REGISTER_BASE | CSR_CONFIG_ROM) + 4 * 4,
80 &low, sizeof(low));
81 if (ret != RCODE_COMPLETE)
82 return ret;
83
84 *guid = (u64)be32_to_cpu(high) << 32 | be32_to_cpu(low);
85
86 return RCODE_COMPLETE;
87}
88
89static struct sbp_session *sbp_session_find_by_guid(
90 struct sbp_tpg *tpg, u64 guid)
91{
92 struct se_session *se_sess;
93 struct sbp_session *sess, *found = NULL;
94
95 spin_lock_bh(&tpg->se_tpg.session_lock);
96 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
97 sess = se_sess->fabric_sess_ptr;
98 if (sess->guid == guid)
99 found = sess;
100 }
101 spin_unlock_bh(&tpg->se_tpg.session_lock);
102
103 return found;
104}
105
106static struct sbp_login_descriptor *sbp_login_find_by_lun(
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700107 struct sbp_session *session, u32 unpacked_lun)
Chris Boota511ce32012-04-14 17:50:35 -0700108{
109 struct sbp_login_descriptor *login, *found = NULL;
110
111 spin_lock_bh(&session->lock);
112 list_for_each_entry(login, &session->login_list, link) {
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700113 if (login->login_lun == unpacked_lun)
Chris Boota511ce32012-04-14 17:50:35 -0700114 found = login;
115 }
116 spin_unlock_bh(&session->lock);
117
118 return found;
119}
120
121static int sbp_login_count_all_by_lun(
122 struct sbp_tpg *tpg,
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700123 u32 unpacked_lun,
Chris Boota511ce32012-04-14 17:50:35 -0700124 int exclusive)
125{
126 struct se_session *se_sess;
127 struct sbp_session *sess;
128 struct sbp_login_descriptor *login;
129 int count = 0;
130
131 spin_lock_bh(&tpg->se_tpg.session_lock);
132 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
133 sess = se_sess->fabric_sess_ptr;
134
135 spin_lock_bh(&sess->lock);
136 list_for_each_entry(login, &sess->login_list, link) {
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700137 if (login->login_lun != unpacked_lun)
Chris Boota511ce32012-04-14 17:50:35 -0700138 continue;
139
140 if (!exclusive || login->exclusive)
141 count++;
142 }
143 spin_unlock_bh(&sess->lock);
144 }
145 spin_unlock_bh(&tpg->se_tpg.session_lock);
146
147 return count;
148}
149
150static struct sbp_login_descriptor *sbp_login_find_by_id(
151 struct sbp_tpg *tpg, int login_id)
152{
153 struct se_session *se_sess;
154 struct sbp_session *sess;
155 struct sbp_login_descriptor *login, *found = NULL;
156
157 spin_lock_bh(&tpg->se_tpg.session_lock);
158 list_for_each_entry(se_sess, &tpg->se_tpg.tpg_sess_list, sess_list) {
159 sess = se_sess->fabric_sess_ptr;
160
161 spin_lock_bh(&sess->lock);
162 list_for_each_entry(login, &sess->login_list, link) {
163 if (login->login_id == login_id)
164 found = login;
165 }
166 spin_unlock_bh(&sess->lock);
167 }
168 spin_unlock_bh(&tpg->se_tpg.session_lock);
169
170 return found;
171}
172
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700173static u32 sbp_get_lun_from_tpg(struct sbp_tpg *tpg, u32 login_lun, int *err)
Chris Boota511ce32012-04-14 17:50:35 -0700174{
175 struct se_portal_group *se_tpg = &tpg->se_tpg;
176 struct se_lun *se_lun;
177
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700178 rcu_read_lock();
179 hlist_for_each_entry_rcu(se_lun, &se_tpg->tpg_lun_hlist, link) {
180 if (se_lun->unpacked_lun == login_lun) {
181 rcu_read_unlock();
182 *err = 0;
183 return login_lun;
184 }
185 }
186 rcu_read_unlock();
Chris Boota511ce32012-04-14 17:50:35 -0700187
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700188 *err = -ENODEV;
189 return login_lun;
Chris Boota511ce32012-04-14 17:50:35 -0700190}
191
/*
 * Allocate an sbp_session for the initiator identified by @guid and
 * register a matching se_session with the target core (using the GUID,
 * formatted as 16 hex digits, as the initiator name for ACL matching).
 *
 * Returns the new session or an ERR_PTR on failure.  The caller still
 * has to fill in the FireWire node details (card, node_id, generation,
 * speed) and schedule the maintenance work.
 */
static struct sbp_session *sbp_session_create(
		struct sbp_tpg *tpg,
		u64 guid)
{
	struct sbp_session *sess;
	int ret;
	char guid_str[17];	/* 16 hex digits + NUL */

	snprintf(guid_str, sizeof(guid_str), "%016llx", guid);

	sess = kmalloc(sizeof(*sess), GFP_KERNEL);
	if (!sess) {
		pr_err("failed to allocate session descriptor\n");
		return ERR_PTR(-ENOMEM);
	}
	spin_lock_init(&sess->lock);
	INIT_LIST_HEAD(&sess->login_list);
	INIT_DELAYED_WORK(&sess->maint_work, session_maintenance_work);
	sess->guid = guid;

	sess->se_sess = target_alloc_session(&tpg->se_tpg, 0, 0, TARGET_PROT_NORMAL,
					     guid_str, sess, NULL);
	if (IS_ERR(sess->se_sess)) {
		pr_err("failed to init se_session\n");
		ret = PTR_ERR(sess->se_sess);
		kfree(sess);
		return ERR_PTR(ret);
	}

	return sess;
}
223
224static void sbp_session_release(struct sbp_session *sess, bool cancel_work)
225{
226 spin_lock_bh(&sess->lock);
227 if (!list_empty(&sess->login_list)) {
228 spin_unlock_bh(&sess->lock);
229 return;
230 }
231 spin_unlock_bh(&sess->lock);
232
233 if (cancel_work)
234 cancel_delayed_work_sync(&sess->maint_work);
235
236 transport_deregister_session_configfs(sess->se_sess);
237 transport_deregister_session(sess->se_sess);
238
239 if (sess->card)
240 fw_card_put(sess->card);
241
242 kfree(sess);
243}
244
245static void sbp_target_agent_unregister(struct sbp_target_agent *);
246
/*
 * Tear down a login: unmap its command block agent first (so no new
 * fetch work can start), unlink it from its owning session, let the
 * session release itself if this was its last login, then free the
 * descriptor.
 *
 * @cancel_work is passed through to sbp_session_release().
 */
static void sbp_login_release(struct sbp_login_descriptor *login,
	bool cancel_work)
{
	struct sbp_session *sess = login->sess;

	/* FIXME: abort/wait on tasks */

	sbp_target_agent_unregister(login->tgt_agt);

	/* sess may be NULL, e.g. after session_reconnect_expired() */
	if (sess) {
		spin_lock_bh(&sess->lock);
		list_del(&login->link);
		spin_unlock_bh(&sess->lock);

		/* frees the session if its login list is now empty */
		sbp_session_release(sess, cancel_work);
	}

	kfree(login);
}
266
267static struct sbp_target_agent *sbp_target_agent_register(
268 struct sbp_login_descriptor *);
269
/*
 * LOGIN management ORB handler.
 *
 * Validates the requested LUN and the initiator's GUID, enforces the
 * exclusive-login and max-logins-per-LUN policies, creates (or reuses)
 * a session and login, registers a command block agent for the login,
 * and finally writes the login response block back to the initiator.
 *
 * All outcomes are reported through req->status.status; the function
 * returns nothing.
 */
static void sbp_management_request_login(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	struct sbp_session *sess;
	struct sbp_login_descriptor *login;
	struct sbp_login_response_block *response;
	u64 guid;
	u32 unpacked_lun;
	int login_response_len, ret;

	unpacked_lun = sbp_get_lun_from_tpg(tpg,
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)), &ret);
	if (ret) {
		pr_notice("login to unknown LUN: %d\n",
			LOGIN_ORB_LUN(be32_to_cpu(req->orb.misc)));

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LUN_NOTSUPP));
		return;
	}

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent LOGIN to LUN %d from %016llx\n",
		unpacked_lun, guid);

	sess = sbp_session_find_by_guid(tpg, guid);
	if (sess) {
		login = sbp_login_find_by_lun(sess, unpacked_lun);
		if (login) {
			pr_notice("initiator already logged-in\n");

			/*
			 * SBP-2 R4 says we should return access denied, but
			 * that can confuse initiators. Instead we need to
			 * treat this like a reconnect, but send the login
			 * response block like a fresh login.
			 *
			 * This is required particularly in the case of Apple
			 * devices booting off the FireWire target, where
			 * the firmware has an active login to the target. When
			 * the OS takes control of the session it issues its own
			 * LOGIN rather than a RECONNECT. To avoid the machine
			 * waiting until the reconnect_hold expires, we can skip
			 * the ACCESS_DENIED errors to speed things up.
			 */

			goto already_logged_in;
		}
	}

	/*
	 * check exclusive bit in login request
	 * reject with access_denied if any logins present
	 */
	if (LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc)) &&
	    sbp_login_count_all_by_lun(tpg, unpacked_lun, 0)) {
		pr_warn("refusing exclusive login with other active logins\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check exclusive bit in any existing login descriptor
	 * reject with access_denied if any exclusive logins present
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 1)) {
		pr_warn("refusing login while another exclusive login present\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/*
	 * check we haven't exceeded the number of allowed logins
	 * reject with resources_unavailable if we have
	 */
	if (sbp_login_count_all_by_lun(tpg, unpacked_lun, 0) >=
			tport->max_logins_per_lun) {
		pr_warn("max number of logins reached\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	if (!sess) {
		sess = sbp_session_create(tpg, guid);
		if (IS_ERR(sess)) {
			/* -EPERM maps to access denied (e.g. no ACL) */
			switch (PTR_ERR(sess)) {
			case -EPERM:
				ret = SBP_STATUS_ACCESS_DENIED;
				break;
			default:
				ret = SBP_STATUS_RESOURCES_UNAVAIL;
				break;
			}

			req->status.status = cpu_to_be32(
				STATUS_BLOCK_RESP(
					STATUS_RESP_REQUEST_COMPLETE) |
				STATUS_BLOCK_SBP_STATUS(ret));
			return;
		}

		/* fill in the node details sbp_session_create() left out */
		sess->node_id = req->node_addr;
		sess->card = fw_card_get(req->card);
		sess->generation = req->generation;
		sess->speed = req->speed;

		schedule_delayed_work(&sess->maint_work,
				SESSION_MAINTENANCE_INTERVAL);
	}

	/* only take the latest reconnect_hold into account */
	sess->reconnect_hold = min(
		1 << LOGIN_ORB_RECONNECT(be32_to_cpu(req->orb.misc)),
		tport->max_reconnect_timeout) - 1;

	login = kmalloc(sizeof(*login), GFP_KERNEL);
	if (!login) {
		pr_err("failed to allocate login descriptor\n");

		/* only tears the session down if this was its sole login */
		sbp_session_release(sess, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	login->sess = sess;
	login->login_lun = unpacked_lun;
	login->status_fifo_addr = sbp2_pointer_to_addr(&req->orb.status_fifo);
	login->exclusive = LOGIN_ORB_EXCLUSIVE(be32_to_cpu(req->orb.misc));
	login->login_id = atomic_inc_return(&login_id);

	login->tgt_agt = sbp_target_agent_register(login);
	if (IS_ERR(login->tgt_agt)) {
		ret = PTR_ERR(login->tgt_agt);
		pr_err("failed to map command block handler: %d\n", ret);

		sbp_session_release(sess, true);
		kfree(login);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	spin_lock_bh(&sess->lock);
	list_add_tail(&login->link, &sess->login_list);
	spin_unlock_bh(&sess->lock);

already_logged_in:
	response = kzalloc(sizeof(*response), GFP_KERNEL);
	if (!response) {
		pr_err("failed to allocate login response block\n");

		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_RESOURCES_UNAVAIL));
		return;
	}

	/* 12 bytes is the minimum valid login response length per SBP-2 */
	login_response_len = clamp_val(
			LOGIN_ORB_RESPONSE_LENGTH(be32_to_cpu(req->orb.length)),
			12, sizeof(*response));
	response->misc = cpu_to_be32(
		((login_response_len & 0xffff) << 16) |
		(login->login_id & 0xffff));
	response->reconnect_hold = cpu_to_be32(sess->reconnect_hold & 0xffff);
	addr_to_sbp2_pointer(login->tgt_agt->handler.offset,
		&response->command_block_agent);

	/* write the response block to the address given in the login ORB */
	ret = sbp_run_transaction(sess->card, TCODE_WRITE_BLOCK_REQUEST,
		sess->node_id, sess->generation, sess->speed,
		sbp2_pointer_to_addr(&req->orb.ptr2), response,
		login_response_len);
	if (ret != RCODE_COMPLETE) {
		pr_debug("failed to write login response block: %x\n", ret);

		kfree(response);
		sbp_login_release(login, true);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	kfree(response);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
488
/*
 * QUERY LOGINS management ORB handler.  Not implemented; we answer with
 * "request type not supported" so initiators can fall back gracefully.
 */
static void sbp_management_request_query_logins(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	pr_notice("QUERY LOGINS not implemented\n");
	/* FIXME: implement */

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
}
500
/*
 * RECONNECT management ORB handler.
 *
 * After a bus reset an initiator may reconnect to an existing login
 * (identified by login ID) instead of logging in again.  We verify the
 * login ID exists and that the requester's GUID matches the GUID that
 * created the login, then refresh the session's node details (card,
 * node_id, generation, speed) from this request.
 */
static void sbp_management_request_reconnect(
	struct sbp_management_agent *agent, struct sbp_management_request *req,
	int *status_data_size)
{
	struct sbp_tport *tport = agent->tport;
	struct sbp_tpg *tpg = tport->tpg;
	int ret;
	u64 guid;
	struct sbp_login_descriptor *login;

	ret = read_peer_guid(&guid, req);
	if (ret != RCODE_COMPLETE) {
		pr_warn("failed to read peer GUID: %d\n", ret);

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		return;
	}

	pr_notice("mgt_agent RECONNECT from %016llx\n", guid);

	login = sbp_login_find_by_id(tpg,
		RECONNECT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc)));

	if (!login) {
		pr_err("mgt_agent RECONNECT unknown login ID\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	/* a reconnect from a different initiator is denied, not hijacked */
	if (login->sess->guid != guid) {
		pr_err("mgt_agent RECONNECT login GUID doesn't match\n");

		req->status.status = cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
		return;
	}

	spin_lock_bh(&login->sess->lock);
	/* drop the card reference held from before the bus reset */
	if (login->sess->card)
		fw_card_put(login->sess->card);

	/* update the node details */
	login->sess->generation = req->generation;
	login->sess->node_id = req->node_addr;
	login->sess->card = fw_card_get(req->card);
	login->sess->speed = req->speed;
	spin_unlock_bh(&login->sess->lock);

	req->status.status = cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
559
560static void sbp_management_request_logout(
561 struct sbp_management_agent *agent, struct sbp_management_request *req,
562 int *status_data_size)
563{
564 struct sbp_tport *tport = agent->tport;
565 struct sbp_tpg *tpg = tport->tpg;
Stefan Richter5f2a3d612012-05-24 22:07:35 +0200566 int id;
Chris Boota511ce32012-04-14 17:50:35 -0700567 struct sbp_login_descriptor *login;
568
Stefan Richter5f2a3d612012-05-24 22:07:35 +0200569 id = LOGOUT_ORB_LOGIN_ID(be32_to_cpu(req->orb.misc));
Chris Boota511ce32012-04-14 17:50:35 -0700570
Stefan Richter5f2a3d612012-05-24 22:07:35 +0200571 login = sbp_login_find_by_id(tpg, id);
Chris Boota511ce32012-04-14 17:50:35 -0700572 if (!login) {
Stefan Richter5f2a3d612012-05-24 22:07:35 +0200573 pr_warn("cannot find login: %d\n", id);
Chris Boota511ce32012-04-14 17:50:35 -0700574
575 req->status.status = cpu_to_be32(
576 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
577 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_LOGIN_ID_UNKNOWN));
578 return;
579 }
580
581 pr_info("mgt_agent LOGOUT from LUN %d session %d\n",
Nicholas Bellinger6bb82612015-05-10 19:31:10 -0700582 login->login_lun, login->login_id);
Chris Boota511ce32012-04-14 17:50:35 -0700583
584 if (req->node_addr != login->sess->node_id) {
585 pr_warn("logout from different node ID\n");
586
587 req->status.status = cpu_to_be32(
588 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
589 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_ACCESS_DENIED));
590 return;
591 }
592
593 sbp_login_release(login, true);
594
595 req->status.status = cpu_to_be32(
596 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
597 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
598}
599
/*
 * Detect whether the session's FireWire card went away or a bus reset
 * occurred.  Called from the session maintenance work.
 *
 * If the card is gone or the bus generation changed, node_id is
 * invalidated (-1) and reconnect_expires is armed so that
 * session_maintenance_work() waits reconnect_hold + 1 seconds for a
 * RECONNECT before tearing the session down.
 */
static void session_check_for_reset(struct sbp_session *sess)
{
	bool card_valid = false;

	spin_lock_bh(&sess->lock);

	if (sess->card) {
		spin_lock_irq(&sess->card->lock);
		card_valid = (sess->card->local_node != NULL);
		spin_unlock_irq(&sess->card->lock);

		if (!card_valid) {
			/* drop our reference; the card is defunct */
			fw_card_put(sess->card);
			sess->card = NULL;
		}
	}

	/*
	 * Short-circuit matters: sess->card may have just been set to
	 * NULL above, but then !card_valid is true and the generation
	 * test (which dereferences sess->card) is never evaluated.
	 */
	if (!card_valid || (sess->generation != sess->card->generation)) {
		pr_info("Waiting for reconnect from node: %016llx\n",
			sess->guid);

		sess->node_id = -1;
		sess->reconnect_expires = get_jiffies_64() +
			((sess->reconnect_hold + 1) * HZ);
	}

	spin_unlock_bh(&sess->lock);
}
628
/*
 * Called when a node failed to RECONNECT within reconnect_hold + 1
 * seconds of a bus reset: tear down every login, then the session.
 *
 * Logins are first moved onto a private list so sbp_login_release() is
 * not called with sess->lock held; login->sess is cleared so each
 * release skips the per-login session unlink/release (the session is
 * released exactly once, at the end).
 */
static void session_reconnect_expired(struct sbp_session *sess)
{
	struct sbp_login_descriptor *login, *temp;
	LIST_HEAD(login_list);

	pr_info("Reconnect timer expired for node: %016llx\n", sess->guid);

	spin_lock_bh(&sess->lock);
	list_for_each_entry_safe(login, temp, &sess->login_list, link) {
		login->sess = NULL;
		list_move_tail(&login->link, &login_list);
	}
	spin_unlock_bh(&sess->lock);

	list_for_each_entry_safe(login, temp, &login_list, link) {
		list_del(&login->link);
		sbp_login_release(login, false);
	}

	sbp_session_release(sess, false);
}
650
651static void session_maintenance_work(struct work_struct *work)
652{
653 struct sbp_session *sess = container_of(work, struct sbp_session,
654 maint_work.work);
655
656 /* could be called while tearing down the session */
657 spin_lock_bh(&sess->lock);
658 if (list_empty(&sess->login_list)) {
659 spin_unlock_bh(&sess->lock);
660 return;
661 }
662 spin_unlock_bh(&sess->lock);
663
664 if (sess->node_id != -1) {
665 /* check for bus reset and make node_id invalid */
666 session_check_for_reset(sess);
667
668 schedule_delayed_work(&sess->maint_work,
669 SESSION_MAINTENANCE_INTERVAL);
670 } else if (!time_after64(get_jiffies_64(), sess->reconnect_expires)) {
671 /* still waiting for reconnect */
672 schedule_delayed_work(&sess->maint_work,
673 SESSION_MAINTENANCE_INTERVAL);
674 } else {
675 /* reconnect timeout has expired */
676 session_reconnect_expired(sess);
677 }
678}
679
680static int tgt_agent_rw_agent_state(struct fw_card *card, int tcode, void *data,
681 struct sbp_target_agent *agent)
682{
Chris Boot37419d62012-12-11 21:58:47 +0000683 int state;
Chris Boota511ce32012-04-14 17:50:35 -0700684
685 switch (tcode) {
686 case TCODE_READ_QUADLET_REQUEST:
687 pr_debug("tgt_agent AGENT_STATE READ\n");
688
689 spin_lock_bh(&agent->lock);
Chris Boot37419d62012-12-11 21:58:47 +0000690 state = agent->state;
Chris Boota511ce32012-04-14 17:50:35 -0700691 spin_unlock_bh(&agent->lock);
Chris Boot37419d62012-12-11 21:58:47 +0000692
693 *(__be32 *)data = cpu_to_be32(state);
Chris Boota511ce32012-04-14 17:50:35 -0700694
695 return RCODE_COMPLETE;
696
697 case TCODE_WRITE_QUADLET_REQUEST:
698 /* ignored */
699 return RCODE_COMPLETE;
700
701 default:
702 return RCODE_TYPE_ERROR;
703 }
704}
705
706static int tgt_agent_rw_agent_reset(struct fw_card *card, int tcode, void *data,
707 struct sbp_target_agent *agent)
708{
709 switch (tcode) {
710 case TCODE_WRITE_QUADLET_REQUEST:
711 pr_debug("tgt_agent AGENT_RESET\n");
712 spin_lock_bh(&agent->lock);
713 agent->state = AGENT_STATE_RESET;
714 spin_unlock_bh(&agent->lock);
715 return RCODE_COMPLETE;
716
717 default:
718 return RCODE_TYPE_ERROR;
719 }
720}
721
/*
 * ORB_POINTER register: a block write, valid only while the agent is
 * idle (SUSPENDED or RESET), supplies the address of the first ORB and
 * kicks off fetch work; a block read returns the last ORB pointer.
 */
static int tgt_agent_rw_orb_pointer(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	struct sbp2_pointer *ptr = data;

	switch (tcode) {
	case TCODE_WRITE_BLOCK_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED &&
				agent->state != AGENT_STATE_RESET) {
			spin_unlock_bh(&agent->lock);
			pr_notice("Ignoring ORB_POINTER write while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		/* moving to ACTIVE excludes concurrent pointer writes */
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		/*
		 * NOTE(review): orb_pointer/doorbell are written after the
		 * lock is dropped; the ACTIVE transition above appears to be
		 * what serialises writers — confirm readers don't depend on
		 * seeing these updated under agent->lock.
		 */
		agent->orb_pointer = sbp2_pointer_to_addr(ptr);
		agent->doorbell = false;

		pr_debug("tgt_agent ORB_POINTER write: 0x%llx\n",
			agent->orb_pointer);

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_BLOCK_REQUEST:
		pr_debug("tgt_agent ORB_POINTER READ\n");
		spin_lock_bh(&agent->lock);
		addr_to_sbp2_pointer(agent->orb_pointer, ptr);
		spin_unlock_bh(&agent->lock);
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
760
/*
 * DOORBELL register: a quadlet write, valid only while the agent is
 * SUSPENDED, tells the agent to re-read the current ORB's next_ORB
 * field (the initiator appended to the list).  Reads are accepted but
 * return no data.
 */
static int tgt_agent_rw_doorbell(struct fw_card *card, int tcode, void *data,
	struct sbp_target_agent *agent)
{
	switch (tcode) {
	case TCODE_WRITE_QUADLET_REQUEST:
		spin_lock_bh(&agent->lock);
		if (agent->state != AGENT_STATE_SUSPENDED) {
			spin_unlock_bh(&agent->lock);
			pr_debug("Ignoring DOORBELL while active.\n");
			return RCODE_CONFLICT_ERROR;
		}
		agent->state = AGENT_STATE_ACTIVE;
		spin_unlock_bh(&agent->lock);

		/* doorbell mode: fetch work follows next_ORB, skips bodies */
		agent->doorbell = true;

		pr_debug("tgt_agent DOORBELL\n");

		queue_work(system_unbound_wq, &agent->work);

		return RCODE_COMPLETE;

	case TCODE_READ_QUADLET_REQUEST:
		return RCODE_COMPLETE;

	default:
		return RCODE_TYPE_ERROR;
	}
}
790
791static int tgt_agent_rw_unsolicited_status_enable(struct fw_card *card,
792 int tcode, void *data, struct sbp_target_agent *agent)
793{
794 switch (tcode) {
795 case TCODE_WRITE_QUADLET_REQUEST:
796 pr_debug("tgt_agent UNSOLICITED_STATUS_ENABLE\n");
797 /* ignored as we don't send unsolicited status */
798 return RCODE_COMPLETE;
799
800 case TCODE_READ_QUADLET_REQUEST:
801 return RCODE_COMPLETE;
802
803 default:
804 return RCODE_TYPE_ERROR;
805 }
806}
807
/*
 * Address handler for a login's command block agent register block.
 * Validates that the request comes from the session's node in the
 * current bus generation, then dispatches on the register offset:
 *
 *   0x00  AGENT_STATE                 (quadlet)
 *   0x04  AGENT_RESET                 (quadlet)
 *   0x08  ORB_POINTER                 (8-byte block)
 *   0x10  DOORBELL                    (quadlet)
 *   0x14  UNSOLICITED_STATUS_ENABLE   (quadlet)
 *
 * Always completes the request via fw_send_response().
 */
static void tgt_agent_rw(struct fw_card *card, struct fw_request *request,
	int tcode, int destination, int source, int generation,
	unsigned long long offset, void *data, size_t length,
	void *callback_data)
{
	struct sbp_target_agent *agent = callback_data;
	struct sbp_session *sess = agent->login->sess;
	int sess_gen, sess_node, rcode;

	/* snapshot the session identity under its lock */
	spin_lock_bh(&sess->lock);
	sess_gen = sess->generation;
	sess_node = sess->node_id;
	spin_unlock_bh(&sess->lock);

	if (generation != sess_gen) {
		pr_notice("ignoring request with wrong generation\n");
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	if (source != sess_node) {
		pr_notice("ignoring request from foreign node (%x != %x)\n",
				source, sess_node);
		rcode = RCODE_TYPE_ERROR;
		goto out;
	}

	/* turn offset into the offset from the start of the block */
	offset -= agent->handler.offset;

	if (offset == 0x00 && length == 4) {
		/* AGENT_STATE */
		rcode = tgt_agent_rw_agent_state(card, tcode, data, agent);
	} else if (offset == 0x04 && length == 4) {
		/* AGENT_RESET */
		rcode = tgt_agent_rw_agent_reset(card, tcode, data, agent);
	} else if (offset == 0x08 && length == 8) {
		/* ORB_POINTER */
		rcode = tgt_agent_rw_orb_pointer(card, tcode, data, agent);
	} else if (offset == 0x10 && length == 4) {
		/* DOORBELL */
		rcode = tgt_agent_rw_doorbell(card, tcode, data, agent);
	} else if (offset == 0x14 && length == 4) {
		/* UNSOLICITED_STATUS_ENABLE */
		rcode = tgt_agent_rw_unsolicited_status_enable(card, tcode,
				data, agent);
	} else {
		rcode = RCODE_ADDRESS_ERROR;
	}

out:
	fw_send_response(card, request, rcode);
}
861
862static void sbp_handle_command(struct sbp_target_request *);
863static int sbp_send_status(struct sbp_target_request *);
864static void sbp_free_request(struct sbp_target_request *);
865
/*
 * Per-ORB work item: dispatch a fetched ORB by its request format.
 *
 * Format 0 (SBP-2 command block) hands ownership of @req to
 * sbp_handle_command(); for the other formats a status block is sent
 * and the request is freed here.  The default case is unreachable:
 * ORB_REQUEST_FORMAT() yields a 2-bit value, all four of which are
 * handled, hence BUG().
 */
static void tgt_agent_process_work(struct work_struct *work)
{
	struct sbp_target_request *req =
		container_of(work, struct sbp_target_request, work);

	pr_debug("tgt_orb ptr:0x%llx next_ORB:0x%llx data_descriptor:0x%llx misc:0x%x\n",
			req->orb_pointer,
			sbp2_pointer_to_addr(&req->orb.next_orb),
			sbp2_pointer_to_addr(&req->orb.data_descriptor),
			be32_to_cpu(req->orb.misc));

	if (req->orb_pointer >> 32)
		pr_debug("ORB with high bits set\n");

	switch (ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc))) {
		case 0:/* Format specified by this standard */
			sbp_handle_command(req);
			return;
		case 1: /* Reserved for future standardization */
		case 2: /* Vendor-dependent */
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_RESP(
						STATUS_RESP_REQUEST_COMPLETE) |
					STATUS_BLOCK_DEAD(0) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_REQ_TYPE_NOTSUPP));
			sbp_send_status(req);
			sbp_free_request(req);
			return;
		case 3: /* Dummy ORB */
			req->status.status |= cpu_to_be32(
					STATUS_BLOCK_RESP(
						STATUS_RESP_REQUEST_COMPLETE) |
					STATUS_BLOCK_DEAD(0) |
					STATUS_BLOCK_LEN(1) |
					STATUS_BLOCK_SBP_STATUS(
						SBP_STATUS_DUMMY_ORB_COMPLETE));
			sbp_send_status(req);
			sbp_free_request(req);
			return;
		default:
			BUG();
	}
}
911
912/* used to double-check we haven't been issued an AGENT_RESET */
913static inline bool tgt_agent_check_active(struct sbp_target_agent *agent)
914{
915 bool active;
916
917 spin_lock_bh(&agent->lock);
918 active = (agent->state == AGENT_STATE_ACTIVE);
919 spin_unlock_bh(&agent->lock);
920
921 return active;
922}
923
924static void tgt_agent_fetch_work(struct work_struct *work)
925{
926 struct sbp_target_agent *agent =
927 container_of(work, struct sbp_target_agent, work);
928 struct sbp_session *sess = agent->login->sess;
929 struct sbp_target_request *req;
930 int ret;
931 bool doorbell = agent->doorbell;
932 u64 next_orb = agent->orb_pointer;
933
934 while (next_orb && tgt_agent_check_active(agent)) {
935 req = kzalloc(sizeof(*req), GFP_KERNEL);
936 if (!req) {
937 spin_lock_bh(&agent->lock);
938 agent->state = AGENT_STATE_DEAD;
939 spin_unlock_bh(&agent->lock);
940 return;
941 }
942
943 req->login = agent->login;
944 req->orb_pointer = next_orb;
945
946 req->status.status = cpu_to_be32(STATUS_BLOCK_ORB_OFFSET_HIGH(
947 req->orb_pointer >> 32));
948 req->status.orb_low = cpu_to_be32(
949 req->orb_pointer & 0xfffffffc);
950
951 /* read in the ORB */
952 ret = sbp_run_transaction(sess->card, TCODE_READ_BLOCK_REQUEST,
953 sess->node_id, sess->generation, sess->speed,
954 req->orb_pointer, &req->orb, sizeof(req->orb));
955 if (ret != RCODE_COMPLETE) {
956 pr_debug("tgt_orb fetch failed: %x\n", ret);
957 req->status.status |= cpu_to_be32(
958 STATUS_BLOCK_SRC(
959 STATUS_SRC_ORB_FINISHED) |
960 STATUS_BLOCK_RESP(
961 STATUS_RESP_TRANSPORT_FAILURE) |
962 STATUS_BLOCK_DEAD(1) |
963 STATUS_BLOCK_LEN(1) |
964 STATUS_BLOCK_SBP_STATUS(
965 SBP_STATUS_UNSPECIFIED_ERROR));
966 spin_lock_bh(&agent->lock);
967 agent->state = AGENT_STATE_DEAD;
968 spin_unlock_bh(&agent->lock);
969
970 sbp_send_status(req);
971 sbp_free_request(req);
972 return;
973 }
974
975 /* check the next_ORB field */
976 if (be32_to_cpu(req->orb.next_orb.high) & 0x80000000) {
977 next_orb = 0;
978 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
979 STATUS_SRC_ORB_FINISHED));
980 } else {
981 next_orb = sbp2_pointer_to_addr(&req->orb.next_orb);
982 req->status.status |= cpu_to_be32(STATUS_BLOCK_SRC(
983 STATUS_SRC_ORB_CONTINUING));
984 }
985
986 if (tgt_agent_check_active(agent) && !doorbell) {
987 INIT_WORK(&req->work, tgt_agent_process_work);
988 queue_work(system_unbound_wq, &req->work);
989 } else {
990 /* don't process this request, just check next_ORB */
991 sbp_free_request(req);
992 }
993
994 spin_lock_bh(&agent->lock);
995 doorbell = agent->doorbell = false;
996
997 /* check if we should carry on processing */
998 if (next_orb)
999 agent->orb_pointer = next_orb;
1000 else
1001 agent->state = AGENT_STATE_SUSPENDED;
1002
1003 spin_unlock_bh(&agent->lock);
1004 };
1005}
1006
1007static struct sbp_target_agent *sbp_target_agent_register(
1008 struct sbp_login_descriptor *login)
1009{
1010 struct sbp_target_agent *agent;
1011 int ret;
1012
1013 agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1014 if (!agent)
1015 return ERR_PTR(-ENOMEM);
1016
1017 spin_lock_init(&agent->lock);
1018
1019 agent->handler.length = 0x20;
1020 agent->handler.address_callback = tgt_agent_rw;
1021 agent->handler.callback_data = agent;
1022
1023 agent->login = login;
1024 agent->state = AGENT_STATE_RESET;
1025 INIT_WORK(&agent->work, tgt_agent_fetch_work);
1026 agent->orb_pointer = 0;
1027 agent->doorbell = false;
1028
1029 ret = fw_core_add_address_handler(&agent->handler,
1030 &sbp_register_region);
1031 if (ret < 0) {
1032 kfree(agent);
1033 return ERR_PTR(ret);
1034 }
1035
1036 return agent;
1037}
1038
/*
 * Tear down a command block agent. Order matters: the address handler is
 * removed first so no new register writes can re-queue the fetch work,
 * then any in-flight fetch work is flushed, then the agent is freed.
 */
static void sbp_target_agent_unregister(struct sbp_target_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
1045
1046/*
1047 * Simple wrapper around fw_run_transaction that retries the transaction several
1048 * times in case of failure, with an exponential backoff.
1049 */
1050static int sbp_run_transaction(struct fw_card *card, int tcode, int destination_id,
1051 int generation, int speed, unsigned long long offset,
1052 void *payload, size_t length)
1053{
1054 int attempt, ret, delay;
1055
1056 for (attempt = 1; attempt <= 5; attempt++) {
1057 ret = fw_run_transaction(card, tcode, destination_id,
1058 generation, speed, offset, payload, length);
1059
1060 switch (ret) {
1061 case RCODE_COMPLETE:
1062 case RCODE_TYPE_ERROR:
1063 case RCODE_ADDRESS_ERROR:
1064 case RCODE_GENERATION:
1065 return ret;
1066
1067 default:
1068 delay = 5 * attempt * attempt;
1069 usleep_range(delay, delay * 2);
1070 }
1071 }
1072
1073 return ret;
1074}
1075
1076/*
1077 * Wrapper around sbp_run_transaction that gets the card, destination,
1078 * generation and speed out of the request's session.
1079 */
1080static int sbp_run_request_transaction(struct sbp_target_request *req,
1081 int tcode, unsigned long long offset, void *payload,
1082 size_t length)
1083{
1084 struct sbp_login_descriptor *login = req->login;
1085 struct sbp_session *sess = login->sess;
1086 struct fw_card *card;
1087 int node_id, generation, speed, ret;
1088
1089 spin_lock_bh(&sess->lock);
1090 card = fw_card_get(sess->card);
1091 node_id = sess->node_id;
1092 generation = sess->generation;
1093 speed = sess->speed;
1094 spin_unlock_bh(&sess->lock);
1095
1096 ret = sbp_run_transaction(card, tcode, node_id, generation, speed,
1097 offset, payload, length);
1098
1099 fw_card_put(card);
1100
1101 return ret;
1102}
1103
1104static int sbp_fetch_command(struct sbp_target_request *req)
1105{
1106 int ret, cmd_len, copy_len;
1107
1108 cmd_len = scsi_command_size(req->orb.command_block);
1109
1110 req->cmd_buf = kmalloc(cmd_len, GFP_KERNEL);
1111 if (!req->cmd_buf)
1112 return -ENOMEM;
1113
1114 memcpy(req->cmd_buf, req->orb.command_block,
1115 min_t(int, cmd_len, sizeof(req->orb.command_block)));
1116
1117 if (cmd_len > sizeof(req->orb.command_block)) {
1118 pr_debug("sbp_fetch_command: filling in long command\n");
1119 copy_len = cmd_len - sizeof(req->orb.command_block);
1120
1121 ret = sbp_run_request_transaction(req,
1122 TCODE_READ_BLOCK_REQUEST,
1123 req->orb_pointer + sizeof(req->orb),
1124 req->cmd_buf + sizeof(req->orb.command_block),
1125 copy_len);
1126 if (ret != RCODE_COMPLETE)
1127 return -EIO;
1128 }
1129
1130 return 0;
1131}
1132
1133static int sbp_fetch_page_table(struct sbp_target_request *req)
1134{
1135 int pg_tbl_sz, ret;
1136 struct sbp_page_table_entry *pg_tbl;
1137
1138 if (!CMDBLK_ORB_PG_TBL_PRESENT(be32_to_cpu(req->orb.misc)))
1139 return 0;
1140
1141 pg_tbl_sz = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc)) *
1142 sizeof(struct sbp_page_table_entry);
1143
1144 pg_tbl = kmalloc(pg_tbl_sz, GFP_KERNEL);
1145 if (!pg_tbl)
1146 return -ENOMEM;
1147
1148 ret = sbp_run_request_transaction(req, TCODE_READ_BLOCK_REQUEST,
1149 sbp2_pointer_to_addr(&req->orb.data_descriptor),
1150 pg_tbl, pg_tbl_sz);
1151 if (ret != RCODE_COMPLETE) {
1152 kfree(pg_tbl);
1153 return -EIO;
1154 }
1155
1156 req->pg_tbl = pg_tbl;
1157 return 0;
1158}
1159
1160static void sbp_calc_data_length_direction(struct sbp_target_request *req,
1161 u32 *data_len, enum dma_data_direction *data_dir)
1162{
1163 int data_size, direction, idx;
1164
1165 data_size = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));
1166 direction = CMDBLK_ORB_DIRECTION(be32_to_cpu(req->orb.misc));
1167
1168 if (!data_size) {
1169 *data_len = 0;
1170 *data_dir = DMA_NONE;
1171 return;
1172 }
1173
1174 *data_dir = direction ? DMA_FROM_DEVICE : DMA_TO_DEVICE;
1175
1176 if (req->pg_tbl) {
1177 *data_len = 0;
1178 for (idx = 0; idx < data_size; idx++) {
1179 *data_len += be16_to_cpu(
1180 req->pg_tbl[idx].segment_length);
1181 }
1182 } else {
1183 *data_len = data_size;
1184 }
1185}
1186
/*
 * Process one fetched command block ORB: read in the CDB and (optional)
 * page table, then submit the command to the target core. On any setup
 * failure, a TRANSPORT_FAILURE status with the DEAD bit clear is sent to
 * the initiator and the request is freed, leaving the agent running.
 */
static void sbp_handle_command(struct sbp_target_request *req)
{
	struct sbp_login_descriptor *login = req->login;
	struct sbp_session *sess = login->sess;
	int ret, unpacked_lun;
	u32 data_length;
	enum dma_data_direction data_dir;

	ret = sbp_fetch_command(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch command failed: %d\n", ret);
		goto err;
	}

	ret = sbp_fetch_page_table(req);
	if (ret) {
		pr_debug("sbp_handle_command: fetch page table failed: %d\n",
			ret);
		goto err;
	}

	unpacked_lun = req->login->login_lun;
	sbp_calc_data_length_direction(req, &data_length, &data_dir);

	pr_debug("sbp_handle_command ORB:0x%llx unpacked_lun:%d data_len:%d data_dir:%d\n",
			req->orb_pointer, unpacked_lun, data_length, data_dir);

	/* only used for printk until we do TMRs */
	req->se_cmd.tag = req->orb_pointer;
	if (target_submit_cmd(&req->se_cmd, sess->se_sess, req->cmd_buf,
			      req->sense_buf, unpacked_lun, data_length,
			      TCM_SIMPLE_TAG, data_dir, 0))
		goto err;

	return;

err:
	/* DEAD(0): report the failure but keep the agent alive */
	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(1) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
	sbp_send_status(req);
	sbp_free_request(req);
}
1232
1233/*
1234 * DMA_TO_DEVICE = read from initiator (SCSI WRITE)
1235 * DMA_FROM_DEVICE = write to initiator (SCSI READ)
1236 */
/*
 * Move the command's payload between the initiator's memory and the local
 * scatter-gather list, either through the fetched page table (one address
 * window per PTE) or via the ORB's direct data descriptor. Each transfer
 * is capped by the ORB's max_payload and by the current SG segment.
 *
 * Returns 0 on success, -EIO if any FireWire transaction fails.
 */
static int sbp_rw_data(struct sbp_target_request *req)
{
	struct sbp_session *sess = req->login->sess;
	int tcode, sg_miter_flags, max_payload, pg_size, speed, node_id,
		generation, num_pte, length, tfr_length,
		rcode = RCODE_COMPLETE;
	struct sbp_page_table_entry *pte;
	unsigned long long offset;
	struct fw_card *card;
	struct sg_mapping_iter iter;

	/* a SCSI READ writes to the initiator; a SCSI WRITE reads from it */
	if (req->se_cmd.data_direction == DMA_FROM_DEVICE) {
		tcode = TCODE_WRITE_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_FROM_SG;
	} else {
		tcode = TCODE_READ_BLOCK_REQUEST;
		sg_miter_flags = SG_MITER_TO_SG;
	}

	max_payload = 4 << CMDBLK_ORB_MAX_PAYLOAD(be32_to_cpu(req->orb.misc));
	speed = CMDBLK_ORB_SPEED(be32_to_cpu(req->orb.misc));

	pg_size = CMDBLK_ORB_PG_SIZE(be32_to_cpu(req->orb.misc));
	if (pg_size) {
		pr_err("sbp_run_transaction: page size ignored\n");
		pg_size = 0x100 << pg_size;
	}

	/* snapshot bus parameters that a bus reset may change */
	spin_lock_bh(&sess->lock);
	card = fw_card_get(sess->card);
	node_id = sess->node_id;
	generation = sess->generation;
	spin_unlock_bh(&sess->lock);

	if (req->pg_tbl) {
		pte = req->pg_tbl;
		num_pte = CMDBLK_ORB_DATA_SIZE(be32_to_cpu(req->orb.misc));

		/* length == 0 forces the first PTE to be loaded below */
		offset = 0;
		length = 0;
	} else {
		pte = NULL;
		num_pte = 0;

		offset = sbp2_pointer_to_addr(&req->orb.data_descriptor);
		length = req->se_cmd.data_length;
	}

	sg_miter_start(&iter, req->se_cmd.t_data_sg, req->se_cmd.t_data_nents,
		sg_miter_flags);

	while (length || num_pte) {
		if (!length) {
			/* current window exhausted: advance to the next PTE */
			offset = (u64)be16_to_cpu(pte->segment_base_hi) << 32 |
				be32_to_cpu(pte->segment_base_lo);
			length = be16_to_cpu(pte->segment_length);

			pte++;
			num_pte--;
		}

		sg_miter_next(&iter);

		tfr_length = min3(length, max_payload, (int)iter.length);

		/* FIXME: take page_size into account */

		rcode = sbp_run_transaction(card, tcode, node_id,
				generation, speed,
				offset, iter.addr, tfr_length);

		if (rcode != RCODE_COMPLETE)
			break;

		length -= tfr_length;
		offset += tfr_length;
		iter.consumed = tfr_length;
	}

	sg_miter_stop(&iter);
	fw_card_put(card);

	if (rcode == RCODE_COMPLETE) {
		WARN_ON(length != 0);
		return 0;
	} else {
		return -EIO;
	}
}
1326
1327static int sbp_send_status(struct sbp_target_request *req)
1328{
1329 int ret, length;
1330 struct sbp_login_descriptor *login = req->login;
1331
1332 length = (((be32_to_cpu(req->status.status) >> 24) & 0x07) + 1) * 4;
1333
1334 ret = sbp_run_request_transaction(req, TCODE_WRITE_BLOCK_REQUEST,
1335 login->status_fifo_addr, &req->status, length);
1336 if (ret != RCODE_COMPLETE) {
1337 pr_debug("sbp_send_status: write failed: 0x%x\n", ret);
1338 return -EIO;
1339 }
1340
1341 pr_debug("sbp_send_status: status write complete for ORB: 0x%llx\n",
1342 req->orb_pointer);
1343
1344 return 0;
1345}
1346
/*
 * Repack fixed-format SCSI sense data from req->sense_buf into the packed
 * SBP-2 status block layout in req->status.data, and set the status block
 * header (LEN(5): header plus four data quadlets) accordingly.
 *
 * Descriptor-format sense (0x72/0x73) is not handled and is reported to
 * the initiator as REQUEST ABORTED instead.
 */
static void sbp_sense_mangle(struct sbp_target_request *req)
{
	struct se_cmd *se_cmd = &req->se_cmd;
	u8 *sense = req->sense_buf;
	u8 *status = req->status.data;

	/* the fixed-format fields copied below need at least 18 bytes */
	WARN_ON(se_cmd->scsi_sense_length < 18);

	switch (sense[0] & 0x7f) { /* sfmt */
	case 0x70: /* current, fixed */
		status[0] = 0 << 6;
		break;
	case 0x71: /* deferred, fixed */
		status[0] = 1 << 6;
		break;
	case 0x72: /* current, descriptor */
	case 0x73: /* deferred, descriptor */
	default:
		/*
		 * TODO: SBP-3 specifies what we should do with descriptor
		 * format sense data
		 */
		pr_err("sbp_send_sense: unknown sense format: 0x%x\n",
			sense[0]);
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQUEST_ABORTED));
		return;
	}

	status[0] |= se_cmd->scsi_status & 0x3f;/* status */
	status[1] =
		(sense[0] & 0x80) |		/* valid */
		((sense[2] & 0xe0) >> 1) |	/* mark, eom, ili */
		(sense[2] & 0x0f);		/* sense_key */
	status[2] = se_cmd->scsi_asc;		/* sense_code */
	status[3] = se_cmd->scsi_ascq;		/* sense_qualifier */

	/* information */
	status[4] = sense[3];
	status[5] = sense[4];
	status[6] = sense[5];
	status[7] = sense[6];

	/* CDB-dependent */
	status[8] = sense[8];
	status[9] = sense[9];
	status[10] = sense[10];
	status[11] = sense[11];

	/* fru */
	status[12] = sense[14];

	/* sense_key-dependent */
	status[13] = sense[15];
	status[14] = sense[16];
	status[15] = sense[17];

	req->status.status |= cpu_to_be32(
		STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
		STATUS_BLOCK_DEAD(0) |
		STATUS_BLOCK_LEN(5) |
		STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
}
1413
1414static int sbp_send_sense(struct sbp_target_request *req)
1415{
1416 struct se_cmd *se_cmd = &req->se_cmd;
1417
1418 if (se_cmd->scsi_sense_length) {
1419 sbp_sense_mangle(req);
1420 } else {
1421 req->status.status |= cpu_to_be32(
1422 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1423 STATUS_BLOCK_DEAD(0) |
1424 STATUS_BLOCK_LEN(1) |
1425 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_OK));
1426 }
1427
1428 return sbp_send_status(req);
1429}
1430
/* Free a target request along with its fetched CDB and page table. */
static void sbp_free_request(struct sbp_target_request *req)
{
	kfree(req->pg_tbl);
	kfree(req->cmd_buf);
	kfree(req);
}
1437
1438static void sbp_mgt_agent_process(struct work_struct *work)
1439{
1440 struct sbp_management_agent *agent =
1441 container_of(work, struct sbp_management_agent, work);
1442 struct sbp_management_request *req = agent->request;
1443 int ret;
1444 int status_data_len = 0;
1445
1446 /* fetch the ORB from the initiator */
1447 ret = sbp_run_transaction(req->card, TCODE_READ_BLOCK_REQUEST,
1448 req->node_addr, req->generation, req->speed,
1449 agent->orb_offset, &req->orb, sizeof(req->orb));
1450 if (ret != RCODE_COMPLETE) {
1451 pr_debug("mgt_orb fetch failed: %x\n", ret);
1452 goto out;
1453 }
1454
1455 pr_debug("mgt_orb ptr1:0x%llx ptr2:0x%llx misc:0x%x len:0x%x status_fifo:0x%llx\n",
1456 sbp2_pointer_to_addr(&req->orb.ptr1),
1457 sbp2_pointer_to_addr(&req->orb.ptr2),
1458 be32_to_cpu(req->orb.misc), be32_to_cpu(req->orb.length),
1459 sbp2_pointer_to_addr(&req->orb.status_fifo));
1460
1461 if (!ORB_NOTIFY(be32_to_cpu(req->orb.misc)) ||
1462 ORB_REQUEST_FORMAT(be32_to_cpu(req->orb.misc)) != 0) {
1463 pr_err("mgt_orb bad request\n");
1464 goto out;
1465 }
1466
1467 switch (MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc))) {
1468 case MANAGEMENT_ORB_FUNCTION_LOGIN:
1469 sbp_management_request_login(agent, req, &status_data_len);
1470 break;
1471
1472 case MANAGEMENT_ORB_FUNCTION_QUERY_LOGINS:
1473 sbp_management_request_query_logins(agent, req,
1474 &status_data_len);
1475 break;
1476
1477 case MANAGEMENT_ORB_FUNCTION_RECONNECT:
1478 sbp_management_request_reconnect(agent, req, &status_data_len);
1479 break;
1480
1481 case MANAGEMENT_ORB_FUNCTION_SET_PASSWORD:
1482 pr_notice("SET PASSWORD not implemented\n");
1483
1484 req->status.status = cpu_to_be32(
1485 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1486 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1487
1488 break;
1489
1490 case MANAGEMENT_ORB_FUNCTION_LOGOUT:
1491 sbp_management_request_logout(agent, req, &status_data_len);
1492 break;
1493
1494 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK:
1495 pr_notice("ABORT TASK not implemented\n");
1496
1497 req->status.status = cpu_to_be32(
1498 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1499 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1500
1501 break;
1502
1503 case MANAGEMENT_ORB_FUNCTION_ABORT_TASK_SET:
1504 pr_notice("ABORT TASK SET not implemented\n");
1505
1506 req->status.status = cpu_to_be32(
1507 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1508 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1509
1510 break;
1511
1512 case MANAGEMENT_ORB_FUNCTION_LOGICAL_UNIT_RESET:
1513 pr_notice("LOGICAL UNIT RESET not implemented\n");
1514
1515 req->status.status = cpu_to_be32(
1516 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1517 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1518
1519 break;
1520
1521 case MANAGEMENT_ORB_FUNCTION_TARGET_RESET:
1522 pr_notice("TARGET RESET not implemented\n");
1523
1524 req->status.status = cpu_to_be32(
1525 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1526 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1527
1528 break;
1529
1530 default:
1531 pr_notice("unknown management function 0x%x\n",
1532 MANAGEMENT_ORB_FUNCTION(be32_to_cpu(req->orb.misc)));
1533
1534 req->status.status = cpu_to_be32(
1535 STATUS_BLOCK_RESP(STATUS_RESP_REQUEST_COMPLETE) |
1536 STATUS_BLOCK_SBP_STATUS(SBP_STATUS_REQ_TYPE_NOTSUPP));
1537
1538 break;
1539 }
1540
1541 req->status.status |= cpu_to_be32(
1542 STATUS_BLOCK_SRC(1) | /* Response to ORB, next_ORB absent */
1543 STATUS_BLOCK_LEN(DIV_ROUND_UP(status_data_len, 4) + 1) |
1544 STATUS_BLOCK_ORB_OFFSET_HIGH(agent->orb_offset >> 32));
1545 req->status.orb_low = cpu_to_be32(agent->orb_offset);
1546
1547 /* write the status block back to the initiator */
1548 ret = sbp_run_transaction(req->card, TCODE_WRITE_BLOCK_REQUEST,
1549 req->node_addr, req->generation, req->speed,
1550 sbp2_pointer_to_addr(&req->orb.status_fifo),
1551 &req->status, 8 + status_data_len);
1552 if (ret != RCODE_COMPLETE) {
1553 pr_debug("mgt_orb status write failed: %x\n", ret);
1554 goto out;
1555 }
1556
1557out:
1558 fw_card_put(req->card);
1559 kfree(req);
1560
1561 spin_lock_bh(&agent->lock);
1562 agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1563 spin_unlock_bh(&agent->lock);
1564}
1565
1566static void sbp_mgt_agent_rw(struct fw_card *card,
1567 struct fw_request *request, int tcode, int destination, int source,
1568 int generation, unsigned long long offset, void *data, size_t length,
1569 void *callback_data)
1570{
1571 struct sbp_management_agent *agent = callback_data;
1572 struct sbp2_pointer *ptr = data;
1573 int rcode = RCODE_ADDRESS_ERROR;
1574
1575 if (!agent->tport->enable)
1576 goto out;
1577
1578 if ((offset != agent->handler.offset) || (length != 8))
1579 goto out;
1580
1581 if (tcode == TCODE_WRITE_BLOCK_REQUEST) {
1582 struct sbp_management_request *req;
1583 int prev_state;
1584
1585 spin_lock_bh(&agent->lock);
1586 prev_state = agent->state;
1587 agent->state = MANAGEMENT_AGENT_STATE_BUSY;
1588 spin_unlock_bh(&agent->lock);
1589
1590 if (prev_state == MANAGEMENT_AGENT_STATE_BUSY) {
1591 pr_notice("ignoring management request while busy\n");
1592 rcode = RCODE_CONFLICT_ERROR;
1593 goto out;
1594 }
1595
1596 req = kzalloc(sizeof(*req), GFP_ATOMIC);
1597 if (!req) {
1598 rcode = RCODE_CONFLICT_ERROR;
1599 goto out;
1600 }
1601
1602 req->card = fw_card_get(card);
1603 req->generation = generation;
1604 req->node_addr = source;
1605 req->speed = fw_get_request_speed(request);
1606
1607 agent->orb_offset = sbp2_pointer_to_addr(ptr);
1608 agent->request = req;
1609
1610 queue_work(system_unbound_wq, &agent->work);
1611 rcode = RCODE_COMPLETE;
1612 } else if (tcode == TCODE_READ_BLOCK_REQUEST) {
1613 addr_to_sbp2_pointer(agent->orb_offset, ptr);
1614 rcode = RCODE_COMPLETE;
1615 } else {
1616 rcode = RCODE_TYPE_ERROR;
1617 }
1618
1619out:
1620 fw_send_response(card, request, rcode);
1621}
1622
1623static struct sbp_management_agent *sbp_management_agent_register(
1624 struct sbp_tport *tport)
1625{
1626 int ret;
1627 struct sbp_management_agent *agent;
1628
1629 agent = kmalloc(sizeof(*agent), GFP_KERNEL);
1630 if (!agent)
1631 return ERR_PTR(-ENOMEM);
1632
1633 spin_lock_init(&agent->lock);
1634 agent->tport = tport;
1635 agent->handler.length = 0x08;
1636 agent->handler.address_callback = sbp_mgt_agent_rw;
1637 agent->handler.callback_data = agent;
1638 agent->state = MANAGEMENT_AGENT_STATE_IDLE;
1639 INIT_WORK(&agent->work, sbp_mgt_agent_process);
1640 agent->orb_offset = 0;
1641 agent->request = NULL;
1642
1643 ret = fw_core_add_address_handler(&agent->handler,
1644 &sbp_register_region);
1645 if (ret < 0) {
1646 kfree(agent);
1647 return ERR_PTR(ret);
1648 }
1649
1650 return agent;
1651}
1652
/*
 * Tear down the management agent: unpublish the register first so no new
 * ORB pointer writes can queue work, flush any request being processed,
 * then free the agent.
 */
static void sbp_management_agent_unregister(struct sbp_management_agent *agent)
{
	fw_core_remove_address_handler(&agent->handler);
	cancel_work_sync(&agent->work);
	kfree(agent);
}
1659
/* Fabric callback stub: unconditionally reports true. */
static int sbp_check_true(struct se_portal_group *se_tpg)
{
	return 1;
}
1664
/* Fabric callback stub: unconditionally reports false. */
static int sbp_check_false(struct se_portal_group *se_tpg)
{
	return 0;
}
1669
/* Return the fixed name identifying this fabric module. */
static char *sbp_get_fabric_name(void)
{
	return "sbp";
}
1674
1675static char *sbp_get_fabric_wwn(struct se_portal_group *se_tpg)
1676{
1677 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
1678 struct sbp_tport *tport = tpg->tport;
1679
1680 return &tport->tport_name[0];
1681}
1682
/* Return the portal group tag (the N in "tpgt_N"). */
static u16 sbp_get_tag(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	return tpg->tport_tpgt;
}
1688
/* Fabric callback stub: single instance, always index 1. */
static u32 sbp_tpg_get_inst_index(struct se_portal_group *se_tpg)
{
	return 1;
}
1693
/* Target core is done with the command: free the wrapping SBP request. */
static void sbp_release_cmd(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	sbp_free_request(req);
}
1701
/* Fabric callback stub: no extra work on session shutdown, returns 0. */
static int sbp_shutdown_session(struct se_session *se_sess)
{
	return 0;
}
1706
/*
 * Fabric callback: nothing to do here — SBP session lifetime is driven
 * by the login/logout management requests. (Redundant trailing "return"
 * in a void function removed per kernel style.)
 */
static void sbp_close_session(struct se_session *se_sess)
{
}
1711
/* Fabric callback stub: session indexing is not used here, always 0. */
static u32 sbp_sess_get_index(struct se_session *se_sess)
{
	return 0;
}
1716
/*
 * Target core asks for the write payload: fetch the data from the
 * initiator, then start command execution. On transport failure an
 * UNSPECIFIED_ERROR status (DEAD bit clear) is sent and the command is
 * not executed.
 */
static int sbp_write_pending(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(
				STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(
				SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	target_execute_cmd(se_cmd);
	return 0;
}
1739
/* Fabric callback stub: always reports 0. */
static int sbp_write_pending_status(struct se_cmd *se_cmd)
{
	return 0;
}
1744
/*
 * Fabric callback: no default node ACL attributes to set for SBP.
 * (Redundant trailing "return" in a void function removed per kernel
 * style.)
 */
static void sbp_set_default_node_attrs(struct se_node_acl *nacl)
{
}
1749
/* Fabric callback stub: no per-command state is tracked, always 0. */
static int sbp_get_cmd_state(struct se_cmd *se_cmd)
{
	return 0;
}
1754
/*
 * Called when read data is ready: push it to the initiator and then send
 * the status/sense block. A transport failure is reported to the
 * initiator as UNSPECIFIED_ERROR with the DEAD bit clear.
 */
static int sbp_queue_data_in(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);
	int ret;

	ret = sbp_rw_data(req);
	if (ret) {
		req->status.status |= cpu_to_be32(
			STATUS_BLOCK_RESP(STATUS_RESP_TRANSPORT_FAILURE) |
			STATUS_BLOCK_DEAD(0) |
			STATUS_BLOCK_LEN(1) |
			STATUS_BLOCK_SBP_STATUS(SBP_STATUS_UNSPECIFIED_ERROR));
		sbp_send_status(req);
		return ret;
	}

	return sbp_send_sense(req);
}
1774
1775/*
1776 * Called after command (no data transfer) or after the write (to device)
1777 * operation is completed
1778 */
static int sbp_queue_status(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	/* report completion status (and any sense data) to the initiator */
	return sbp_send_sense(req);
}
1786
/* TMRs are not implemented by this fabric; nothing to send back. */
static void sbp_queue_tm_rsp(struct se_cmd *se_cmd)
{
}
1790
/*
 * Fabric callback: aborted commands need no extra cleanup here.
 * (Redundant trailing "return" in a void function removed per kernel
 * style.)
 */
static void sbp_aborted_task(struct se_cmd *se_cmd)
{
}
1795
/*
 * Called when the core stops a command: drop our reference so the
 * request is released (via sbp_release_cmd). NOTE(review): the non-zero
 * return appears to signal that the fabric handled the free — confirm
 * against the target core check_stop_free contract.
 */
static int sbp_check_stop_free(struct se_cmd *se_cmd)
{
	struct sbp_target_request *req = container_of(se_cmd,
			struct sbp_target_request, se_cmd);

	transport_generic_free_cmd(&req->se_cmd, 0);
	return 1;
}
1804
/*
 * Count the LUNs currently on a TPG by walking its RCU-protected LUN
 * hlist. The result is only a snapshot under concurrent add/remove.
 */
static int sbp_count_se_tpg_luns(struct se_portal_group *tpg)
{
	struct se_lun *lun;
	int count = 0;

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tpg->tpg_lun_hlist, link)
		count++;
	rcu_read_unlock();

	return count;
}
1817
/*
 * Rebuild the SBP-2 unit directory in the local config ROM to match the
 * current target state: management agent address, unit characteristics,
 * reconnect timeout, one logical_unit_number entry per LUN, plus a unit
 * unique ID leaf holding the port GUID.
 *
 * Any previously published directory is removed first; nothing is
 * re-published when the target is disabled or has no TPG.
 */
static int sbp_update_unit_directory(struct sbp_tport *tport)
{
	struct se_lun *lun;
	int num_luns, num_entries, idx = 0, mgt_agt_addr, ret;
	u32 *data;

	if (tport->unit_directory.data) {
		fw_core_remove_descriptor(&tport->unit_directory);
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	if (!tport->enable || !tport->tpg)
		return 0;

	num_luns = sbp_count_se_tpg_luns(&tport->tpg->se_tpg);

	/*
	 * Number of entries in the final unit directory:
	 *  - all of those in the template
	 *  - management_agent
	 *  - unit_characteristics
	 *  - reconnect_timeout
	 *  - unit unique ID
	 *  - one for each LUN
	 *
	 * MUST NOT include leaf or sub-directory entries
	 */
	num_entries = ARRAY_SIZE(sbp_unit_directory_template) + 4 + num_luns;

	if (tport->directory_id != -1)
		num_entries++;

	/* allocate num_entries + 4 for the header and unique ID leaf */
	data = kcalloc((num_entries + 4), sizeof(u32), GFP_KERNEL);
	if (!data)
		return -ENOMEM;

	/* directory_length */
	data[idx++] = num_entries << 16;

	/* directory_id */
	if (tport->directory_id != -1)
		data[idx++] = (CSR_DIRECTORY_ID << 24) | tport->directory_id;

	/* unit directory template */
	memcpy(&data[idx], sbp_unit_directory_template,
			sizeof(sbp_unit_directory_template));
	idx += ARRAY_SIZE(sbp_unit_directory_template);

	/* management_agent: CSR offset of the register, in quadlets */
	mgt_agt_addr = (tport->mgt_agt->handler.offset - CSR_REGISTER_BASE) / 4;
	data[idx++] = 0x54000000 | (mgt_agt_addr & 0x00ffffff);

	/* unit_characteristics */
	data[idx++] = 0x3a000000 |
		(((tport->mgt_orb_timeout * 2) << 8) & 0xff00) |
		SBP_ORB_FETCH_SIZE;

	/* reconnect_timeout */
	data[idx++] = 0x3d000000 | (tport->max_reconnect_timeout & 0xffff);

	/* unit unique ID (leaf is just after LUNs) */
	data[idx++] = 0x8d000000 | (num_luns + 1);

	rcu_read_lock();
	hlist_for_each_entry_rcu(lun, &tport->tpg->se_tpg.tpg_lun_hlist, link) {
		struct se_device *dev;
		int type;
		/*
		 * rcu_dereference_raw protected by se_lun->lun_group symlink
		 * reference to se_device->dev_group.
		 */
		dev = rcu_dereference_raw(lun->lun_se_dev);
		type = dev->transport->get_device_type(dev);

		/* logical_unit_number */
		data[idx++] = 0x14000000 |
			((type << 16) & 0x1f0000) |
			(lun->unpacked_lun & 0xffff);
	}
	rcu_read_unlock();

	/* unit unique ID leaf: two quadlets holding the 64-bit GUID */
	data[idx++] = 2 << 16;
	data[idx++] = tport->guid >> 32;
	data[idx++] = tport->guid;

	tport->unit_directory.length = idx;
	tport->unit_directory.key = (CSR_DIRECTORY | CSR_UNIT) << 24;
	tport->unit_directory.data = data;

	ret = fw_core_add_descriptor(&tport->unit_directory);
	if (ret < 0) {
		kfree(tport->unit_directory.data);
		tport->unit_directory.data = NULL;
	}

	return ret;
}
1918
Andy Grover343d4752012-07-30 15:54:18 -07001919static ssize_t sbp_parse_wwn(const char *name, u64 *wwn)
Chris Boota511ce32012-04-14 17:50:35 -07001920{
1921 const char *cp;
1922 char c, nibble;
1923 int pos = 0, err;
1924
1925 *wwn = 0;
1926 for (cp = name; cp < &name[SBP_NAMELEN - 1]; cp++) {
1927 c = *cp;
1928 if (c == '\n' && cp[1] == '\0')
1929 continue;
1930 if (c == '\0') {
1931 err = 2;
1932 if (pos != 16)
1933 goto fail;
1934 return cp - name;
1935 }
1936 err = 3;
1937 if (isdigit(c))
1938 nibble = c - '0';
Andy Grover343d4752012-07-30 15:54:18 -07001939 else if (isxdigit(c))
Chris Boota511ce32012-04-14 17:50:35 -07001940 nibble = tolower(c) - 'a' + 10;
1941 else
1942 goto fail;
1943 *wwn = (*wwn << 4) | nibble;
1944 pos++;
1945 }
1946 err = 4;
1947fail:
1948 printk(KERN_INFO "err %u len %zu pos %u\n",
1949 err, cp - name, pos);
1950 return -1;
1951}
1952
1953static ssize_t sbp_format_wwn(char *buf, size_t len, u64 wwn)
1954{
1955 return snprintf(buf, len, "%016llx", wwn);
1956}
1957
/*
 * Validate an initiator ACL name: it must parse as a 16-hex-digit EUI-64.
 * The parsed GUID is discarded; only the validity check matters here.
 */
static int sbp_init_nodeacl(struct se_node_acl *se_nacl, const char *name)
{
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return -EINVAL;
	return 0;
}
1966
/* A LUN was linked into the TPG: republish the unit directory. */
static int sbp_post_link_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);

	return sbp_update_unit_directory(tpg->tport);
}
1975
/*
 * A LUN is about to be unlinked: disable the target if this removes the
 * last LUN, then republish the unit directory to drop its entry.
 */
static void sbp_pre_unlink_lun(
		struct se_portal_group *se_tpg,
		struct se_lun *se_lun)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	int ret;

	if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0)
		tport->enable = 0;

	ret = sbp_update_unit_directory(tport);
	if (ret < 0)
		pr_err("unlink LUN: failed to update unit directory\n");
}
1991
/*
 * configfs: create a portal group ("tpgt_N") under a target port. Only
 * one TPG per port is supported. Registers the port's management agent
 * and the TPG with the target core; both are unwound on failure.
 */
static struct se_portal_group *sbp_make_tpg(
	struct se_wwn *wwn,
	struct config_group *group,
	const char *name)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	struct sbp_tpg *tpg;
	unsigned long tpgt;
	int ret;

	/* the group name must be "tpgt_" followed by a decimal tag */
	if (strstr(name, "tpgt_") != name)
		return ERR_PTR(-EINVAL);
	if (kstrtoul(name + 5, 10, &tpgt) || tpgt > UINT_MAX)
		return ERR_PTR(-EINVAL);

	if (tport->tpg) {
		pr_err("Only one TPG per Unit is possible.\n");
		return ERR_PTR(-EBUSY);
	}

	tpg = kzalloc(sizeof(*tpg), GFP_KERNEL);
	if (!tpg) {
		pr_err("Unable to allocate struct sbp_tpg\n");
		return ERR_PTR(-ENOMEM);
	}

	tpg->tport = tport;
	tpg->tport_tpgt = tpgt;
	tport->tpg = tpg;

	/* default attribute values */
	tport->enable = 0;
	tport->directory_id = -1;
	tport->mgt_orb_timeout = 15;
	tport->max_reconnect_timeout = 5;
	tport->max_logins_per_lun = 1;

	tport->mgt_agt = sbp_management_agent_register(tport);
	if (IS_ERR(tport->mgt_agt)) {
		ret = PTR_ERR(tport->mgt_agt);
		goto out_free_tpg;
	}

	ret = core_tpg_register(wwn, &tpg->se_tpg, SCSI_PROTOCOL_SBP);
	if (ret < 0)
		goto out_unreg_mgt_agt;

	return &tpg->se_tpg;

out_unreg_mgt_agt:
	sbp_management_agent_unregister(tport->mgt_agt);
out_free_tpg:
	tport->tpg = NULL;
	kfree(tpg);
	return ERR_PTR(ret);
}
2050
/* configfs teardown of a TPG: deregister from the core, then free it. */
static void sbp_drop_tpg(struct se_portal_group *se_tpg)
{
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;

	core_tpg_deregister(se_tpg);
	sbp_management_agent_unregister(tport->mgt_agt);
	tport->tpg = NULL;
	kfree(tpg);
}
2061
/*
 * configfs callback: create a target port (WWN) from a group name.
 *
 * The name must parse as an EUI-64 GUID (see sbp_parse_wwn()); it is
 * stored both numerically and re-formatted into tport->tport_name.
 * Returns the embedded se_wwn on success, ERR_PTR(-EINVAL) for a bad
 * name, or ERR_PTR(-ENOMEM) on allocation failure.
 */
static struct se_wwn *sbp_make_tport(
	struct target_fabric_configfs *tf,
	struct config_group *group,
	const char *name)
{
	struct sbp_tport *tport;
	u64 guid = 0;

	if (sbp_parse_wwn(name, &guid) < 0)
		return ERR_PTR(-EINVAL);

	tport = kzalloc(sizeof(*tport), GFP_KERNEL);
	if (!tport) {
		pr_err("Unable to allocate struct sbp_tport\n");
		return ERR_PTR(-ENOMEM);
	}

	tport->guid = guid;
	sbp_format_wwn(tport->tport_name, SBP_NAMELEN, guid);

	return &tport->tport_wwn;
}
2084
/*
 * configfs callback: release a target port allocated by sbp_make_tport().
 * By this point the TPG (if any) has already been dropped, so freeing
 * the tport is all that remains.
 */
static void sbp_drop_tport(struct se_wwn *wwn)
{
	struct sbp_tport *tport =
		container_of(wwn, struct sbp_tport, tport_wwn);

	kfree(tport);
}
2092
/* "version" attribute read: report the fabric module version string */
static ssize_t sbp_wwn_version_show(struct config_item *item, char *page)
{
	return sprintf(page, "FireWire SBP fabric module %s\n", SBP_VERSION);
}
2097
CONFIGFS_ATTR_RO(sbp_wwn_, version);

/* attributes exposed under the fabric WWN directory (read-only) */
static struct configfs_attribute *sbp_wwn_attrs[] = {
	&sbp_wwn_attr_version,
	NULL,
};
2104
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002105static ssize_t sbp_tpg_directory_id_show(struct config_item *item, char *page)
Chris Boota511ce32012-04-14 17:50:35 -07002106{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002107 struct se_portal_group *se_tpg = to_tpg(item);
Chris Boota511ce32012-04-14 17:50:35 -07002108 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2109 struct sbp_tport *tport = tpg->tport;
2110
2111 if (tport->directory_id == -1)
2112 return sprintf(page, "implicit\n");
2113 else
2114 return sprintf(page, "%06x\n", tport->directory_id);
2115}
2116
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002117static ssize_t sbp_tpg_directory_id_store(struct config_item *item,
2118 const char *page, size_t count)
Chris Boota511ce32012-04-14 17:50:35 -07002119{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002120 struct se_portal_group *se_tpg = to_tpg(item);
Chris Boota511ce32012-04-14 17:50:35 -07002121 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2122 struct sbp_tport *tport = tpg->tport;
2123 unsigned long val;
2124
2125 if (tport->enable) {
2126 pr_err("Cannot change the directory_id on an active target.\n");
2127 return -EBUSY;
2128 }
2129
2130 if (strstr(page, "implicit") == page) {
2131 tport->directory_id = -1;
2132 } else {
2133 if (kstrtoul(page, 16, &val) < 0)
2134 return -EINVAL;
2135 if (val > 0xffffff)
2136 return -EINVAL;
2137
2138 tport->directory_id = val;
2139 }
2140
2141 return count;
2142}
2143
/* "enable" attribute read: 1 if the target is enabled, 0 otherwise */
static ssize_t sbp_tpg_enable_show(struct config_item *item, char *page)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->enable);
}
2151
/*
 * "enable" attribute write: bring the target up ("1") or down ("0").
 *
 * Enabling requires at least one mapped LUN; disabling is refused with
 * -EBUSY while any session remains on the TPG's session list.  After
 * the flag is flipped, the unit directory is rebuilt via
 * sbp_update_unit_directory().
 */
static ssize_t sbp_tpg_enable_store(struct config_item *item,
		const char *page, size_t count)
{
	struct se_portal_group *se_tpg = to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	unsigned long val;
	int ret;

	/* only the values 0 and 1 are accepted */
	if (kstrtoul(page, 0, &val) < 0)
		return -EINVAL;
	if ((val != 0) && (val != 1))
		return -EINVAL;

	/* writing the current state is a successful no-op */
	if (tport->enable == val)
		return count;

	if (val) {
		if (sbp_count_se_tpg_luns(&tpg->se_tpg) == 0) {
			pr_err("Cannot enable a target with no LUNs!\n");
			return -EINVAL;
		}
	} else {
		/* XXX: force-shutdown sessions instead? */
		spin_lock_bh(&se_tpg->session_lock);
		if (!list_empty(&se_tpg->tpg_sess_list)) {
			spin_unlock_bh(&se_tpg->session_lock);
			return -EBUSY;
		}
		spin_unlock_bh(&se_tpg->session_lock);
	}

	tport->enable = val;

	/* NOTE(review): if this fails, tport->enable keeps the new value
	 * even though the Config ROM was not updated — verify intent. */
	ret = sbp_update_unit_directory(tport);
	if (ret < 0) {
		pr_err("Could not update Config ROM\n");
		return ret;
	}

	return count;
}
2194
CONFIGFS_ATTR(sbp_tpg_, directory_id);
CONFIGFS_ATTR(sbp_tpg_, enable);

/* attributes exposed directly in the tpgt_N directory */
static struct configfs_attribute *sbp_tpg_base_attrs[] = {
	&sbp_tpg_attr_directory_id,
	&sbp_tpg_attr_enable,
	NULL,
};
2203
/* "attrib/mgt_orb_timeout" read: current management ORB timeout value */
static ssize_t sbp_tpg_attrib_mgt_orb_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->mgt_orb_timeout);
}
2212
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002213static ssize_t sbp_tpg_attrib_mgt_orb_timeout_store(struct config_item *item,
2214 const char *page, size_t count)
Chris Boota511ce32012-04-14 17:50:35 -07002215{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002216 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Chris Boota511ce32012-04-14 17:50:35 -07002217 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2218 struct sbp_tport *tport = tpg->tport;
2219 unsigned long val;
2220 int ret;
2221
2222 if (kstrtoul(page, 0, &val) < 0)
2223 return -EINVAL;
2224 if ((val < 1) || (val > 127))
2225 return -EINVAL;
2226
2227 if (tport->mgt_orb_timeout == val)
2228 return count;
2229
2230 tport->mgt_orb_timeout = val;
2231
2232 ret = sbp_update_unit_directory(tport);
2233 if (ret < 0)
2234 return ret;
2235
2236 return count;
2237}
2238
/* "attrib/max_reconnect_timeout" read: current reconnect timeout limit */
static ssize_t sbp_tpg_attrib_max_reconnect_timeout_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_reconnect_timeout);
}
2247
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002248static ssize_t sbp_tpg_attrib_max_reconnect_timeout_store(struct config_item *item,
2249 const char *page, size_t count)
Chris Boota511ce32012-04-14 17:50:35 -07002250{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002251 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Chris Boota511ce32012-04-14 17:50:35 -07002252 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2253 struct sbp_tport *tport = tpg->tport;
2254 unsigned long val;
2255 int ret;
2256
2257 if (kstrtoul(page, 0, &val) < 0)
2258 return -EINVAL;
2259 if ((val < 1) || (val > 32767))
2260 return -EINVAL;
2261
2262 if (tport->max_reconnect_timeout == val)
2263 return count;
2264
2265 tport->max_reconnect_timeout = val;
2266
2267 ret = sbp_update_unit_directory(tport);
2268 if (ret < 0)
2269 return ret;
2270
2271 return count;
2272}
2273
/* "attrib/max_logins_per_lun" read: current per-LUN login limit */
static ssize_t sbp_tpg_attrib_max_logins_per_lun_show(struct config_item *item,
		char *page)
{
	struct se_portal_group *se_tpg = attrib_to_tpg(item);
	struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
	struct sbp_tport *tport = tpg->tport;
	return sprintf(page, "%d\n", tport->max_logins_per_lun);
}
2282
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002283static ssize_t sbp_tpg_attrib_max_logins_per_lun_store(struct config_item *item,
2284 const char *page, size_t count)
Chris Boota511ce32012-04-14 17:50:35 -07002285{
Christoph Hellwig2eafd722015-10-03 15:32:55 +02002286 struct se_portal_group *se_tpg = attrib_to_tpg(item);
Chris Boota511ce32012-04-14 17:50:35 -07002287 struct sbp_tpg *tpg = container_of(se_tpg, struct sbp_tpg, se_tpg);
2288 struct sbp_tport *tport = tpg->tport;
2289 unsigned long val;
2290
2291 if (kstrtoul(page, 0, &val) < 0)
2292 return -EINVAL;
2293 if ((val < 1) || (val > 127))
2294 return -EINVAL;
2295
2296 /* XXX: also check against current count? */
2297
2298 tport->max_logins_per_lun = val;
2299
2300 return count;
2301}
2302
CONFIGFS_ATTR(sbp_tpg_attrib_, mgt_orb_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_reconnect_timeout);
CONFIGFS_ATTR(sbp_tpg_attrib_, max_logins_per_lun);

/* attributes exposed under the tpgt_N/attrib subdirectory */
static struct configfs_attribute *sbp_tpg_attrib_attrs[] = {
	&sbp_tpg_attrib_attr_mgt_orb_timeout,
	&sbp_tpg_attrib_attr_max_reconnect_timeout,
	&sbp_tpg_attrib_attr_max_logins_per_lun,
	NULL,
};
2313
/*
 * Fabric operations table registered with the target core.  Callbacks
 * that SBP has no use for are left NULL (NPIV node ports) or routed to
 * the trivial sbp_check_true/sbp_check_false helpers.
 */
static const struct target_core_fabric_ops sbp_ops = {
	.module				= THIS_MODULE,
	.name				= "sbp",
	.get_fabric_name		= sbp_get_fabric_name,
	.tpg_get_wwn			= sbp_get_fabric_wwn,
	.tpg_get_tag			= sbp_get_tag,
	.tpg_check_demo_mode		= sbp_check_true,
	.tpg_check_demo_mode_cache	= sbp_check_true,
	.tpg_check_demo_mode_write_protect = sbp_check_false,
	.tpg_check_prod_mode_write_protect = sbp_check_false,
	.tpg_get_inst_index		= sbp_tpg_get_inst_index,
	.release_cmd			= sbp_release_cmd,
	.shutdown_session		= sbp_shutdown_session,
	.close_session			= sbp_close_session,
	.sess_get_index			= sbp_sess_get_index,
	.write_pending			= sbp_write_pending,
	.write_pending_status		= sbp_write_pending_status,
	.set_default_node_attributes	= sbp_set_default_node_attrs,
	.get_cmd_state			= sbp_get_cmd_state,
	.queue_data_in			= sbp_queue_data_in,
	.queue_status			= sbp_queue_status,
	.queue_tm_rsp			= sbp_queue_tm_rsp,
	.aborted_task			= sbp_aborted_task,
	.check_stop_free		= sbp_check_stop_free,

	/* configfs fabric hooks (wwn / tpg / lun lifecycle) */
	.fabric_make_wwn		= sbp_make_tport,
	.fabric_drop_wwn		= sbp_drop_tport,
	.fabric_make_tpg		= sbp_make_tpg,
	.fabric_drop_tpg		= sbp_drop_tpg,
	.fabric_post_link		= sbp_post_link_lun,
	.fabric_pre_unlink		= sbp_pre_unlink_lun,
	.fabric_make_np			= NULL,
	.fabric_drop_np			= NULL,
	.fabric_init_nodeacl		= sbp_init_nodeacl,

	/* configfs attribute tables defined above */
	.tfc_wwn_attrs			= sbp_wwn_attrs,
	.tfc_tpg_base_attrs		= sbp_tpg_base_attrs,
	.tfc_tpg_attrib_attrs		= sbp_tpg_attrib_attrs,
};
2353
2354static int __init sbp_init(void)
2355{
Christoph Hellwig9ac89282015-04-08 20:01:35 +02002356 return target_register_template(&sbp_ops);
Chris Boota511ce32012-04-14 17:50:35 -07002357};
2358
Asias He63b91d52013-02-27 12:50:56 +08002359static void __exit sbp_exit(void)
Chris Boota511ce32012-04-14 17:50:35 -07002360{
Christoph Hellwig9ac89282015-04-08 20:01:35 +02002361 target_unregister_template(&sbp_ops);
Chris Boota511ce32012-04-14 17:50:35 -07002362};
2363
MODULE_DESCRIPTION("FireWire SBP fabric driver");
MODULE_LICENSE("GPL");
/* wire module load/unload to the init/exit handlers above */
module_init(sbp_init);
module_exit(sbp_exit);