/*******************************************************************************
 * Filename:  target_core_iblock.c
 *
 * This file contains the Storage Engine <-> Linux BlockIO transport
 * specific functions.
 *
 * (c) Copyright 2003-2013 Datera, Inc.
 *
 * Nicholas A. Bellinger <nab@kernel.org>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 ******************************************************************************/

#include <linux/string.h>
#include <linux/parser.h>
#include <linux/timer.h>
#include <linux/fs.h>
#include <linux/blkdev.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/bio.h>
#include <linux/genhd.h>
#include <linux/file.h>
#include <linux/module.h>
#include <scsi/scsi_proto.h>
#include <asm/unaligned.h>

#include <target/target_core_base.h>
#include <target/target_core_backend.h>

#include "target_core_iblock.h"

#define IBLOCK_MAX_BIO_PER_TASK	 32	/* max # of bios to submit at a time */
#define IBLOCK_BIO_POOL_SIZE	128

static inline struct iblock_dev *IBLOCK_DEV(struct se_device *dev)
{
	return container_of(dev, struct iblock_dev, dev);
}


static int iblock_attach_hba(struct se_hba *hba, u32 host_id)
{
	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
		" Generic Target Core Stack %s\n", hba->hba_id,
		IBLOCK_VERSION, TARGET_CORE_VERSION);
	return 0;
}

static void iblock_detach_hba(struct se_hba *hba)
{
}

static struct se_device *iblock_alloc_device(struct se_hba *hba, const char *name)
{
	struct iblock_dev *ib_dev = NULL;

	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
	if (!ib_dev) {
		pr_err("Unable to allocate struct iblock_dev\n");
		return NULL;
	}

	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);

	return &ib_dev->dev;
}

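/*
 * Claim the backing struct block_device named by udev_path, create the
 * per-device bioset, and copy the queue limits (block size, max sectors,
 * discard granularity) and any T10-PI capability into dev_attrib.
 */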
static int iblock_configure_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct request_queue *q;
	struct block_device *bd = NULL;
	struct blk_integrity *bi;
	fmode_t mode;
	int ret = -ENOMEM;

	if (!(ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)) {
		pr_err("Missing udev_path= parameters for IBLOCK\n");
		return -EINVAL;
	}

	ib_dev->ibd_bio_set = bioset_create(IBLOCK_BIO_POOL_SIZE, 0);
	if (!ib_dev->ibd_bio_set) {
		pr_err("IBLOCK: Unable to create bioset\n");
		goto out;
	}

	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
			ib_dev->ibd_udev_path);

	mode = FMODE_READ|FMODE_EXCL;
	if (!ib_dev->ibd_readonly)
		mode |= FMODE_WRITE;

	bd = blkdev_get_by_path(ib_dev->ibd_udev_path, mode, ib_dev);
	if (IS_ERR(bd)) {
		ret = PTR_ERR(bd);
		goto out_free_bioset;
	}
	ib_dev->ibd_bd = bd;

	q = bdev_get_queue(bd);

	dev->dev_attrib.hw_block_size = bdev_logical_block_size(bd);
	dev->dev_attrib.hw_max_sectors = queue_max_hw_sectors(q);
	dev->dev_attrib.hw_queue_depth = q->nr_requests;

	/*
	 * Check if the underlying struct block_device request_queue supports
	 * the QUEUE_FLAG_DISCARD bit for UNMAP/WRITE_SAME in SCSI + TRIM
	 * in ATA and we need to set TPE=1
	 */
	if (blk_queue_discard(q)) {
		dev->dev_attrib.max_unmap_lba_count =
				q->limits.max_discard_sectors;

		/*
		 * Currently hardcoded to 1 in Linux/SCSI code..
		 */
		dev->dev_attrib.max_unmap_block_desc_count = 1;
		dev->dev_attrib.unmap_granularity =
				q->limits.discard_granularity >> 9;
		dev->dev_attrib.unmap_granularity_alignment =
				q->limits.discard_alignment;

		pr_debug("IBLOCK: BLOCK Discard support available,"
				" disabled by default\n");
	}
	/*
	 * Enable write same emulation for IBLOCK and use 0xFFFF as
	 * the smaller WRITE_SAME(10) only has a two-byte block count.
	 */
	dev->dev_attrib.max_write_same_len = 0xFFFF;

	if (blk_queue_nonrot(q))
		dev->dev_attrib.is_nonrot = 1;

	bi = bdev_get_integrity(bd);
	if (bi) {
		struct bio_set *bs = ib_dev->ibd_bio_set;

		if (!strcmp(bi->name, "T10-DIF-TYPE3-IP") ||
		    !strcmp(bi->name, "T10-DIF-TYPE1-IP")) {
			pr_err("IBLOCK export of blk_integrity: %s not"
			       " supported\n", bi->name);
			ret = -ENOSYS;
			goto out_blkdev_put;
		}

		if (!strcmp(bi->name, "T10-DIF-TYPE3-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE3_PROT;
		} else if (!strcmp(bi->name, "T10-DIF-TYPE1-CRC")) {
			dev->dev_attrib.pi_prot_type = TARGET_DIF_TYPE1_PROT;
		}

		if (dev->dev_attrib.pi_prot_type) {
			if (bioset_integrity_create(bs, IBLOCK_BIO_POOL_SIZE) < 0) {
				pr_err("Unable to allocate bioset for PI\n");
				ret = -ENOMEM;
				goto out_blkdev_put;
			}
			pr_debug("IBLOCK setup BIP bs->bio_integrity_pool: %p\n",
				 bs->bio_integrity_pool);
		}
		dev->dev_attrib.hw_pi_prot_type = dev->dev_attrib.pi_prot_type;
	}

	return 0;

out_blkdev_put:
	blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
out_free_bioset:
	bioset_free(ib_dev->ibd_bio_set);
	ib_dev->ibd_bio_set = NULL;
out:
	return ret;
}

static void iblock_dev_call_rcu(struct rcu_head *p)
{
	struct se_device *dev = container_of(p, struct se_device, rcu_head);
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	kfree(ib_dev);
}

static void iblock_free_device(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);

	if (ib_dev->ibd_bd != NULL)
		blkdev_put(ib_dev->ibd_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
	if (ib_dev->ibd_bio_set != NULL)
		bioset_free(ib_dev->ibd_bio_set);

	call_rcu(&dev->rcu_head, iblock_dev_call_rcu);
}

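/*
 * Scale the number of logical blocks reported for READ CAPACITY when the
 * block size exported to the initiator differs from the logical block
 * size of the backing block device.
 */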
static unsigned long long iblock_emulate_read_cap_with_block_size(
	struct se_device *dev,
	struct block_device *bd,
	struct request_queue *q)
{
	unsigned long long blocks_long = (div_u64(i_size_read(bd->bd_inode),
					bdev_logical_block_size(bd)) - 1);
	u32 block_size = bdev_logical_block_size(bd);

	if (block_size == dev->dev_attrib.block_size)
		return blocks_long;

	switch (block_size) {
	case 4096:
		switch (dev->dev_attrib.block_size) {
		case 2048:
			blocks_long <<= 1;
			break;
		case 1024:
			blocks_long <<= 2;
			break;
		case 512:
			blocks_long <<= 3;
		default:
			break;
		}
		break;
	case 2048:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 1;
			break;
		case 1024:
			blocks_long <<= 1;
			break;
		case 512:
			blocks_long <<= 2;
			break;
		default:
			break;
		}
		break;
	case 1024:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 2;
			break;
		case 2048:
			blocks_long >>= 1;
			break;
		case 512:
			blocks_long <<= 1;
			break;
		default:
			break;
		}
		break;
	case 512:
		switch (dev->dev_attrib.block_size) {
		case 4096:
			blocks_long >>= 3;
			break;
		case 2048:
			blocks_long >>= 2;
			break;
		case 1024:
			blocks_long >>= 1;
			break;
		default:
			break;
		}
		break;
	default:
		break;
	}

	return blocks_long;
}

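/*
 * Drop one reference on the pending I/O count; the final reference
 * reports SAM status back to the target core and frees the iblock_req.
 */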
static void iblock_complete_cmd(struct se_cmd *cmd)
{
	struct iblock_req *ibr = cmd->priv;
	u8 status;

	if (!atomic_dec_and_test(&ibr->pending))
		return;

	if (atomic_read(&ibr->ib_bio_err_cnt))
		status = SAM_STAT_CHECK_CONDITION;
	else
		status = SAM_STAT_GOOD;

	target_complete_cmd(cmd, status);
	kfree(ibr);
}

static void iblock_bio_done(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;
	struct iblock_req *ibr = cmd->priv;

	/*
	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
	 */
	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
		err = -EIO;

	if (err != 0) {
		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
			" err: %d\n", bio, err);
		/*
		 * Bump the ib_bio_err_cnt and release bio.
		 */
		atomic_inc(&ibr->ib_bio_err_cnt);
		smp_mb__after_atomic();
	}

	bio_put(bio);

	iblock_complete_cmd(cmd);
}

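/*
 * Allocate a bio from the per-device bioset, pointed at the backing
 * block_device and starting at the given 512-byte sector.
 */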
static struct bio *
iblock_get_bio(struct se_cmd *cmd, sector_t lba, u32 sg_num)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	struct bio *bio;

	/*
	 * Only allocate as many vector entries as the bio code allows us to,
	 * we'll loop later on until we have handled the whole request.
	 */
	if (sg_num > BIO_MAX_PAGES)
		sg_num = BIO_MAX_PAGES;

	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
	if (!bio) {
		pr_err("Unable to allocate memory for bio\n");
		return NULL;
	}

	bio->bi_bdev = ib_dev->ibd_bd;
	bio->bi_private = cmd;
	bio->bi_end_io = &iblock_bio_done;
	bio->bi_iter.bi_sector = lba;

	return bio;
}

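/*
 * Submit all queued bios within a single blk_plug so the block layer can
 * merge and batch them.
 */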
static void iblock_submit_bios(struct bio_list *list, int rw)
{
	struct blk_plug plug;
	struct bio *bio;

	blk_start_plug(&plug);
	while ((bio = bio_list_pop(list)))
		submit_bio(rw, bio);
	blk_finish_plug(&plug);
}

static void iblock_end_io_flush(struct bio *bio, int err)
{
	struct se_cmd *cmd = bio->bi_private;

	if (err)
		pr_err("IBLOCK: cache flush failed: %d\n", err);

	if (cmd) {
		if (err)
			target_complete_cmd(cmd, SAM_STAT_CHECK_CONDITION);
		else
			target_complete_cmd(cmd, SAM_STAT_GOOD);
	}

	bio_put(bio);
}

/*
 * Implement SYNCHRONIZE CACHE.  Note that we can't handle lba ranges and must
 * always flush the whole cache.
 */
static sense_reason_t
iblock_execute_sync_cache(struct se_cmd *cmd)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(cmd->se_dev);
	int immed = (cmd->t_task_cdb[1] & 0x2);
	struct bio *bio;

	/*
	 * If the Immediate bit is set, queue up the GOOD response
	 * for this SYNCHRONIZE_CACHE op.
	 */
	if (immed)
		target_complete_cmd(cmd, SAM_STAT_GOOD);

	bio = bio_alloc(GFP_KERNEL, 0);
	bio->bi_end_io = iblock_end_io_flush;
	bio->bi_bdev = ib_dev->ibd_bd;
	if (!immed)
		bio->bi_private = cmd;
	submit_bio(WRITE_FLUSH, bio);
	return 0;
}

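/* Translate UNMAP into a discard on the backing block device. */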
static sense_reason_t
iblock_execute_unmap(struct se_cmd *cmd, sector_t lba, sector_t nolb)
{
	struct block_device *bdev = IBLOCK_DEV(cmd->se_dev)->ibd_bd;
	int ret;

	ret = blkdev_issue_discard(bdev, lba, nolb, GFP_KERNEL, 0);
	if (ret < 0) {
		pr_err("blkdev_issue_discard() failed: %d\n", ret);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	return 0;
}

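/*
 * Emulate WRITE_SAME by repeatedly adding the single-block payload to
 * bios until the requested number of sectors has been covered.
 */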
static sense_reason_t
iblock_execute_write_same(struct se_cmd *cmd)
{
	struct iblock_req *ibr;
	struct scatterlist *sg;
	struct bio *bio;
	struct bio_list list;
	sector_t block_lba = cmd->t_task_lba;
	sector_t sectors = sbc_get_write_same_sectors(cmd);

	if (cmd->prot_op) {
		pr_err("WRITE_SAME: Protection information with IBLOCK"
		       " backends not supported\n");
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}
	sg = &cmd->t_data_sg[0];

	if (cmd->t_data_nents > 1 ||
	    sg->length != cmd->se_dev->dev_attrib.block_size) {
		pr_err("WRITE_SAME: Illegal SGL t_data_nents: %u length: %u"
			" block_size: %u\n", cmd->t_data_nents, sg->length,
			cmd->se_dev->dev_attrib.block_size);
		return TCM_INVALID_CDB_FIELD;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	bio = iblock_get_bio(cmd, block_lba, 1);
	if (!bio)
		goto fail_free_ibr;

	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 1);

	while (sectors) {
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {

			bio = iblock_get_bio(cmd, block_lba, 1);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sectors -= 1;
	}

	iblock_submit_bios(&list, WRITE);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

enum {
	Opt_udev_path, Opt_readonly, Opt_force, Opt_err
};

static match_table_t tokens = {
	{Opt_udev_path, "udev_path=%s"},
	{Opt_readonly, "readonly=%d"},
	{Opt_force, "force=%d"},
	{Opt_err, NULL}
};

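/* Parse the configfs control string: udev_path=, readonly= and force=. */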
static ssize_t iblock_set_configfs_dev_params(struct se_device *dev,
		const char *page, ssize_t count)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	char *orig, *ptr, *arg_p, *opts;
	substring_t args[MAX_OPT_ARGS];
	int ret = 0, token;
	unsigned long tmp_readonly;

	opts = kstrdup(page, GFP_KERNEL);
	if (!opts)
		return -ENOMEM;

	orig = opts;

	while ((ptr = strsep(&opts, ",\n")) != NULL) {
		if (!*ptr)
			continue;

		token = match_token(ptr, tokens, args);
		switch (token) {
		case Opt_udev_path:
			if (ib_dev->ibd_bd) {
				pr_err("Unable to set udev_path= while"
					" ib_dev->ibd_bd exists\n");
				ret = -EEXIST;
				goto out;
			}
			if (match_strlcpy(ib_dev->ibd_udev_path, &args[0],
				SE_UDEV_PATH_LEN) == 0) {
				ret = -EINVAL;
				break;
			}
			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
					ib_dev->ibd_udev_path);
			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
			break;
		case Opt_readonly:
			arg_p = match_strdup(&args[0]);
			if (!arg_p) {
				ret = -ENOMEM;
				break;
			}
			ret = kstrtoul(arg_p, 0, &tmp_readonly);
			kfree(arg_p);
			if (ret < 0) {
				pr_err("kstrtoul() failed for"
						" readonly=\n");
				goto out;
			}
			ib_dev->ibd_readonly = tmp_readonly;
			pr_debug("IBLOCK: readonly: %d\n", ib_dev->ibd_readonly);
			break;
		case Opt_force:
			break;
		default:
			break;
		}
	}

out:
	kfree(orig);
	return (!ret) ? count : ret;
}

static ssize_t iblock_show_configfs_dev_params(struct se_device *dev, char *b)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	char buf[BDEVNAME_SIZE];
	ssize_t bl = 0;

	if (bd)
		bl += sprintf(b + bl, "iBlock device: %s",
				bdevname(bd, buf));
	if (ib_dev->ibd_flags & IBDF_HAS_UDEV_PATH)
		bl += sprintf(b + bl, "  UDEV PATH: %s",
				ib_dev->ibd_udev_path);
	bl += sprintf(b + bl, "  readonly: %d\n", ib_dev->ibd_readonly);

	bl += sprintf(b + bl, "        ");
	if (bd) {
		bl += sprintf(b + bl, "Major: %d Minor: %d  %s\n",
			MAJOR(bd->bd_dev), MINOR(bd->bd_dev), (!bd->bd_contains) ?
			"" : (bd->bd_holder == ib_dev) ?
			"CLAIMED: IBLOCK" : "CLAIMED: OS");
	} else {
		bl += sprintf(b + bl, "Major: 0 Minor: 0\n");
	}

	return bl;
}

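/*
 * Attach a bio_integrity_payload carrying the command's protection
 * information scatterlist to the given bio.
 */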
static int
iblock_alloc_bip(struct se_cmd *cmd, struct bio *bio)
{
	struct se_device *dev = cmd->se_dev;
	struct blk_integrity *bi;
	struct bio_integrity_payload *bip;
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct scatterlist *sg;
	int i, rc;

	bi = bdev_get_integrity(ib_dev->ibd_bd);
	if (!bi) {
		pr_err("Unable to locate bio_integrity\n");
		return -ENODEV;
	}

	bip = bio_integrity_alloc(bio, GFP_NOIO, cmd->t_prot_nents);
	if (!bip) {
		pr_err("Unable to allocate bio_integrity_payload\n");
		return -ENOMEM;
	}

	bip->bip_iter.bi_size = (cmd->data_length / dev->dev_attrib.block_size) *
			 dev->prot_length;
	bip->bip_iter.bi_sector = bio->bi_iter.bi_sector;

	pr_debug("IBLOCK BIP Size: %u Sector: %llu\n", bip->bip_iter.bi_size,
		 (unsigned long long)bip->bip_iter.bi_sector);

	for_each_sg(cmd->t_prot_sg, sg, cmd->t_prot_nents, i) {

		rc = bio_integrity_add_page(bio, sg_page(sg), sg->length,
					    sg->offset);
		if (rc != sg->length) {
			pr_err("bio_integrity_add_page() failed; %d\n", rc);
			return -ENOMEM;
		}

		pr_debug("Added bio integrity page: %p length: %d offset; %d\n",
			 sg_page(sg), sg->length, sg->offset);
	}

	return 0;
}

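/*
 * Map a READ/WRITE command's data scatterlist onto one or more bios,
 * choose WRITE_FUA vs. WRITE for writes based on the queue's
 * write-cache/FUA flags, and submit them to the backing block device.
 */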
static sense_reason_t
iblock_execute_rw(struct se_cmd *cmd, struct scatterlist *sgl, u32 sgl_nents,
		  enum dma_data_direction data_direction)
{
	struct se_device *dev = cmd->se_dev;
	struct iblock_req *ibr;
	struct bio *bio, *bio_start;
	struct bio_list list;
	struct scatterlist *sg;
	u32 sg_num = sgl_nents;
	sector_t block_lba;
	unsigned bio_cnt;
	int rw = 0;
	int i;

	if (data_direction == DMA_TO_DEVICE) {
		struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
		struct request_queue *q = bdev_get_queue(ib_dev->ibd_bd);
		/*
		 * Force writethrough using WRITE_FUA if a volatile write cache
		 * is not enabled, or if initiator set the Force Unit Access bit.
		 */
		if (q->flush_flags & REQ_FUA) {
			if (cmd->se_cmd_flags & SCF_FUA)
				rw = WRITE_FUA;
			else if (!(q->flush_flags & REQ_FLUSH))
				rw = WRITE_FUA;
			else
				rw = WRITE;
		} else {
			rw = WRITE;
		}
	} else {
		rw = READ;
	}

	/*
	 * Convert the blocksize advertised to the initiator to the 512 byte
	 * units unconditionally used by the Linux block layer.
	 */
	if (dev->dev_attrib.block_size == 4096)
		block_lba = (cmd->t_task_lba << 3);
	else if (dev->dev_attrib.block_size == 2048)
		block_lba = (cmd->t_task_lba << 2);
	else if (dev->dev_attrib.block_size == 1024)
		block_lba = (cmd->t_task_lba << 1);
	else if (dev->dev_attrib.block_size == 512)
		block_lba = cmd->t_task_lba;
	else {
		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
			" %u\n", dev->dev_attrib.block_size);
		return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
	}

	ibr = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
	if (!ibr)
		goto fail;
	cmd->priv = ibr;

	if (!sgl_nents) {
		atomic_set(&ibr->pending, 1);
		iblock_complete_cmd(cmd);
		return 0;
	}

	bio = iblock_get_bio(cmd, block_lba, sgl_nents);
	if (!bio)
		goto fail_free_ibr;

	bio_start = bio;
	bio_list_init(&list);
	bio_list_add(&list, bio);

	atomic_set(&ibr->pending, 2);
	bio_cnt = 1;

	for_each_sg(sgl, sg, sgl_nents, i) {
		/*
		 * XXX: if the length the device accepts is shorter than the
		 *	length of the S/G list entry this will cause an
		 *	endless loop.  Better hope no driver uses huge pages.
		 */
		while (bio_add_page(bio, sg_page(sg), sg->length, sg->offset)
				!= sg->length) {
			if (bio_cnt >= IBLOCK_MAX_BIO_PER_TASK) {
				iblock_submit_bios(&list, rw);
				bio_cnt = 0;
			}

			bio = iblock_get_bio(cmd, block_lba, sg_num);
			if (!bio)
				goto fail_put_bios;

			atomic_inc(&ibr->pending);
			bio_list_add(&list, bio);
			bio_cnt++;
		}

		/* Always in 512 byte units for Linux/Block */
		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
		sg_num--;
	}

	if (cmd->prot_type && dev->dev_attrib.pi_prot_type) {
		int rc = iblock_alloc_bip(cmd, bio_start);
		if (rc)
			goto fail_put_bios;
	}

	iblock_submit_bios(&list, rw);
	iblock_complete_cmd(cmd);
	return 0;

fail_put_bios:
	while ((bio = bio_list_pop(&list)))
		bio_put(bio);
fail_free_ibr:
	kfree(ibr);
fail:
	return TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
}

static sector_t iblock_get_blocks(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return iblock_emulate_read_cap_with_block_size(dev, bd, q);
}

static sector_t iblock_get_alignment_offset_lbas(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int ret;

	ret = bdev_alignment_offset(bd);
	if (ret == -1)
		return 0;

	/* convert offset-bytes to offset-lbas */
	return ret / bdev_logical_block_size(bd);
}

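/*
 * Logical blocks per physical block exponent, derived from the ratio of
 * the backing device's physical to logical block size.
 */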
static unsigned int iblock_get_lbppbe(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	int logs_per_phys = bdev_physical_block_size(bd) / bdev_logical_block_size(bd);

	return ilog2(logs_per_phys);
}

static unsigned int iblock_get_io_min(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_min(bd);
}

static unsigned int iblock_get_io_opt(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;

	return bdev_io_opt(bd);
}

static struct sbc_ops iblock_sbc_ops = {
	.execute_rw		= iblock_execute_rw,
	.execute_sync_cache	= iblock_execute_sync_cache,
	.execute_write_same	= iblock_execute_write_same,
	.execute_unmap		= iblock_execute_unmap,
};

static sense_reason_t
iblock_parse_cdb(struct se_cmd *cmd)
{
	return sbc_parse_cdb(cmd, &iblock_sbc_ops);
}

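/* Report whether the backing device has a volatile write cache enabled. */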
static bool iblock_get_write_cache(struct se_device *dev)
{
	struct iblock_dev *ib_dev = IBLOCK_DEV(dev);
	struct block_device *bd = ib_dev->ibd_bd;
	struct request_queue *q = bdev_get_queue(bd);

	return q->flush_flags & REQ_FLUSH;
}

static const struct target_backend_ops iblock_ops = {
	.name			= "iblock",
	.inquiry_prod		= "IBLOCK",
	.inquiry_rev		= IBLOCK_VERSION,
	.owner			= THIS_MODULE,
	.attach_hba		= iblock_attach_hba,
	.detach_hba		= iblock_detach_hba,
	.alloc_device		= iblock_alloc_device,
	.configure_device	= iblock_configure_device,
	.free_device		= iblock_free_device,
	.parse_cdb		= iblock_parse_cdb,
	.set_configfs_dev_params = iblock_set_configfs_dev_params,
	.show_configfs_dev_params = iblock_show_configfs_dev_params,
	.get_device_type	= sbc_get_device_type,
	.get_blocks		= iblock_get_blocks,
	.get_alignment_offset_lbas = iblock_get_alignment_offset_lbas,
	.get_lbppbe		= iblock_get_lbppbe,
	.get_io_min		= iblock_get_io_min,
	.get_io_opt		= iblock_get_io_opt,
	.get_write_cache	= iblock_get_write_cache,
	.tb_dev_attrib_attrs	= sbc_attrib_attrs,
};

static int __init iblock_module_init(void)
{
	return transport_backend_register(&iblock_ops);
}

static void __exit iblock_module_exit(void)
{
	target_backend_unregister(&iblock_ops);
}

MODULE_DESCRIPTION("TCM IBLOCK subsystem plugin");
MODULE_AUTHOR("nab@Linux-iSCSI.org");
MODULE_LICENSE("GPL");

module_init(iblock_module_init);
module_exit(iblock_module_exit);