target: Follow up core updates from AGrover and HCH (round 4)

This patch contains the squashed version of the fourth round of cleanups
from Andy and Christoph, following the heavy lifting done in the preceding
'Eliminate usage of struct se_mem' and 'Make all control CDBs scatter-gather'
changes.  It also includes a conversion of target core and the v3.0
mainline fabric modules (loopback and tcm_fc) to use pr_debug and the
CONFIG_DYNAMIC_DEBUG infrastructure!
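
For reference, the conversion pattern applied throughout these hunks is
sketched below.  This is an illustrative sketch only (not lifted from any
single hunk, and example_alloc() is a made-up helper): with
CONFIG_DYNAMIC_DEBUG enabled, each resulting pr_debug() call site can be
switched on at runtime, e.g. via
"echo 'module tcm_loop +p' > /sys/kernel/debug/dynamic_debug/control".

  #include <linux/printk.h>
  #include <linux/slab.h>

  static int example_alloc(void)
  {
          void *p = kmalloc(64, GFP_KERNEL);

          if (!p) {
                  /* was: printk(KERN_ERR ...) -- always compiled in */
                  pr_err("Unable to allocate example buffer\n");
                  return -ENOMEM;
          }
          /*
           * was: TL_CDB_DEBUG(...) hidden behind
           * CONFIG_LOOPBACK_TARGET_CDB_DEBUG; now a plain pr_debug()
           * call site controlled by dynamic debug.
           */
          pr_debug("allocated example buffer at %p\n", p);
          kfree(p);
          return 0;
  }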

These have been squashed into this final round for v3.1.

target: Remove ifdeffed code in t_g_process_write
target: Remove direct ramdisk code
target: Rename task_sg_num to task_sg_nents
target: Remove custom debug macros for pr_debug. Use pr_err().
target: Remove custom debug macros in mainline fabrics
target: Set WSNZ=1 in block limits VPD. Abort if WRITE_SAME sectors = 0
target: Remove transport do_se_mem_map callback
target: Further simplify transport_free_pages
target: Redo task allocation return value handling
target: Remove extra parentheses
target: change alloc_task call to take *cdb, not *cmd
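
A note on the WSNZ entry above: in the Block Limits VPD page (0xb0), WSNZ
is bit 0 of byte 4, and setting it advertises that WRITE_SAME with a
NUMBER OF LOGICAL BLOCKS field of zero is not supported, which is what the
new zero-sector abort enforces.  The buf[] assignment below mirrors the
target_core_cdb.c hunk further down; the sectors check is an illustrative
sketch only (that hunk is not part of this excerpt, and the exact return
path may differ):

  /* Block Limits VPD (page 0xb0): byte 4, bit 0 is WSNZ */
  buf[4] = 0x01;

  /* ... and in the WRITE_SAME setup path, reject zero blocks: */
  if (!sectors) {
          pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
          return PYX_TRANSPORT_INVALID_CDB_FIELD;
  }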

(nab: Fix bogus struct file assignments in fd_do_readv and fd_do_writev)

Signed-off-by: Andy Grover <agrover@redhat.com>
Reviewed-by: Christoph Hellwig <hch@lst.de>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
diff --git a/drivers/target/loopback/Kconfig b/drivers/target/loopback/Kconfig
index 57dcbc2..abe8ecb 100644
--- a/drivers/target/loopback/Kconfig
+++ b/drivers/target/loopback/Kconfig
@@ -3,9 +3,3 @@
 	help
 	  Say Y here to enable the TCM Virtual SAS target and Linux/SCSI LLD
 	  fabric loopback module.
-
-config LOOPBACK_TARGET_CDB_DEBUG
-	bool "TCM loopback fabric module CDB debug code"
-	depends on LOOPBACK_TARGET
-	help
-	  Say Y here to enable the TCM loopback fabric module CDB debug code
diff --git a/drivers/target/loopback/tcm_loop.c b/drivers/target/loopback/tcm_loop.c
index 99603bc..aa2d6799 100644
--- a/drivers/target/loopback/tcm_loop.c
+++ b/drivers/target/loopback/tcm_loop.c
@@ -79,7 +79,7 @@
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_ATOMIC);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_cmd\n");
+		pr_err("Unable to allocate struct tcm_loop_cmd\n");
 		set_host_byte(sc, DID_ERROR);
 		return NULL;
 	}
@@ -281,7 +281,7 @@
 	struct tcm_loop_hba *tl_hba;
 	struct tcm_loop_tpg *tl_tpg;
 
-	TL_CDB_DEBUG("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
+	pr_debug("tcm_loop_queuecommand() %d:%d:%d:%d got CDB: 0x%02x"
 		" scsi_buf_len: %u\n", sc->device->host->host_no,
 		sc->device->id, sc->device->channel, sc->device->lun,
 		sc->cmnd[0], scsi_bufflen(sc));
@@ -331,7 +331,7 @@
 	 */
 	tl_nexus = tl_hba->tl_nexus;
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to perform device reset without"
+		pr_err("Unable to perform device reset without"
 				" active I_T Nexus\n");
 		return FAILED;
 	}
@@ -344,13 +344,13 @@
 
 	tl_cmd = kmem_cache_zalloc(tcm_loop_cmd_cache, GFP_KERNEL);
 	if (!tl_cmd) {
-		printk(KERN_ERR "Unable to allocate memory for tl_cmd\n");
+		pr_err("Unable to allocate memory for tl_cmd\n");
 		return FAILED;
 	}
 
 	tl_tmr = kzalloc(sizeof(struct tcm_loop_tmr), GFP_KERNEL);
 	if (!tl_tmr) {
-		printk(KERN_ERR "Unable to allocate memory for tl_tmr\n");
+		pr_err("Unable to allocate memory for tl_tmr\n");
 		goto release;
 	}
 	init_waitqueue_head(&tl_tmr->tl_tmr_wait);
@@ -435,7 +435,7 @@
 	sh = scsi_host_alloc(&tcm_loop_driver_template,
 			sizeof(struct tcm_loop_hba));
 	if (!sh) {
-		printk(KERN_ERR "Unable to allocate struct scsi_host\n");
+		pr_err("Unable to allocate struct scsi_host\n");
 		return -ENODEV;
 	}
 	tl_hba->sh = sh;
@@ -454,7 +454,7 @@
 
 	error = scsi_add_host(sh, &tl_hba->dev);
 	if (error) {
-		printk(KERN_ERR "%s: scsi_add_host failed\n", __func__);
+		pr_err("%s: scsi_add_host failed\n", __func__);
 		scsi_host_put(sh);
 		return -ENODEV;
 	}
@@ -495,7 +495,7 @@
 
 	ret = device_register(&tl_hba->dev);
 	if (ret) {
-		printk(KERN_ERR "device_register() failed for"
+		pr_err("device_register() failed for"
 				" tl_hba->dev: %d\n", ret);
 		return -ENODEV;
 	}
@@ -513,24 +513,24 @@
 
 	tcm_loop_primary = root_device_register("tcm_loop_0");
 	if (IS_ERR(tcm_loop_primary)) {
-		printk(KERN_ERR "Unable to allocate tcm_loop_primary\n");
+		pr_err("Unable to allocate tcm_loop_primary\n");
 		return PTR_ERR(tcm_loop_primary);
 	}
 
 	ret = bus_register(&tcm_loop_lld_bus);
 	if (ret) {
-		printk(KERN_ERR "bus_register() failed for tcm_loop_lld_bus\n");
+		pr_err("bus_register() failed for tcm_loop_lld_bus\n");
 		goto dev_unreg;
 	}
 
 	ret = driver_register(&tcm_loop_driverfs);
 	if (ret) {
-		printk(KERN_ERR "driver_register() failed for"
+		pr_err("driver_register() failed for"
 				"tcm_loop_driverfs\n");
 		goto bus_unreg;
 	}
 
-	printk(KERN_INFO "Initialized TCM Loop Core Bus\n");
+	pr_debug("Initialized TCM Loop Core Bus\n");
 	return ret;
 
 bus_unreg:
@@ -546,7 +546,7 @@
 	bus_unregister(&tcm_loop_lld_bus);
 	root_device_unregister(tcm_loop_primary);
 
-	printk(KERN_INFO "Releasing TCM Loop Core BUS\n");
+	pr_debug("Releasing TCM Loop Core BUS\n");
 }
 
 static char *tcm_loop_get_fabric_name(void)
@@ -574,7 +574,7 @@
 	case SCSI_PROTOCOL_ISCSI:
 		return iscsi_get_fabric_proto_ident(se_tpg);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -630,7 +630,7 @@
 		return iscsi_get_pr_transport_id(se_tpg, se_nacl, pr_reg,
 					format_code, buf);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -660,7 +660,7 @@
 		return iscsi_get_pr_transport_id_len(se_tpg, se_nacl, pr_reg,
 					format_code);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -694,7 +694,7 @@
 		return iscsi_parse_pr_out_transport_id(se_tpg, buf, out_tid_len,
 					port_nexus_ptr);
 	default:
-		printk(KERN_ERR "Unknown tl_proto_id: 0x%02x, using"
+		pr_err("Unknown tl_proto_id: 0x%02x, using"
 			" SAS emulation\n", tl_hba->tl_proto_id);
 		break;
 	}
@@ -743,7 +743,7 @@
 
 	tl_nacl = kzalloc(sizeof(struct tcm_loop_nacl), GFP_KERNEL);
 	if (!tl_nacl) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nacl\n");
+		pr_err("Unable to allocate struct tcm_loop_nacl\n");
 		return NULL;
 	}
 
@@ -853,7 +853,7 @@
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_data_in() called for scsi_cmnd: %p"
 		     " cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	sc->result = SAM_STAT_GOOD;
@@ -868,7 +868,7 @@
 				struct tcm_loop_cmd, tl_se_cmd);
 	struct scsi_cmnd *sc = tl_cmd->sc;
 
-	TL_CDB_DEBUG("tcm_loop_queue_status() called for scsi_cmnd: %p"
+	pr_debug("tcm_loop_queue_status() called for scsi_cmnd: %p"
 			" cdb: 0x%02x\n", sc, sc->cmnd[0]);
 
 	if (se_cmd->sense_buffer &&
@@ -943,7 +943,7 @@
 	 */
 	scsi_add_device(tl_hba->sh, 0, tl_tpg->tl_tpgt, lun->unpacked_lun);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Link Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Link Successful\n");
 	return 0;
 }
 
@@ -961,7 +961,7 @@
 	sd = scsi_device_lookup(tl_hba->sh, 0, tl_tpg->tl_tpgt,
 				se_lun->unpacked_lun);
 	if (!sd) {
-		printk(KERN_ERR "Unable to locate struct scsi_device for %d:%d:"
+		pr_err("Unable to locate struct scsi_device for %d:%d:"
 			"%d\n", 0, tl_tpg->tl_tpgt, se_lun->unpacked_lun);
 		return;
 	}
@@ -974,7 +974,7 @@
 	atomic_dec(&tl_tpg->tl_tpg_port_count);
 	smp_mb__after_atomic_dec();
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Port Unlink Successful\n");
+	pr_debug("TCM_Loop_ConfigFS: Port Unlink Successful\n");
 }
 
 /* End items for tcm_loop_port_cit */
@@ -991,14 +991,14 @@
 	int ret = -ENOMEM;
 
 	if (tl_tpg->tl_hba->tl_nexus) {
-		printk(KERN_INFO "tl_tpg->tl_hba->tl_nexus already exists\n");
+		pr_debug("tl_tpg->tl_hba->tl_nexus already exists\n");
 		return -EEXIST;
 	}
 	se_tpg = &tl_tpg->tl_se_tpg;
 
 	tl_nexus = kzalloc(sizeof(struct tcm_loop_nexus), GFP_KERNEL);
 	if (!tl_nexus) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_nexus\n");
+		pr_err("Unable to allocate struct tcm_loop_nexus\n");
 		return -ENOMEM;
 	}
 	/*
@@ -1027,7 +1027,7 @@
 	__transport_register_session(se_tpg, tl_nexus->se_sess->se_node_acl,
 			tl_nexus->se_sess, tl_nexus);
 	tl_tpg->tl_hba->tl_nexus = tl_nexus;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Established I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		name);
 	return 0;
@@ -1053,13 +1053,13 @@
 		return -ENODEV;
 
 	if (atomic_read(&tpg->tl_tpg_port_count)) {
-		printk(KERN_ERR "Unable to remove TCM_Loop I_T Nexus with"
+		pr_err("Unable to remove TCM_Loop I_T Nexus with"
 			" active TPG port count: %d\n",
 			atomic_read(&tpg->tl_tpg_port_count));
 		return -EPERM;
 	}
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
+	pr_debug("TCM_Loop_ConfigFS: Removing I_T Nexus to emulated"
 		" %s Initiator Port: %s\n", tcm_loop_dump_proto_id(tl_hba),
 		tl_nexus->se_sess->se_node_acl->initiatorname);
 	/*
@@ -1115,7 +1115,7 @@
 	 * tcm_loop_make_nexus()
 	 */
 	if (strlen(page) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA Sas Address: %s, exceeds"
+		pr_err("Emulated NAA Sas Address: %s, exceeds"
 				" max: %d\n", page, TL_WWN_ADDR_LEN);
 		return -EINVAL;
 	}
@@ -1124,7 +1124,7 @@
 	ptr = strstr(i_port, "naa.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_SAS) {
-			printk(KERN_ERR "Passed SAS Initiator Port %s does not"
+			pr_err("Passed SAS Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1135,7 +1135,7 @@
 	ptr = strstr(i_port, "fc.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_FCP) {
-			printk(KERN_ERR "Passed FCP Initiator Port %s does not"
+			pr_err("Passed FCP Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1146,7 +1146,7 @@
 	ptr = strstr(i_port, "iqn.");
 	if (ptr) {
 		if (tl_hba->tl_proto_id != SCSI_PROTOCOL_ISCSI) {
-			printk(KERN_ERR "Passed iSCSI Initiator Port %s does not"
+			pr_err("Passed iSCSI Initiator Port %s does not"
 				" match target port protoid: %s\n", i_port,
 				tcm_loop_dump_proto_id(tl_hba));
 			return -EINVAL;
@@ -1154,7 +1154,7 @@
 		port_ptr = &i_port[0];
 		goto check_newline;
 	}
-	printk(KERN_ERR "Unable to locate prefix for emulated Initiator Port:"
+	pr_err("Unable to locate prefix for emulated Initiator Port:"
 			" %s\n", i_port);
 	return -EINVAL;
 	/*
@@ -1194,7 +1194,7 @@
 
 	tpgt_str = strstr(name, "tpgt_");
 	if (!tpgt_str) {
-		printk(KERN_ERR "Unable to locate \"tpgt_#\" directory"
+		pr_err("Unable to locate \"tpgt_#\" directory"
 				" group\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -1202,7 +1202,7 @@
 	tpgt = (unsigned short int) simple_strtoul(tpgt_str, &end_ptr, 0);
 
 	if (tpgt >= TL_TPGS_PER_HBA) {
-		printk(KERN_ERR "Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
+		pr_err("Passed tpgt: %hu exceeds TL_TPGS_PER_HBA:"
 				" %u\n", tpgt, TL_TPGS_PER_HBA);
 		return ERR_PTR(-EINVAL);
 	}
@@ -1218,7 +1218,7 @@
 	if (ret < 0)
 		return ERR_PTR(-ENOMEM);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Allocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 
@@ -1245,7 +1245,7 @@
 	 */
 	core_tpg_deregister(se_tpg);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated Emulated %s"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated Emulated %s"
 		" Target Port %s,t,0x%04x\n", tcm_loop_dump_proto_id(tl_hba),
 		config_item_name(&wwn->wwn_group.cg_item), tpgt);
 }
@@ -1266,7 +1266,7 @@
 
 	tl_hba = kzalloc(sizeof(struct tcm_loop_hba), GFP_KERNEL);
 	if (!tl_hba) {
-		printk(KERN_ERR "Unable to allocate struct tcm_loop_hba\n");
+		pr_err("Unable to allocate struct tcm_loop_hba\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	/*
@@ -1286,7 +1286,7 @@
 	}
 	ptr = strstr(name, "iqn.");
 	if (!ptr) {
-		printk(KERN_ERR "Unable to locate prefix for emulated Target "
+		pr_err("Unable to locate prefix for emulated Target "
 				"Port: %s\n", name);
 		ret = -EINVAL;
 		goto out;
@@ -1295,7 +1295,7 @@
 
 check_len:
 	if (strlen(name) >= TL_WWN_ADDR_LEN) {
-		printk(KERN_ERR "Emulated NAA %s Address: %s, exceeds"
+		pr_err("Emulated NAA %s Address: %s, exceeds"
 			" max: %d\n", name, tcm_loop_dump_proto_id(tl_hba),
 			TL_WWN_ADDR_LEN);
 		ret = -EINVAL;
@@ -1314,7 +1314,7 @@
 
 	sh = tl_hba->sh;
 	tcm_loop_hba_no_cnt++;
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Allocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Allocated emulated Target"
 		" %s Address: %s at Linux/SCSI Host ID: %d\n",
 		tcm_loop_dump_proto_id(tl_hba), name, sh->host_no);
 
@@ -1337,7 +1337,7 @@
 	 */
 	device_unregister(&tl_hba->dev);
 
-	printk(KERN_INFO "TCM_Loop_ConfigFS: Deallocated emulated Target"
+	pr_debug("TCM_Loop_ConfigFS: Deallocated emulated Target"
 		" SAS Address: %s at Linux/SCSI Host ID: %d\n",
 		config_item_name(&wwn->wwn_group.cg_item), host_no);
 }
@@ -1373,7 +1373,7 @@
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "loopback");
 	if (IS_ERR(fabric)) {
-		printk(KERN_ERR "tcm_loop_register_configfs() failed!\n");
+		pr_err("tcm_loop_register_configfs() failed!\n");
 		return PTR_ERR(fabric);
 	}
 	/*
@@ -1464,7 +1464,7 @@
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		printk(KERN_ERR "target_fabric_configfs_register() for"
+		pr_err("target_fabric_configfs_register() for"
 				" TCM_Loop failed!\n");
 		target_fabric_configfs_free(fabric);
 		return -1;
@@ -1473,7 +1473,7 @@
 	 * Setup our local pointer to *fabric.
 	 */
 	tcm_loop_fabric_configfs = fabric;
-	printk(KERN_INFO "TCM_LOOP[0] - Set fabric ->"
+	pr_debug("TCM_LOOP[0] - Set fabric ->"
 			" tcm_loop_fabric_configfs\n");
 	return 0;
 }
@@ -1485,7 +1485,7 @@
 
 	target_fabric_configfs_deregister(tcm_loop_fabric_configfs);
 	tcm_loop_fabric_configfs = NULL;
-	printk(KERN_INFO "TCM_LOOP[0] - Cleared"
+	pr_debug("TCM_LOOP[0] - Cleared"
 				" tcm_loop_fabric_configfs\n");
 }
 
@@ -1498,7 +1498,7 @@
 				__alignof__(struct tcm_loop_cmd),
 				0, NULL);
 	if (!tcm_loop_cmd_cache) {
-		printk(KERN_ERR "kmem_cache_create() for"
+		pr_debug("kmem_cache_create() for"
 			" tcm_loop_cmd_cache failed\n");
 		return -ENOMEM;
 	}
diff --git a/drivers/target/loopback/tcm_loop.h b/drivers/target/loopback/tcm_loop.h
index 7e9f7ab..6b76c7a 100644
--- a/drivers/target/loopback/tcm_loop.h
+++ b/drivers/target/loopback/tcm_loop.h
@@ -16,12 +16,6 @@
  */
 #define TL_SCSI_MAX_CMD_LEN		32
 
-#ifdef CONFIG_LOOPBACK_TARGET_CDB_DEBUG
-# define TL_CDB_DEBUG(x...)		printk(KERN_INFO x)
-#else
-# define TL_CDB_DEBUG(x...)
-#endif
-
 struct tcm_loop_cmd {
 	/* State of Linux/SCSI CDB+Data descriptor */
 	u32 sc_cmd_state;
diff --git a/drivers/target/target_core_alua.c b/drivers/target/target_core_alua.c
index dba412f..98c98a3 100644
--- a/drivers/target/target_core_alua.c
+++ b/drivers/target/target_core_alua.c
@@ -167,7 +167,7 @@
 	int alua_access_state, primary = 0, rc;
 	u16 tg_pt_id, rtpi;
 
-	if (!(l_port))
+	if (!l_port)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
 	buf = transport_kmap_first_data_page(cmd);
@@ -177,24 +177,24 @@
 	 * for the local tg_pt_gp.
 	 */
 	l_tg_pt_gp_mem = l_port->sep_alua_tg_pt_gp_mem;
-	if (!(l_tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
+	if (!l_tg_pt_gp_mem) {
+		pr_err("Unable to access l_port->sep_alua_tg_pt_gp_mem\n");
 		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		goto out;
 	}
 	spin_lock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	l_tg_pt_gp = l_tg_pt_gp_mem->tg_pt_gp;
-	if (!(l_tg_pt_gp)) {
+	if (!l_tg_pt_gp) {
 		spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
+		pr_err("Unable to access *l_tg_pt_gp_mem->tg_pt_gp\n");
 		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		goto out;
 	}
 	rc = (l_tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_EXPLICT_ALUA);
 	spin_unlock(&l_tg_pt_gp_mem->tg_pt_gp_mem_lock);
 
-	if (!(rc)) {
-		printk(KERN_INFO "Unable to process SET_TARGET_PORT_GROUPS"
+	if (!rc) {
+		pr_debug("Unable to process SET_TARGET_PORT_GROUPS"
 				" while TPGS_EXPLICT_ALUA is disabled\n");
 		rc = PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		goto out;
@@ -249,7 +249,7 @@
 			list_for_each_entry(tg_pt_gp,
 					&su_dev->t10_alua.tg_pt_gps_list,
 					tg_pt_gp_list) {
-				if (!(tg_pt_gp->tg_pt_gp_valid_id))
+				if (!tg_pt_gp->tg_pt_gp_valid_id)
 					continue;
 
 				if (tg_pt_id != tg_pt_gp->tg_pt_gp_id)
@@ -498,7 +498,7 @@
 	struct t10_alua_tg_pt_gp_member *tg_pt_gp_mem;
 	int out_alua_state, nonop_delay_msecs;
 
-	if (!(port))
+	if (!port)
 		return 0;
 	/*
 	 * First, check for a struct se_port specific secondary ALUA target port
@@ -506,7 +506,7 @@
 	 */
 	if (atomic_read(&port->sep_tg_pt_secondary_offline)) {
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
-		printk(KERN_INFO "ALUA: Got secondary offline status for local"
+		pr_debug("ALUA: Got secondary offline status for local"
 				" target port\n");
 		*alua_ascq = ASCQ_04H_ALUA_OFFLINE;
 		return 1;
@@ -548,7 +548,7 @@
 	 */
 	case ALUA_ACCESS_STATE_OFFLINE:
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n",
+		pr_err("Unknown ALUA access state: 0x%02x\n",
 				out_alua_state);
 		return -EINVAL;
 	}
@@ -580,7 +580,7 @@
 		*primary = 0;
 		break;
 	default:
-		printk(KERN_ERR "Unknown ALUA access state: 0x%02x\n", state);
+		pr_err("Unknown ALUA access state: 0x%02x\n", state);
 		return -EINVAL;
 	}
 
@@ -638,7 +638,7 @@
 	 * The ALUA Active/NonOptimized access state delay can be disabled
 	 * in via configfs with a value of zero
 	 */
-	if (!(cmd->alua_nonop_delay))
+	if (!cmd->alua_nonop_delay)
 		return 0;
 	/*
 	 * struct se_cmd->alua_nonop_delay gets set by a target port group
@@ -667,7 +667,7 @@
 
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file) || !file || !file->f_dentry) {
-		printk(KERN_ERR "filp_open(%s) for ALUA metadata failed\n",
+		pr_err("filp_open(%s) for ALUA metadata failed\n",
 			path);
 		return -ENODEV;
 	}
@@ -681,7 +681,7 @@
 	set_fs(old_fs);
 
 	if (ret < 0) {
-		printk(KERN_ERR "Error writing ALUA metadata file: %s\n", path);
+		pr_err("Error writing ALUA metadata file: %s\n", path);
 		filp_close(file, NULL);
 		return -EIO;
 	}
@@ -778,7 +778,7 @@
 			 * se_deve->se_lun_acl pointer may be NULL for a
 			 * entry created without explict Node+MappedLUN ACLs
 			 */
-			if (!(lacl))
+			if (!lacl)
 				continue;
 
 			if (explict &&
@@ -820,7 +820,7 @@
 	 */
 	atomic_set(&tg_pt_gp->tg_pt_gp_alua_access_state, new_state);
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" from primary access state %s to %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, core_alua_dump_state(old_state),
@@ -851,8 +851,8 @@
 		return -EINVAL;
 
 	md_buf = kzalloc(l_tg_pt_gp->tg_pt_gp_md_buf_len, GFP_KERNEL);
-	if (!(md_buf)) {
-		printk("Unable to allocate buf for ALUA metadata\n");
+	if (!md_buf) {
+		pr_err("Unable to allocate buf for ALUA metadata\n");
 		return -ENOMEM;
 	}
 
@@ -867,7 +867,7 @@
 	 * we only do transition on the passed *l_tp_pt_gp, and not
 	 * on all of the matching target port groups IDs in default_lu_gp.
 	 */
-	if (!(lu_gp->lu_gp_id)) {
+	if (!lu_gp->lu_gp_id) {
 		/*
 		 * core_alua_do_transition_tg_pt() will always return
 		 * success.
@@ -899,7 +899,7 @@
 				&su_dev->t10_alua.tg_pt_gps_list,
 				tg_pt_gp_list) {
 
-			if (!(tg_pt_gp->tg_pt_gp_valid_id))
+			if (!tg_pt_gp->tg_pt_gp_valid_id)
 				continue;
 			/*
 			 * If the target behavior port asymmetric access state
@@ -941,7 +941,7 @@
 	}
 	spin_unlock(&lu_gp->lu_gp_lock);
 
-	printk(KERN_INFO "Successfully processed LU Group: %s all ALUA TG PT"
+	pr_debug("Successfully processed LU Group: %s all ALUA TG PT"
 		" Group IDs: %hu %s transition to primary state: %s\n",
 		config_item_name(&lu_gp->lu_gp_group.cg_item),
 		l_tg_pt_gp->tg_pt_gp_id, (explict) ? "explict" : "implict",
@@ -1001,9 +1001,9 @@
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if (!(tg_pt_gp)) {
+	if (!tg_pt_gp) {
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_ERR "Unable to complete secondary state"
+		pr_err("Unable to complete secondary state"
 				" transition\n");
 		return -EINVAL;
 	}
@@ -1022,7 +1022,7 @@
 			ALUA_STATUS_ALTERED_BY_EXPLICT_STPG :
 			ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA;
 
-	printk(KERN_INFO "Successful %s ALUA transition TG PT Group: %s ID: %hu"
+	pr_debug("Successful %s ALUA transition TG PT Group: %s ID: %hu"
 		" to secondary access state: %s\n", (explict) ? "explict" :
 		"implict", config_item_name(&tg_pt_gp->tg_pt_gp_group.cg_item),
 		tg_pt_gp->tg_pt_gp_id, (offline) ? "OFFLINE" : "ONLINE");
@@ -1040,8 +1040,8 @@
 	 */
 	if (port->sep_tg_pt_secondary_write_md) {
 		md_buf = kzalloc(md_buf_len, GFP_KERNEL);
-		if (!(md_buf)) {
-			printk(KERN_ERR "Unable to allocate md_buf for"
+		if (!md_buf) {
+			pr_err("Unable to allocate md_buf for"
 				" secondary ALUA access metadata\n");
 			return -ENOMEM;
 		}
@@ -1062,8 +1062,8 @@
 	struct t10_alua_lu_gp *lu_gp;
 
 	lu_gp = kmem_cache_zalloc(t10_alua_lu_gp_cache, GFP_KERNEL);
-	if (!(lu_gp)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp\n");
+	if (!lu_gp) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&lu_gp->lu_gp_node);
@@ -1088,14 +1088,14 @@
 	 * The lu_gp->lu_gp_id may only be set once..
 	 */
 	if (lu_gp->lu_gp_valid_id) {
-		printk(KERN_WARNING "ALUA LU Group already has a valid ID,"
+		pr_warn("ALUA LU Group already has a valid ID,"
 			" ignoring request\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&lu_gps_lock);
 	if (alua_lu_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA alua_lu_gps_count:"
+		pr_err("Maximum ALUA alua_lu_gps_count:"
 				" 0x0000ffff reached\n");
 		spin_unlock(&lu_gps_lock);
 		kmem_cache_free(t10_alua_lu_gp_cache, lu_gp);
@@ -1107,10 +1107,10 @@
 
 	list_for_each_entry(lu_gp_tmp, &lu_gps_list, lu_gp_node) {
 		if (lu_gp_tmp->lu_gp_id == lu_gp_id_tmp) {
-			if (!(lu_gp_id))
+			if (!lu_gp_id)
 				goto again;
 
-			printk(KERN_WARNING "ALUA Logical Unit Group ID: %hu"
+			pr_warn("ALUA Logical Unit Group ID: %hu"
 				" already exists, ignoring request\n",
 				lu_gp_id);
 			spin_unlock(&lu_gps_lock);
@@ -1133,8 +1133,8 @@
 	struct t10_alua_lu_gp_member *lu_gp_mem;
 
 	lu_gp_mem = kmem_cache_zalloc(t10_alua_lu_gp_mem_cache, GFP_KERNEL);
-	if (!(lu_gp_mem)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_lu_gp_member\n");
+	if (!lu_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_lu_gp_member\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&lu_gp_mem->lu_gp_mem_list);
@@ -1218,7 +1218,7 @@
 		return;
 
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
-	if (!(lu_gp_mem))
+	if (!lu_gp_mem)
 		return;
 
 	while (atomic_read(&lu_gp_mem->lu_gp_mem_ref_cnt))
@@ -1226,7 +1226,7 @@
 
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = lu_gp_mem->lu_gp;
-	if ((lu_gp)) {
+	if (lu_gp) {
 		spin_lock(&lu_gp->lu_gp_lock);
 		if (lu_gp_mem->lu_gp_assoc) {
 			list_del(&lu_gp_mem->lu_gp_mem_list);
@@ -1248,10 +1248,10 @@
 
 	spin_lock(&lu_gps_lock);
 	list_for_each_entry(lu_gp, &lu_gps_list, lu_gp_node) {
-		if (!(lu_gp->lu_gp_valid_id))
+		if (!lu_gp->lu_gp_valid_id)
 			continue;
 		ci = &lu_gp->lu_gp_group.cg_item;
-		if (!(strcmp(config_item_name(ci), name))) {
+		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&lu_gp->lu_gp_ref_cnt);
 			spin_unlock(&lu_gps_lock);
 			return lu_gp;
@@ -1307,8 +1307,8 @@
 	struct t10_alua_tg_pt_gp *tg_pt_gp;
 
 	tg_pt_gp = kmem_cache_zalloc(t10_alua_tg_pt_gp_cache, GFP_KERNEL);
-	if (!(tg_pt_gp)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp\n");
+	if (!tg_pt_gp) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp\n");
 		return NULL;
 	}
 	INIT_LIST_HEAD(&tg_pt_gp->tg_pt_gp_list);
@@ -1356,14 +1356,14 @@
 	 * The tg_pt_gp->tg_pt_gp_id may only be set once..
 	 */
 	if (tg_pt_gp->tg_pt_gp_valid_id) {
-		printk(KERN_WARNING "ALUA TG PT Group already has a valid ID,"
+		pr_warn("ALUA TG PT Group already has a valid ID,"
 			" ignoring request\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	if (su_dev->t10_alua.alua_tg_pt_gps_count == 0x0000ffff) {
-		printk(KERN_ERR "Maximum ALUA alua_tg_pt_gps_count:"
+		pr_err("Maximum ALUA alua_tg_pt_gps_count:"
 			" 0x0000ffff reached\n");
 		spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 		kmem_cache_free(t10_alua_tg_pt_gp_cache, tg_pt_gp);
@@ -1376,10 +1376,10 @@
 	list_for_each_entry(tg_pt_gp_tmp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
 		if (tg_pt_gp_tmp->tg_pt_gp_id == tg_pt_gp_id_tmp) {
-			if (!(tg_pt_gp_id))
+			if (!tg_pt_gp_id)
 				goto again;
 
-			printk(KERN_ERR "ALUA Target Port Group ID: %hu already"
+			pr_err("ALUA Target Port Group ID: %hu already"
 				" exists, ignoring request\n", tg_pt_gp_id);
 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			return -EINVAL;
@@ -1403,8 +1403,8 @@
 
 	tg_pt_gp_mem = kmem_cache_zalloc(t10_alua_tg_pt_gp_mem_cache,
 				GFP_KERNEL);
-	if (!(tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to allocate struct t10_alua_tg_pt_gp_member\n");
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to allocate struct t10_alua_tg_pt_gp_member\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1491,7 +1491,7 @@
 		return;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem))
+	if (!tg_pt_gp_mem)
 		return;
 
 	while (atomic_read(&tg_pt_gp_mem->tg_pt_gp_mem_ref_cnt))
@@ -1499,7 +1499,7 @@
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		spin_lock(&tg_pt_gp->tg_pt_gp_lock);
 		if (tg_pt_gp_mem->tg_pt_gp_assoc) {
 			list_del(&tg_pt_gp_mem->tg_pt_gp_mem_list);
@@ -1524,10 +1524,10 @@
 	spin_lock(&su_dev->t10_alua.tg_pt_gps_lock);
 	list_for_each_entry(tg_pt_gp, &su_dev->t10_alua.tg_pt_gps_list,
 			tg_pt_gp_list) {
-		if (!(tg_pt_gp->tg_pt_gp_valid_id))
+		if (!tg_pt_gp->tg_pt_gp_valid_id)
 			continue;
 		ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
-		if (!(strcmp(config_item_name(ci), name))) {
+		if (!strcmp(config_item_name(ci), name)) {
 			atomic_inc(&tg_pt_gp->tg_pt_gp_ref_cnt);
 			spin_unlock(&su_dev->t10_alua.tg_pt_gps_lock);
 			return tg_pt_gp;
@@ -1592,12 +1592,12 @@
 		return len;
 
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem))
+	if (!tg_pt_gp_mem)
 		return len;
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		tg_pt_ci = &tg_pt_gp->tg_pt_gp_group.cg_item;
 		len += sprintf(page, "TG Port Alias: %s\nTG Port Group ID:"
 			" %hu\nTG Port Primary Access State: %s\nTG Port "
@@ -1634,7 +1634,7 @@
 	lun = port->sep_lun;
 
 	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
-		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for"
+		pr_warn("SPC3_ALUA_EMULATED not enabled for"
 			" %s/tpgt_%hu/%s\n", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg),
 			config_item_name(&lun->lun_group.cg_item));
@@ -1642,7 +1642,7 @@
 	}
 
 	if (count > TG_PT_GROUP_NAME_BUF) {
-		printk(KERN_ERR "ALUA Target Port Group alias too large!\n");
+		pr_err("ALUA Target Port Group alias too large!\n");
 		return -EINVAL;
 	}
 	memset(buf, 0, TG_PT_GROUP_NAME_BUF);
@@ -1659,26 +1659,26 @@
 		 */
 		tg_pt_gp_new = core_alua_get_tg_pt_gp_by_name(su_dev,
 					strstrip(buf));
-		if (!(tg_pt_gp_new))
+		if (!tg_pt_gp_new)
 			return -ENODEV;
 	}
 	tg_pt_gp_mem = port->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem)) {
+	if (!tg_pt_gp_mem) {
 		if (tg_pt_gp_new)
 			core_alua_put_tg_pt_gp_from_name(tg_pt_gp_new);
-		printk(KERN_ERR "NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
+		pr_err("NULL struct se_port->sep_alua_tg_pt_gp_mem pointer\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 	tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-	if ((tg_pt_gp)) {
+	if (tg_pt_gp) {
 		/*
 		 * Clearing an existing tg_pt_gp association, and replacing
 		 * with the default_tg_pt_gp.
 		 */
-		if (!(tg_pt_gp_new)) {
-			printk(KERN_INFO "Target_Core_ConfigFS: Moving"
+		if (!tg_pt_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Moving"
 				" %s/tpgt_%hu/%s from ALUA Target Port Group:"
 				" alua/%s, ID: %hu back to"
 				" default_tg_pt_gp\n",
@@ -1707,7 +1707,7 @@
 	 */
 	__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem, tg_pt_gp_new);
 	spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
+	pr_debug("Target_Core_ConfigFS: %s %s/tpgt_%hu/%s to ALUA"
 		" Target Port Group: alua/%s, ID: %hu\n", (move) ?
 		"Moving" : "Adding", tpg->se_tpg_tfo->tpg_get_wwn(tpg),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
@@ -1744,11 +1744,11 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_access_type\n");
+		pr_err("Unable to extract alua_access_type\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1) && (tmp != 2) && (tmp != 3)) {
-		printk(KERN_ERR "Illegal value for alua_access_type:"
+		pr_err("Illegal value for alua_access_type:"
 				" %lu\n", tmp);
 		return -EINVAL;
 	}
@@ -1782,11 +1782,11 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract nonop_delay_msecs\n");
+		pr_err("Unable to extract nonop_delay_msecs\n");
 		return -EINVAL;
 	}
 	if (tmp > ALUA_MAX_NONOP_DELAY_MSECS) {
-		printk(KERN_ERR "Passed nonop_delay_msecs: %lu, exceeds"
+		pr_err("Passed nonop_delay_msecs: %lu, exceeds"
 			" ALUA_MAX_NONOP_DELAY_MSECS: %d\n", tmp,
 			ALUA_MAX_NONOP_DELAY_MSECS);
 		return -EINVAL;
@@ -1813,11 +1813,11 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract trans_delay_msecs\n");
+		pr_err("Unable to extract trans_delay_msecs\n");
 		return -EINVAL;
 	}
 	if (tmp > ALUA_MAX_TRANS_DELAY_MSECS) {
-		printk(KERN_ERR "Passed trans_delay_msecs: %lu, exceeds"
+		pr_err("Passed trans_delay_msecs: %lu, exceeds"
 			" ALUA_MAX_TRANS_DELAY_MSECS: %d\n", tmp,
 			ALUA_MAX_TRANS_DELAY_MSECS);
 		return -EINVAL;
@@ -1844,11 +1844,11 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract preferred ALUA value\n");
+		pr_err("Unable to extract preferred ALUA value\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for preferred ALUA: %lu\n", tmp);
+		pr_err("Illegal value for preferred ALUA: %lu\n", tmp);
 		return -EINVAL;
 	}
 	tg_pt_gp->tg_pt_gp_pref = (int)tmp;
@@ -1858,7 +1858,7 @@
 
 ssize_t core_alua_show_offline_bit(struct se_lun *lun, char *page)
 {
-	if (!(lun->lun_sep))
+	if (!lun->lun_sep)
 		return -ENODEV;
 
 	return sprintf(page, "%d\n",
@@ -1874,22 +1874,22 @@
 	unsigned long tmp;
 	int ret;
 
-	if (!(lun->lun_sep))
+	if (!lun->lun_sep)
 		return -ENODEV;
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_tg_pt_offline value\n");
+		pr_err("Unable to extract alua_tg_pt_offline value\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for alua_tg_pt_offline: %lu\n",
+		pr_err("Illegal value for alua_tg_pt_offline: %lu\n",
 				tmp);
 		return -EINVAL;
 	}
 	tg_pt_gp_mem = lun->lun_sep->sep_alua_tg_pt_gp_mem;
-	if (!(tg_pt_gp_mem)) {
-		printk(KERN_ERR "Unable to locate *tg_pt_gp_mem\n");
+	if (!tg_pt_gp_mem) {
+		pr_err("Unable to locate *tg_pt_gp_mem\n");
 		return -EINVAL;
 	}
 
@@ -1918,13 +1918,13 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_tg_pt_status\n");
+		pr_err("Unable to extract alua_tg_pt_status\n");
 		return -EINVAL;
 	}
 	if ((tmp != ALUA_STATUS_NONE) &&
 	    (tmp != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
 	    (tmp != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
-		printk(KERN_ERR "Illegal value for alua_tg_pt_status: %lu\n",
+		pr_err("Illegal value for alua_tg_pt_status: %lu\n",
 				tmp);
 		return -EINVAL;
 	}
@@ -1951,11 +1951,11 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_tg_pt_write_md\n");
+		pr_err("Unable to extract alua_tg_pt_write_md\n");
 		return -EINVAL;
 	}
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for alua_tg_pt_write_md:"
+		pr_err("Illegal value for alua_tg_pt_write_md:"
 				" %lu\n", tmp);
 		return -EINVAL;
 	}
@@ -1979,7 +1979,7 @@
 	    !(dev->se_sub_dev->se_dev_attrib.emulate_alua)) || force_pt) {
 		alua->alua_type = SPC_ALUA_PASSTHROUGH;
 		alua->alua_state_check = &core_alua_state_check_nop;
-		printk(KERN_INFO "%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
+		pr_debug("%s: Using SPC_ALUA_PASSTHROUGH, no ALUA"
 			" emulation\n", dev->transport->name);
 		return 0;
 	}
@@ -1988,7 +1988,7 @@
 	 * use emulated ALUA.
 	 */
 	if (dev->transport->get_device_rev(dev) >= SCSI_3) {
-		printk(KERN_INFO "%s: Enabling ALUA Emulation for SPC-3"
+		pr_debug("%s: Enabling ALUA Emulation for SPC-3"
 			" device\n", dev->transport->name);
 		/*
 		 * Associate this struct se_device with the default ALUA
@@ -2005,13 +2005,13 @@
 				default_lu_gp);
 		spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 
-		printk(KERN_INFO "%s: Adding to default ALUA LU Group:"
+		pr_debug("%s: Adding to default ALUA LU Group:"
 			" core/alua/lu_gps/default_lu_gp\n",
 			dev->transport->name);
 	} else {
 		alua->alua_type = SPC2_ALUA_DISABLED;
 		alua->alua_state_check = &core_alua_state_check_nop;
-		printk(KERN_INFO "%s: Disabling ALUA Emulation for SPC-2"
+		pr_debug("%s: Disabling ALUA Emulation for SPC-2"
 			" device\n", dev->transport->name);
 	}
 
diff --git a/drivers/target/target_core_cdb.c b/drivers/target/target_core_cdb.c
index 418282d..9828300 100644
--- a/drivers/target/target_core_cdb.c
+++ b/drivers/target/target_core_cdb.c
@@ -73,7 +73,7 @@
 	 * payload going back for EVPD=0
 	 */
 	if (cmd->data_length < 6) {
-		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+		pr_err("SCSI Inquiry payload length: %u"
 			" too small for EVPD=0\n", cmd->data_length);
 		return -EINVAL;
 	}
@@ -327,7 +327,7 @@
 
 		spin_lock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 		tg_pt_gp = tg_pt_gp_mem->tg_pt_gp;
-		if (!(tg_pt_gp)) {
+		if (!tg_pt_gp) {
 			spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
 			goto check_lu_gp;
 		}
@@ -358,12 +358,12 @@
 			goto check_scsi_name;
 		}
 		lu_gp_mem = dev->dev_alua_lu_gp_mem;
-		if (!(lu_gp_mem))
+		if (!lu_gp_mem)
 			goto check_scsi_name;
 
 		spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 		lu_gp = lu_gp_mem->lu_gp;
-		if (!(lu_gp)) {
+		if (!lu_gp) {
 			spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 			goto check_scsi_name;
 		}
@@ -475,14 +475,14 @@
 		have_tp = 1;
 
 	if (cmd->data_length < (0x10 + 4)) {
-		printk(KERN_INFO "Received data_length: %u"
+		pr_debug("Received data_length: %u"
 			" too small for EVPD 0xb0\n",
 			cmd->data_length);
 		return -EINVAL;
 	}
 
 	if (have_tp && cmd->data_length < (0x3c + 4)) {
-		printk(KERN_INFO "Received data_length: %u"
+		pr_debug("Received data_length: %u"
 			" too small for TPE=1 EVPD 0xb0\n",
 			cmd->data_length);
 		have_tp = 0;
@@ -491,6 +491,9 @@
 	buf[0] = dev->transport->get_device_type(dev);
 	buf[3] = have_tp ? 0x3c : 0x10;
 
+	/* Set WSNZ to 1 */
+	buf[4] = 0x01;
+
 	/*
 	 * Set OPTIMAL TRANSFER LENGTH GRANULARITY
 	 */
@@ -667,7 +670,7 @@
 	 * payload length left for the next outgoing EVPD metadata
 	 */
 	if (cmd->data_length < 4) {
-		printk(KERN_ERR "SCSI Inquiry payload length: %u"
+		pr_err("SCSI Inquiry payload length: %u"
 			" too small for EVPD=1\n", cmd->data_length);
 		return -EINVAL;
 	}
@@ -685,7 +688,7 @@
 		}
 
 	transport_kunmap_first_data_page(cmd);
-	printk(KERN_ERR "Unknown VPD Code: 0x%02x\n", cdb[2]);
+	pr_err("Unknown VPD Code: 0x%02x\n", cdb[2]);
 	return -EINVAL;
 }
 
@@ -891,7 +894,7 @@
 		length += target_modesense_control(dev, &buf[offset+length]);
 		break;
 	default:
-		printk(KERN_ERR "Got Unknown Mode Page: 0x%02x\n",
+		pr_err("Got Unknown Mode Page: 0x%02x\n",
 				cdb[2] & 0x3f);
 		return PYX_TRANSPORT_UNKNOWN_MODE_PAGE;
 	}
@@ -947,14 +950,14 @@
 	int err = 0;
 
 	if (cdb[1] & 0x01) {
-		printk(KERN_ERR "REQUEST_SENSE description emulation not"
+		pr_err("REQUEST_SENSE description emulation not"
 			" supported\n");
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
 
 	buf = transport_kmap_first_data_page(cmd);
 
-	if (!(core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq))) {
+	if (!core_scsi3_ua_clear_for_request_sense(cmd, &ua_asc, &ua_ascq)) {
 		/*
 		 * CURRENT ERROR, UNIT ATTENTION
 		 */
@@ -1028,18 +1031,18 @@
 	buf = transport_kmap_first_data_page(cmd);
 
 	ptr = &buf[offset];
-	printk(KERN_INFO "UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
+	pr_debug("UNMAP: Sub: %s Using dl: %hu bd_dl: %hu size: %hu"
 		" ptr: %p\n", dev->transport->name, dl, bd_dl, size, ptr);
 
 	while (size) {
 		lba = get_unaligned_be64(&ptr[0]);
 		range = get_unaligned_be32(&ptr[8]);
-		printk(KERN_INFO "UNMAP: Using lba: %llu and range: %u\n",
+		pr_debug("UNMAP: Using lba: %llu and range: %u\n",
 				 (unsigned long long)lba, range);
 
 		ret = dev->transport->do_discard(dev, lba, range);
 		if (ret < 0) {
-			printk(KERN_ERR "blkdev_issue_discard() failed: %d\n",
+			pr_err("blkdev_issue_discard() failed: %d\n",
 					ret);
 			goto err;
 		}
@@ -1084,12 +1087,12 @@
 	else
 		range = (dev->transport->get_blocks(dev) - lba);
 
-	printk(KERN_INFO "WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
+	pr_debug("WRITE_SAME UNMAP: LBA: %llu Range: %llu\n",
 		 (unsigned long long)lba, (unsigned long long)range);
 
 	ret = dev->transport->do_discard(dev, lba, range);
 	if (ret < 0) {
-		printk(KERN_INFO "blkdev_issue_discard() failed for WRITE_SAME\n");
+		pr_debug("blkdev_issue_discard() failed for WRITE_SAME\n");
 		return ret;
 	}
 
@@ -1125,7 +1128,7 @@
 			ret = target_emulate_readcapacity_16(cmd);
 			break;
 		default:
-			printk(KERN_ERR "Unsupported SA: 0x%02x\n",
+			pr_err("Unsupported SA: 0x%02x\n",
 				cmd->t_task_cdb[1] & 0x1f);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
@@ -1135,7 +1138,7 @@
 		break;
 	case UNMAP:
 		if (!dev->transport->do_discard) {
-			printk(KERN_ERR "UNMAP emulation not supported for: %s\n",
+			pr_err("UNMAP emulation not supported for: %s\n",
 					dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
@@ -1143,7 +1146,7 @@
 		break;
 	case WRITE_SAME_16:
 		if (!dev->transport->do_discard) {
-			printk(KERN_ERR "WRITE_SAME_16 emulation not supported"
+			pr_err("WRITE_SAME_16 emulation not supported"
 					" for: %s\n", dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
@@ -1155,7 +1158,7 @@
 		switch (service_action) {
 		case WRITE_SAME_32:
 			if (!dev->transport->do_discard) {
-				printk(KERN_ERR "WRITE_SAME_32 SA emulation not"
+				pr_err("WRITE_SAME_32 SA emulation not"
 					" supported for: %s\n",
 					dev->transport->name);
 				return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
@@ -1163,7 +1166,7 @@
 			ret = target_emulate_write_same(task, 1);
 			break;
 		default:
-			printk(KERN_ERR "Unsupported VARIABLE_LENGTH_CMD SA:"
+			pr_err("Unsupported VARIABLE_LENGTH_CMD SA:"
 					" 0x%02x\n", service_action);
 			break;
 		}
@@ -1171,8 +1174,7 @@
 	case SYNCHRONIZE_CACHE:
 	case 0x91: /* SYNCHRONIZE_CACHE_16: */
 		if (!dev->transport->do_sync_cache) {
-			printk(KERN_ERR
-				"SYNCHRONIZE_CACHE emulation not supported"
+			pr_err("SYNCHRONIZE_CACHE emulation not supported"
 				" for: %s\n", dev->transport->name);
 			return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 		}
@@ -1189,7 +1191,7 @@
 	case WRITE_FILEMARKS:
 		break;
 	default:
-		printk(KERN_ERR "Unsupported SCSI Opcode: 0x%02x for %s\n",
+		pr_err("Unsupported SCSI Opcode: 0x%02x for %s\n",
 			cmd->t_task_cdb[0], dev->transport->name);
 		return PYX_TRANSPORT_UNKNOWN_SAM_OPCODE;
 	}
diff --git a/drivers/target/target_core_configfs.c b/drivers/target/target_core_configfs.c
index 6b00810..e56c39d 100644
--- a/drivers/target/target_core_configfs.c
+++ b/drivers/target/target_core_configfs.c
@@ -104,12 +104,12 @@
 {
 	struct target_fabric_configfs *tf;
 
-	if (!(name))
+	if (!name)
 		return NULL;
 
 	mutex_lock(&g_tf_lock);
 	list_for_each_entry(tf, &g_tf_list, tf_list) {
-		if (!(strcmp(tf->tf_name, name))) {
+		if (!strcmp(tf->tf_name, name)) {
 			atomic_inc(&tf->tf_access_cnt);
 			mutex_unlock(&g_tf_lock);
 			return tf;
@@ -130,7 +130,7 @@
 	struct target_fabric_configfs *tf;
 	int ret;
 
-	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> group: %p name:"
+	pr_debug("Target_Core_ConfigFS: REGISTER -> group: %p name:"
 			" %s\n", group, name);
 	/*
 	 * Ensure that TCM subsystem plugins are loaded at this point for
@@ -150,7 +150,7 @@
 	 * registered, but simply provids auto loading logic for modules with
 	 * mkdir(2) system calls with known TCM fabric modules.
 	 */
-	if (!(strncmp(name, "iscsi", 5))) {
+	if (!strncmp(name, "iscsi", 5)) {
 		/*
 		 * Automatically load the LIO Target fabric module when the
 		 * following is called:
@@ -159,11 +159,11 @@
 		 */
 		ret = request_module("iscsi_target_mod");
 		if (ret < 0) {
-			printk(KERN_ERR "request_module() failed for"
+			pr_err("request_module() failed for"
 				" iscsi_target_mod.ko: %d\n", ret);
 			return ERR_PTR(-EINVAL);
 		}
-	} else if (!(strncmp(name, "loopback", 8))) {
+	} else if (!strncmp(name, "loopback", 8)) {
 		/*
 		 * Automatically load the tcm_loop fabric module when the
 		 * following is called:
@@ -172,25 +172,25 @@
 		 */
 		ret = request_module("tcm_loop");
 		if (ret < 0) {
-			printk(KERN_ERR "request_module() failed for"
+			pr_err("request_module() failed for"
 				" tcm_loop.ko: %d\n", ret);
 			return ERR_PTR(-EINVAL);
 		}
 	}
 
 	tf = target_core_get_fabric(name);
-	if (!(tf)) {
-		printk(KERN_ERR "target_core_get_fabric() failed for %s\n",
+	if (!tf) {
+		pr_err("target_core_get_fabric() failed for %s\n",
 			name);
 		return ERR_PTR(-EINVAL);
 	}
-	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Located fabric:"
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Located fabric:"
 			" %s\n", tf->tf_name);
 	/*
 	 * On a successful target_core_get_fabric() look, the returned
 	 * struct target_fabric_configfs *tf will contain a usage reference.
 	 */
-	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
+	pr_debug("Target_Core_ConfigFS: REGISTER tfc_wwn_cit -> %p\n",
 			&TF_CIT_TMPL(tf)->tfc_wwn_cit);
 
 	tf->tf_group.default_groups = tf->tf_default_groups;
@@ -202,14 +202,14 @@
 	config_group_init_type_name(&tf->tf_disc_group, "discovery_auth",
 			&TF_CIT_TMPL(tf)->tfc_discovery_cit);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Allocated Fabric:"
 			" %s\n", tf->tf_group.cg_item.ci_name);
 	/*
 	 * Setup tf_ops.tf_subsys pointer for usage with configfs_depend_item()
 	 */
 	tf->tf_ops.tf_subsys = tf->tf_subsys;
 	tf->tf_fabric = &tf->tf_group.cg_item;
-	printk(KERN_INFO "Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
+	pr_debug("Target_Core_ConfigFS: REGISTER -> Set tf->tf_fabric"
 			" for %s\n", name);
 
 	return &tf->tf_group;
@@ -228,18 +228,18 @@
 	struct config_item *df_item;
 	int i;
 
-	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Looking up %s in"
 		" tf list\n", config_item_name(item));
 
-	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> located fabric:"
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> located fabric:"
 			" %s\n", tf->tf_name);
 	atomic_dec(&tf->tf_access_cnt);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing"
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing"
 			" tf->tf_fabric for %s\n", tf->tf_name);
 	tf->tf_fabric = NULL;
 
-	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing ci"
 			" %s\n", config_item_name(item));
 
 	tf_group = &tf->tf_group;
@@ -307,17 +307,17 @@
 	struct target_fabric_configfs *tf;
 
 	if (!(name)) {
-		printk(KERN_ERR "Unable to locate passed fabric name\n");
+		pr_err("Unable to locate passed fabric name\n");
 		return ERR_PTR(-EINVAL);
 	}
 	if (strlen(name) >= TARGET_FABRIC_NAME_SIZE) {
-		printk(KERN_ERR "Passed name: %s exceeds TARGET_FABRIC"
+		pr_err("Passed name: %s exceeds TARGET_FABRIC"
 			"_NAME_SIZE\n", name);
 		return ERR_PTR(-EINVAL);
 	}
 
 	tf = kzalloc(sizeof(struct target_fabric_configfs), GFP_KERNEL);
-	if (!(tf))
+	if (!tf)
 		return ERR_PTR(-ENOMEM);
 
 	INIT_LIST_HEAD(&tf->tf_list);
@@ -336,9 +336,9 @@
 	list_add_tail(&tf->tf_list, &g_tf_list);
 	mutex_unlock(&g_tf_lock);
 
-	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>"
 			">>>>>>>>>>>>>>\n");
-	printk(KERN_INFO "Initialized struct target_fabric_configfs: %p for"
+	pr_debug("Initialized struct target_fabric_configfs: %p for"
 			" %s\n", tf, tf->tf_name);
 	return tf;
 }
@@ -367,132 +367,132 @@
 {
 	struct target_core_fabric_ops *tfo = &tf->tf_ops;
 
-	if (!(tfo->get_fabric_name)) {
-		printk(KERN_ERR "Missing tfo->get_fabric_name()\n");
+	if (!tfo->get_fabric_name) {
+		pr_err("Missing tfo->get_fabric_name()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->get_fabric_proto_ident)) {
-		printk(KERN_ERR "Missing tfo->get_fabric_proto_ident()\n");
+	if (!tfo->get_fabric_proto_ident) {
+		pr_err("Missing tfo->get_fabric_proto_ident()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_get_wwn)) {
-		printk(KERN_ERR "Missing tfo->tpg_get_wwn()\n");
+	if (!tfo->tpg_get_wwn) {
+		pr_err("Missing tfo->tpg_get_wwn()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_get_tag)) {
-		printk(KERN_ERR "Missing tfo->tpg_get_tag()\n");
+	if (!tfo->tpg_get_tag) {
+		pr_err("Missing tfo->tpg_get_tag()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_get_default_depth)) {
-		printk(KERN_ERR "Missing tfo->tpg_get_default_depth()\n");
+	if (!tfo->tpg_get_default_depth) {
+		pr_err("Missing tfo->tpg_get_default_depth()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_get_pr_transport_id)) {
-		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id()\n");
+	if (!tfo->tpg_get_pr_transport_id) {
+		pr_err("Missing tfo->tpg_get_pr_transport_id()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_get_pr_transport_id_len)) {
-		printk(KERN_ERR "Missing tfo->tpg_get_pr_transport_id_len()\n");
+	if (!tfo->tpg_get_pr_transport_id_len) {
+		pr_err("Missing tfo->tpg_get_pr_transport_id_len()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_check_demo_mode)) {
-		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode()\n");
+	if (!tfo->tpg_check_demo_mode) {
+		pr_err("Missing tfo->tpg_check_demo_mode()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_check_demo_mode_cache)) {
-		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_cache()\n");
+	if (!tfo->tpg_check_demo_mode_cache) {
+		pr_err("Missing tfo->tpg_check_demo_mode_cache()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_check_demo_mode_write_protect)) {
-		printk(KERN_ERR "Missing tfo->tpg_check_demo_mode_write_protect()\n");
+	if (!tfo->tpg_check_demo_mode_write_protect) {
+		pr_err("Missing tfo->tpg_check_demo_mode_write_protect()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_check_prod_mode_write_protect)) {
-		printk(KERN_ERR "Missing tfo->tpg_check_prod_mode_write_protect()\n");
+	if (!tfo->tpg_check_prod_mode_write_protect) {
+		pr_err("Missing tfo->tpg_check_prod_mode_write_protect()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_alloc_fabric_acl)) {
-		printk(KERN_ERR "Missing tfo->tpg_alloc_fabric_acl()\n");
+	if (!tfo->tpg_alloc_fabric_acl) {
+		pr_err("Missing tfo->tpg_alloc_fabric_acl()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_release_fabric_acl)) {
-		printk(KERN_ERR "Missing tfo->tpg_release_fabric_acl()\n");
+	if (!tfo->tpg_release_fabric_acl) {
+		pr_err("Missing tfo->tpg_release_fabric_acl()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->tpg_get_inst_index)) {
-		printk(KERN_ERR "Missing tfo->tpg_get_inst_index()\n");
+	if (!tfo->tpg_get_inst_index) {
+		pr_err("Missing tfo->tpg_get_inst_index()\n");
 		return -EINVAL;
 	}
 	if (!tfo->release_cmd) {
-		printk(KERN_ERR "Missing tfo->release_cmd()\n");
+		pr_err("Missing tfo->release_cmd()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->shutdown_session)) {
-		printk(KERN_ERR "Missing tfo->shutdown_session()\n");
+	if (!tfo->shutdown_session) {
+		pr_err("Missing tfo->shutdown_session()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->close_session)) {
-		printk(KERN_ERR "Missing tfo->close_session()\n");
+	if (!tfo->close_session) {
+		pr_err("Missing tfo->close_session()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->stop_session)) {
-		printk(KERN_ERR "Missing tfo->stop_session()\n");
+	if (!tfo->stop_session) {
+		pr_err("Missing tfo->stop_session()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->fall_back_to_erl0)) {
-		printk(KERN_ERR "Missing tfo->fall_back_to_erl0()\n");
+	if (!tfo->fall_back_to_erl0) {
+		pr_err("Missing tfo->fall_back_to_erl0()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->sess_logged_in)) {
-		printk(KERN_ERR "Missing tfo->sess_logged_in()\n");
+	if (!tfo->sess_logged_in) {
+		pr_err("Missing tfo->sess_logged_in()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->sess_get_index)) {
-		printk(KERN_ERR "Missing tfo->sess_get_index()\n");
+	if (!tfo->sess_get_index) {
+		pr_err("Missing tfo->sess_get_index()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->write_pending)) {
-		printk(KERN_ERR "Missing tfo->write_pending()\n");
+	if (!tfo->write_pending) {
+		pr_err("Missing tfo->write_pending()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->write_pending_status)) {
-		printk(KERN_ERR "Missing tfo->write_pending_status()\n");
+	if (!tfo->write_pending_status) {
+		pr_err("Missing tfo->write_pending_status()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->set_default_node_attributes)) {
-		printk(KERN_ERR "Missing tfo->set_default_node_attributes()\n");
+	if (!tfo->set_default_node_attributes) {
+		pr_err("Missing tfo->set_default_node_attributes()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->get_task_tag)) {
-		printk(KERN_ERR "Missing tfo->get_task_tag()\n");
+	if (!tfo->get_task_tag) {
+		pr_err("Missing tfo->get_task_tag()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->get_cmd_state)) {
-		printk(KERN_ERR "Missing tfo->get_cmd_state()\n");
+	if (!tfo->get_cmd_state) {
+		pr_err("Missing tfo->get_cmd_state()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->queue_data_in)) {
-		printk(KERN_ERR "Missing tfo->queue_data_in()\n");
+	if (!tfo->queue_data_in) {
+		pr_err("Missing tfo->queue_data_in()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->queue_status)) {
-		printk(KERN_ERR "Missing tfo->queue_status()\n");
+	if (!tfo->queue_status) {
+		pr_err("Missing tfo->queue_status()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->queue_tm_rsp)) {
-		printk(KERN_ERR "Missing tfo->queue_tm_rsp()\n");
+	if (!tfo->queue_tm_rsp) {
+		pr_err("Missing tfo->queue_tm_rsp()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->set_fabric_sense_len)) {
-		printk(KERN_ERR "Missing tfo->set_fabric_sense_len()\n");
+	if (!tfo->set_fabric_sense_len) {
+		pr_err("Missing tfo->set_fabric_sense_len()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->get_fabric_sense_len)) {
-		printk(KERN_ERR "Missing tfo->get_fabric_sense_len()\n");
+	if (!tfo->get_fabric_sense_len) {
+		pr_err("Missing tfo->get_fabric_sense_len()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->is_state_remove)) {
-		printk(KERN_ERR "Missing tfo->is_state_remove()\n");
+	if (!tfo->is_state_remove) {
+		pr_err("Missing tfo->is_state_remove()\n");
 		return -EINVAL;
 	}
 	/*
@@ -500,20 +500,20 @@
 	 * tfo->fabric_make_tpg() and tfo->fabric_drop_tpg() in
 	 * target_core_fabric_configfs.c WWN+TPG group context code.
 	 */
-	if (!(tfo->fabric_make_wwn)) {
-		printk(KERN_ERR "Missing tfo->fabric_make_wwn()\n");
+	if (!tfo->fabric_make_wwn) {
+		pr_err("Missing tfo->fabric_make_wwn()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->fabric_drop_wwn)) {
-		printk(KERN_ERR "Missing tfo->fabric_drop_wwn()\n");
+	if (!tfo->fabric_drop_wwn) {
+		pr_err("Missing tfo->fabric_drop_wwn()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->fabric_make_tpg)) {
-		printk(KERN_ERR "Missing tfo->fabric_make_tpg()\n");
+	if (!tfo->fabric_make_tpg) {
+		pr_err("Missing tfo->fabric_make_tpg()\n");
 		return -EINVAL;
 	}
-	if (!(tfo->fabric_drop_tpg)) {
-		printk(KERN_ERR "Missing tfo->fabric_drop_tpg()\n");
+	if (!tfo->fabric_drop_tpg) {
+		pr_err("Missing tfo->fabric_drop_tpg()\n");
 		return -EINVAL;
 	}
 
@@ -533,13 +533,13 @@
 {
 	int ret;
 
-	if (!(tf)) {
-		printk(KERN_ERR "Unable to locate target_fabric_configfs"
+	if (!tf) {
+		pr_err("Unable to locate target_fabric_configfs"
 			" pointer\n");
 		return -EINVAL;
 	}
-	if (!(tf->tf_subsys)) {
-		printk(KERN_ERR "Unable to target struct config_subsystem"
+	if (!tf->tf_subsys) {
+		pr_err("Unable to target struct config_subsystem"
 			" pointer\n");
 		return -EINVAL;
 	}
@@ -547,7 +547,7 @@
 	if (ret < 0)
 		return ret;
 
-	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>"
 		">>>>>>>>>>\n");
 	return 0;
 }
@@ -558,36 +558,36 @@
 {
 	struct configfs_subsystem *su;
 
-	if (!(tf)) {
-		printk(KERN_ERR "Unable to locate passed target_fabric_"
+	if (!tf) {
+		pr_err("Unable to locate passed target_fabric_"
 			"configfs\n");
 		return;
 	}
 	su = tf->tf_subsys;
-	if (!(su)) {
-		printk(KERN_ERR "Unable to locate passed tf->tf_subsys"
+	if (!su) {
+		pr_err("Unable to locate passed tf->tf_subsys"
 			" pointer\n");
 		return;
 	}
-	printk(KERN_INFO "<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< BEGIN FABRIC API >>>>>>>>>>"
 			">>>>>>>>>>>>\n");
 	mutex_lock(&g_tf_lock);
 	if (atomic_read(&tf->tf_access_cnt)) {
 		mutex_unlock(&g_tf_lock);
-		printk(KERN_ERR "Non zero tf->tf_access_cnt for fabric %s\n",
+		pr_err("Non zero tf->tf_access_cnt for fabric %s\n",
 			tf->tf_name);
 		BUG();
 	}
 	list_del(&tf->tf_list);
 	mutex_unlock(&g_tf_lock);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
+	pr_debug("Target_Core_ConfigFS: DEREGISTER -> Releasing tf:"
 			" %s\n", tf->tf_name);
 	tf->tf_module = NULL;
 	tf->tf_subsys = NULL;
 	kfree(tf);
 
-	printk("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
+	pr_debug("<<<<<<<<<<<<<<<<<<<<<< END FABRIC API >>>>>>>>>>>>>>>>>"
 			">>>>>\n");
 }
 EXPORT_SYMBOL(target_fabric_configfs_deregister);
@@ -609,7 +609,7 @@
 									\
 	spin_lock(&se_dev->se_dev_lock);				\
 	dev = se_dev->se_dev_ptr;					\
-	if (!(dev)) {							\
+	if (!dev) {							\
 		spin_unlock(&se_dev->se_dev_lock); 			\
 		return -ENODEV;						\
 	}								\
@@ -633,14 +633,14 @@
 									\
 	spin_lock(&se_dev->se_dev_lock);				\
 	dev = se_dev->se_dev_ptr;					\
-	if (!(dev)) {							\
+	if (!dev) {							\
 		spin_unlock(&se_dev->se_dev_lock);			\
 		return -ENODEV;						\
 	}								\
 	ret = strict_strtoul(page, 0, &val);				\
 	if (ret < 0) {							\
 		spin_unlock(&se_dev->se_dev_lock);                      \
-		printk(KERN_ERR "strict_strtoul() failed with"		\
+		pr_err("strict_strtoul() failed with"		\
 			" ret: %d\n", ret);				\
 		return -EINVAL;						\
 	}								\
@@ -806,7 +806,7 @@
 	struct se_device *dev;
 
 	dev = se_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	return sprintf(page, "T10 VPD Unit Serial Number: %s\n",
@@ -833,13 +833,13 @@
 	 * VPD Unit Serial Number that OS dependent multipath can depend on.
 	 */
 	if (su_dev->su_dev_flags & SDF_FIRMWARE_VPD_UNIT_SERIAL) {
-		printk(KERN_ERR "Underlying SCSI device firmware provided VPD"
+		pr_err("Underlying SCSI device firmware provided VPD"
 			" Unit Serial, ignoring request\n");
 		return -EOPNOTSUPP;
 	}
 
 	if (strlen(page) >= INQUIRY_VPD_SERIAL_LEN) {
-		printk(KERN_ERR "Emulated VPD Unit Serial exceeds"
+		pr_err("Emulated VPD Unit Serial exceeds"
 		" INQUIRY_VPD_SERIAL_LEN: %d\n", INQUIRY_VPD_SERIAL_LEN);
 		return -EOVERFLOW;
 	}
@@ -850,9 +850,9 @@
 	 * could cause negative effects.
 	 */
 	dev = su_dev->se_dev_ptr;
-	if ((dev)) {
+	if (dev) {
 		if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-			printk(KERN_ERR "Unable to set VPD Unit Serial while"
+			pr_err("Unable to set VPD Unit Serial while"
 				" active %d $FABRIC_MOD exports exist\n",
 				atomic_read(&dev->dev_export_obj.obj_access_count));
 			return -EINVAL;
@@ -870,7 +870,7 @@
 			"%s", strstrip(buf));
 	su_dev->su_dev_flags |= SDF_EMULATED_VPD_UNIT_SERIAL;
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
+	pr_debug("Target_Core_ConfigFS: Set emulated VPD Unit Serial:"
 			" %s\n", su_dev->t10_wwn.unit_serial);
 
 	return count;
@@ -892,19 +892,19 @@
 	ssize_t len = 0;
 
 	dev = se_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	memset(buf, 0, VPD_TMP_BUF_SIZE);
 
 	spin_lock(&t10_wwn->t10_vpd_lock);
 	list_for_each_entry(vpd, &t10_wwn->t10_vpd_list, vpd_list) {
-		if (!(vpd->protocol_identifier_set))
+		if (!vpd->protocol_identifier_set)
 			continue;
 
 		transport_dump_vpd_proto_id(vpd, buf, VPD_TMP_BUF_SIZE);
 
-		if ((len + strlen(buf) >= PAGE_SIZE))
+		if (len + strlen(buf) >= PAGE_SIZE)
 			break;
 
 		len += sprintf(page+len, "%s", buf);
@@ -939,7 +939,7 @@
 	ssize_t len = 0;						\
 									\
 	dev = se_dev->se_dev_ptr;					\
-	if (!(dev))							\
+	if (!dev)							\
 		return -ENODEV;						\
 									\
 	spin_lock(&t10_wwn->t10_vpd_lock);				\
@@ -949,19 +949,19 @@
 									\
 		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
 		transport_dump_vpd_assoc(vpd, buf, VPD_TMP_BUF_SIZE);	\
-		if ((len + strlen(buf) >= PAGE_SIZE))			\
+		if (len + strlen(buf) >= PAGE_SIZE)			\
 			break;						\
 		len += sprintf(page+len, "%s", buf);			\
 									\
 		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
 		transport_dump_vpd_ident_type(vpd, buf, VPD_TMP_BUF_SIZE); \
-		if ((len + strlen(buf) >= PAGE_SIZE))			\
+		if (len + strlen(buf) >= PAGE_SIZE)			\
 			break;						\
 		len += sprintf(page+len, "%s", buf);			\
 									\
 		memset(buf, 0, VPD_TMP_BUF_SIZE);			\
 		transport_dump_vpd_ident(vpd, buf, VPD_TMP_BUF_SIZE); \
-		if ((len + strlen(buf) >= PAGE_SIZE))			\
+		if (len + strlen(buf) >= PAGE_SIZE)			\
 			break;						\
 		len += sprintf(page+len, "%s", buf);			\
 	}								\
@@ -1070,7 +1070,7 @@
 
 	spin_lock(&dev->dev_reservation_lock);
 	pr_reg = dev->dev_pr_res_holder;
-	if (!(pr_reg)) {
+	if (!pr_reg) {
 		*len += sprintf(page + *len, "No SPC-3 Reservation holder\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		return *len;
@@ -1096,7 +1096,7 @@
 
 	spin_lock(&dev->dev_reservation_lock);
 	se_nacl = dev->dev_reserved_node_acl;
-	if (!(se_nacl)) {
+	if (!se_nacl) {
 		*len += sprintf(page + *len, "No SPC-2 Reservation holder\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		return *len;
@@ -1115,7 +1115,7 @@
 {
 	ssize_t len = 0;
 
-	if (!(su_dev->se_dev_ptr))
+	if (!su_dev->se_dev_ptr)
 		return -ENODEV;
 
 	switch (su_dev->t10_pr.res_type) {
@@ -1152,7 +1152,7 @@
 	ssize_t len = 0;
 
 	dev = su_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1160,7 +1160,7 @@
 
 	spin_lock(&dev->dev_reservation_lock);
 	pr_reg = dev->dev_pr_res_holder;
-	if (!(pr_reg)) {
+	if (!pr_reg) {
 		len = sprintf(page, "No SPC-3 Reservation holder\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		return len;
@@ -1189,7 +1189,7 @@
 	struct se_subsystem_dev *su_dev,
 	char *page)
 {
-	if (!(su_dev->se_dev_ptr))
+	if (!su_dev->se_dev_ptr)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1216,7 +1216,7 @@
 	ssize_t len = 0;
 
 	dev = su_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1224,7 +1224,7 @@
 
 	spin_lock(&dev->dev_reservation_lock);
 	pr_reg = dev->dev_pr_res_holder;
-	if (!(pr_reg)) {
+	if (!pr_reg) {
 		len = sprintf(page, "No SPC-3 Reservation holder\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		return len;
@@ -1263,7 +1263,7 @@
 	ssize_t len = 0;
 	int reg_count = 0, prf_isid;
 
-	if (!(su_dev->se_dev_ptr))
+	if (!su_dev->se_dev_ptr)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1286,7 +1286,7 @@
 			&i_buf[0] : "", pr_reg->pr_res_key,
 			pr_reg->pr_res_generation);
 
-		if ((len + strlen(buf) >= PAGE_SIZE))
+		if (len + strlen(buf) >= PAGE_SIZE)
 			break;
 
 		len += sprintf(page+len, "%s", buf);
@@ -1294,7 +1294,7 @@
 	}
 	spin_unlock(&su_dev->t10_pr.registration_lock);
 
-	if (!(reg_count))
+	if (!reg_count)
 		len += sprintf(page+len, "None\n");
 
 	return len;
@@ -1314,7 +1314,7 @@
 	ssize_t len = 0;
 
 	dev = su_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1322,7 +1322,7 @@
 
 	spin_lock(&dev->dev_reservation_lock);
 	pr_reg = dev->dev_pr_res_holder;
-	if (!(pr_reg)) {
+	if (!pr_reg) {
 		len = sprintf(page, "No SPC-3 Reservation holder\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		return len;
@@ -1345,7 +1345,7 @@
 {
 	ssize_t len = 0;
 
-	if (!(su_dev->se_dev_ptr))
+	if (!su_dev->se_dev_ptr)
 		return -ENODEV;
 
 	switch (su_dev->t10_pr.res_type) {
@@ -1376,7 +1376,7 @@
 	struct se_subsystem_dev *su_dev,
 	char *page)
 {
-	if (!(su_dev->se_dev_ptr))
+	if (!su_dev->se_dev_ptr)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1395,7 +1395,7 @@
 	struct se_subsystem_dev *su_dev,
 	char *page)
 {
-	if (!(su_dev->se_dev_ptr))
+	if (!su_dev->se_dev_ptr)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
@@ -1447,14 +1447,14 @@
 	u8 type = 0, scope;
 
 	dev = su_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	if (su_dev->t10_pr.res_type != SPC3_PERSISTENT_RESERVATIONS)
 		return 0;
 
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_INFO "Unable to process APTPL metadata while"
+		pr_debug("Unable to process APTPL metadata while"
 			" active fabric exports exist\n");
 		return -EINVAL;
 	}
@@ -1484,7 +1484,7 @@
 				goto out;
 			}
 			if (strlen(i_port) >= PR_APTPL_MAX_IPORT_LEN) {
-				printk(KERN_ERR "APTPL metadata initiator_node="
+				pr_err("APTPL metadata initiator_node="
 					" exceeds PR_APTPL_MAX_IPORT_LEN: %d\n",
 					PR_APTPL_MAX_IPORT_LEN);
 				ret = -EINVAL;
@@ -1498,7 +1498,7 @@
 				goto out;
 			}
 			if (strlen(isid) >= PR_REG_ISID_LEN) {
-				printk(KERN_ERR "APTPL metadata initiator_isid"
+				pr_err("APTPL metadata initiator_isid"
 					"= exceeds PR_REG_ISID_LEN: %d\n",
 					PR_REG_ISID_LEN);
 				ret = -EINVAL;
@@ -1513,7 +1513,7 @@
 			}
 			ret = strict_strtoull(arg_p, 0, &tmp_ll);
 			if (ret < 0) {
-				printk(KERN_ERR "strict_strtoull() failed for"
+				pr_err("strict_strtoull() failed for"
 					" sa_res_key=\n");
 				goto out;
 			}
@@ -1559,7 +1559,7 @@
 				goto out;
 			}
 			if (strlen(t_port) >= PR_APTPL_MAX_TPORT_LEN) {
-				printk(KERN_ERR "APTPL metadata target_node="
+				pr_err("APTPL metadata target_node="
 					" exceeds PR_APTPL_MAX_TPORT_LEN: %d\n",
 					PR_APTPL_MAX_TPORT_LEN);
 				ret = -EINVAL;
@@ -1583,14 +1583,14 @@
 		}
 	}
 
-	if (!(i_port) || !(t_port) || !(sa_res_key)) {
-		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+	if (!i_port || !t_port || !sa_res_key) {
+		pr_err("Illegal parameters for APTPL registration\n");
 		ret = -EINVAL;
 		goto out;
 	}
 
 	if (res_holder && !(type)) {
-		printk(KERN_ERR "Illegal PR type: 0x%02x for reservation"
+		pr_err("Illegal PR type: 0x%02x for reservation"
 				" holder\n", type);
 		ret = -EINVAL;
 		goto out;
@@ -1649,7 +1649,7 @@
 	int bl = 0;
 	ssize_t read_bytes = 0;
 
-	if (!(se_dev->se_dev_ptr))
+	if (!se_dev->se_dev_ptr)
 		return -ENODEV;
 
 	transport_dump_dev_state(se_dev->se_dev_ptr, page, &bl);
@@ -1675,8 +1675,8 @@
 	struct se_hba *hba = se_dev->se_dev_hba;
 	struct se_subsystem_api *t = hba->transport;
 
-	if (!(se_dev->se_dev_su_ptr)) {
-		printk(KERN_ERR "Unable to locate struct se_subsystem_dev>se"
+	if (!se_dev->se_dev_su_ptr) {
+		pr_err("Unable to locate struct se_subsystem_dev>se"
 				"_dev_su_ptr\n");
 		return -EINVAL;
 	}
@@ -1712,7 +1712,7 @@
 	ssize_t read_bytes;
 
 	if (count > (SE_DEV_ALIAS_LEN-1)) {
-		printk(KERN_ERR "alias count: %d exceeds"
+		pr_err("alias count: %d exceeds"
 			" SE_DEV_ALIAS_LEN-1: %u\n", (int)count,
 			SE_DEV_ALIAS_LEN-1);
 		return -EINVAL;
@@ -1722,7 +1722,7 @@
 	read_bytes = snprintf(&se_dev->se_dev_alias[0], SE_DEV_ALIAS_LEN,
 			"%s", page);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set alias: %s\n",
+	pr_debug("Target_Core_ConfigFS: %s/%s set alias: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
 		config_item_name(&se_dev->se_dev_group.cg_item),
 		se_dev->se_dev_alias);
@@ -1758,7 +1758,7 @@
 	ssize_t read_bytes;
 
 	if (count > (SE_UDEV_PATH_LEN-1)) {
-		printk(KERN_ERR "udev_path count: %d exceeds"
+		pr_err("udev_path count: %d exceeds"
 			" SE_UDEV_PATH_LEN-1: %u\n", (int)count,
 			SE_UDEV_PATH_LEN-1);
 		return -EINVAL;
@@ -1768,7 +1768,7 @@
 	read_bytes = snprintf(&se_dev->se_dev_udev_path[0], SE_UDEV_PATH_LEN,
 			"%s", page);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
+	pr_debug("Target_Core_ConfigFS: %s/%s set udev_path: %s\n",
 		config_item_name(&hba->hba_group.cg_item),
 		config_item_name(&se_dev->se_dev_group.cg_item),
 		se_dev->se_dev_udev_path);
@@ -1796,13 +1796,13 @@
 	char *ptr;
 
 	ptr = strstr(page, "1");
-	if (!(ptr)) {
-		printk(KERN_ERR "For dev_enable ops, only valid value"
+	if (!ptr) {
+		pr_err("For dev_enable ops, only valid value"
 				" is \"1\"\n");
 		return -EINVAL;
 	}
-	if ((se_dev->se_dev_ptr)) {
-		printk(KERN_ERR "se_dev->se_dev_ptr already set for storage"
+	if (se_dev->se_dev_ptr) {
+		pr_err("se_dev->se_dev_ptr already set for storage"
 				" object\n");
 		return -EEXIST;
 	}
@@ -1817,7 +1817,7 @@
 		return -EINVAL;
 
 	se_dev->se_dev_ptr = dev;
-	printk(KERN_INFO "Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
+	pr_debug("Target_Core_ConfigFS: Registered se_dev->se_dev_ptr:"
 		" %p\n", se_dev->se_dev_ptr);
 
 	return count;
@@ -1841,22 +1841,22 @@
 	ssize_t len = 0;
 
 	dev = su_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED)
 		return len;
 
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
-	if (!(lu_gp_mem)) {
-		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+	if (!lu_gp_mem) {
+		pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
 				" pointer\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = lu_gp_mem->lu_gp;
-	if ((lu_gp)) {
+	if (lu_gp) {
 		lu_ci = &lu_gp->lu_gp_group.cg_item;
 		len += sprintf(page, "LU Group Alias: %s\nLU Group ID: %hu\n",
 			config_item_name(lu_ci), lu_gp->lu_gp_id);
@@ -1880,17 +1880,17 @@
 	int move = 0;
 
 	dev = su_dev->se_dev_ptr;
-	if (!(dev))
+	if (!dev)
 		return -ENODEV;
 
 	if (su_dev->t10_alua.alua_type != SPC3_ALUA_EMULATED) {
-		printk(KERN_WARNING "SPC3_ALUA_EMULATED not enabled for %s/%s\n",
+		pr_warn("SPC3_ALUA_EMULATED not enabled for %s/%s\n",
 			config_item_name(&hba->hba_group.cg_item),
 			config_item_name(&su_dev->se_dev_group.cg_item));
 		return -EINVAL;
 	}
 	if (count > LU_GROUP_NAME_BUF) {
-		printk(KERN_ERR "ALUA LU Group Alias too large!\n");
+		pr_err("ALUA LU Group Alias too large!\n");
 		return -EINVAL;
 	}
 	memset(buf, 0, LU_GROUP_NAME_BUF);
@@ -1906,27 +1906,27 @@
 		 * core_alua_get_lu_gp_by_name below().
 		 */
 		lu_gp_new = core_alua_get_lu_gp_by_name(strstrip(buf));
-		if (!(lu_gp_new))
+		if (!lu_gp_new)
 			return -ENODEV;
 	}
 	lu_gp_mem = dev->dev_alua_lu_gp_mem;
-	if (!(lu_gp_mem)) {
+	if (!lu_gp_mem) {
 		if (lu_gp_new)
 			core_alua_put_lu_gp_from_name(lu_gp_new);
-		printk(KERN_ERR "NULL struct se_device->dev_alua_lu_gp_mem"
+		pr_err("NULL struct se_device->dev_alua_lu_gp_mem"
 				" pointer\n");
 		return -EINVAL;
 	}
 
 	spin_lock(&lu_gp_mem->lu_gp_mem_lock);
 	lu_gp = lu_gp_mem->lu_gp;
-	if ((lu_gp)) {
+	if (lu_gp) {
 		/*
 		 * Clearing an existing lu_gp association, and replacing
 		 * with NULL
 		 */
-		if (!(lu_gp_new)) {
-			printk(KERN_INFO "Target_Core_ConfigFS: Releasing %s/%s"
+		if (!lu_gp_new) {
+			pr_debug("Target_Core_ConfigFS: Releasing %s/%s"
 				" from ALUA LU Group: core/alua/lu_gps/%s, ID:"
 				" %hu\n",
 				config_item_name(&hba->hba_group.cg_item),
@@ -1951,7 +1951,7 @@
 	__core_alua_attach_lu_gp_mem(lu_gp_mem, lu_gp_new);
 	spin_unlock(&lu_gp_mem->lu_gp_mem_lock);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
+	pr_debug("Target_Core_ConfigFS: %s %s/%s to ALUA LU Group:"
 		" core/alua/lu_gps/%s, ID: %hu\n",
 		(move) ? "Moving" : "Adding",
 		config_item_name(&hba->hba_group.cg_item),
@@ -1995,7 +1995,7 @@
 	 *`echo 1 > $CONFIGFS/core/$HBA/$DEV/dev_enable`
 	 */
 	if (se_dev->se_dev_ptr) {
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling se_free_"
+		pr_debug("Target_Core_ConfigFS: Calling se_free_"
 			"virtual_device() for se_dev_ptr: %p\n",
 			se_dev->se_dev_ptr);
 
@@ -2004,14 +2004,14 @@
 		/*
 		 * Release struct se_subsystem_dev->se_dev_su_ptr..
 		 */
-		printk(KERN_INFO "Target_Core_ConfigFS: Calling t->free_"
+		pr_debug("Target_Core_ConfigFS: Calling t->free_"
 			"device() for se_dev_su_ptr: %p\n",
 			se_dev->se_dev_su_ptr);
 
 		t->free_device(se_dev->se_dev_su_ptr);
 	}
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Deallocating se_subsystem"
+	pr_debug("Target_Core_ConfigFS: Deallocating se_subsystem"
 			"_dev_t: %p\n", se_dev);
 	kfree(se_dev);
 }
@@ -2026,7 +2026,7 @@
 	struct target_core_configfs_attribute *tc_attr = container_of(
 			attr, struct target_core_configfs_attribute, attr);
 
-	if (!(tc_attr->show))
+	if (!tc_attr->show)
 		return -EINVAL;
 
 	return tc_attr->show(se_dev, page);
@@ -2042,7 +2042,7 @@
 	struct target_core_configfs_attribute *tc_attr = container_of(
 			attr, struct target_core_configfs_attribute, attr);
 
-	if (!(tc_attr->store))
+	if (!tc_attr->store)
 		return -EINVAL;
 
 	return tc_attr->store(se_dev, page, count);
@@ -2085,7 +2085,7 @@
 	struct t10_alua_lu_gp *lu_gp,
 	char *page)
 {
-	if (!(lu_gp->lu_gp_valid_id))
+	if (!lu_gp->lu_gp_valid_id)
 		return 0;
 
 	return sprintf(page, "%hu\n", lu_gp->lu_gp_id);
@@ -2102,12 +2102,12 @@
 
 	ret = strict_strtoul(page, 0, &lu_gp_id);
 	if (ret < 0) {
-		printk(KERN_ERR "strict_strtoul() returned %d for"
+		pr_err("strict_strtoul() returned %d for"
 			" lu_gp_id\n", ret);
 		return -EINVAL;
 	}
 	if (lu_gp_id > 0x0000ffff) {
-		printk(KERN_ERR "ALUA lu_gp_id: %lu exceeds maximum:"
+		pr_err("ALUA lu_gp_id: %lu exceeds maximum:"
 			" 0x0000ffff\n", lu_gp_id);
 		return -EINVAL;
 	}
@@ -2116,7 +2116,7 @@
 	if (ret < 0)
 		return -EINVAL;
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Logical Unit"
+	pr_debug("Target_Core_ConfigFS: Set ALUA Logical Unit"
 		" Group: core/alua/lu_gps/%s to ID: %hu\n",
 		config_item_name(&alua_lu_gp_cg->cg_item),
 		lu_gp->lu_gp_id);
@@ -2154,7 +2154,7 @@
 		cur_len++; /* Extra byte for NULL terminator */
 
 		if ((cur_len + len) > PAGE_SIZE) {
-			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+			pr_warn("Ran out of lu_gp_show_attr"
 				"_members buffer\n");
 			break;
 		}
@@ -2218,7 +2218,7 @@
 	config_group_init_type_name(alua_lu_gp_cg, name,
 			&target_core_alua_lu_gp_cit);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Logical Unit"
+	pr_debug("Target_Core_ConfigFS: Allocated ALUA Logical Unit"
 		" Group: core/alua/lu_gps/%s\n",
 		config_item_name(alua_lu_gp_ci));
 
@@ -2233,7 +2233,7 @@
 	struct t10_alua_lu_gp *lu_gp = container_of(to_config_group(item),
 			struct t10_alua_lu_gp, lu_gp_group);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Logical Unit"
+	pr_debug("Target_Core_ConfigFS: Releasing ALUA Logical Unit"
 		" Group: core/alua/lu_gps/%s, ID: %hu\n",
 		config_item_name(item), lu_gp->lu_gp_id);
 	/*
@@ -2292,22 +2292,22 @@
 	unsigned long tmp;
 	int new_state, ret;
 
-	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
-		printk(KERN_ERR "Unable to do implict ALUA on non valid"
+	if (!tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_err("Unable to do implict ALUA on non valid"
 			" tg_pt_gp ID: %hu\n", tg_pt_gp->tg_pt_gp_valid_id);
 		return -EINVAL;
 	}
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk("Unable to extract new ALUA access state from"
+		pr_err("Unable to extract new ALUA access state from"
 				" %s\n", page);
 		return -EINVAL;
 	}
 	new_state = (int)tmp;
 
 	if (!(tg_pt_gp->tg_pt_gp_alua_access_type & TPGS_IMPLICT_ALUA)) {
-		printk(KERN_ERR "Unable to process implict configfs ALUA"
+		pr_err("Unable to process implict configfs ALUA"
 			" transition while TPGS_IMPLICT_ALUA is diabled\n");
 		return -EINVAL;
 	}
@@ -2338,8 +2338,8 @@
 	unsigned long tmp;
 	int new_status, ret;
 
-	if (!(tg_pt_gp->tg_pt_gp_valid_id)) {
-		printk(KERN_ERR "Unable to do set ALUA access status on non"
+	if (!tg_pt_gp->tg_pt_gp_valid_id) {
+		pr_err("Unable to do set ALUA access status on non"
 			" valid tg_pt_gp ID: %hu\n",
 			tg_pt_gp->tg_pt_gp_valid_id);
 		return -EINVAL;
@@ -2347,7 +2347,7 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract new ALUA access status"
+		pr_err("Unable to extract new ALUA access status"
 				" from %s\n", page);
 		return -EINVAL;
 	}
@@ -2356,7 +2356,7 @@
 	if ((new_status != ALUA_STATUS_NONE) &&
 	    (new_status != ALUA_STATUS_ALTERED_BY_EXPLICT_STPG) &&
 	    (new_status != ALUA_STATUS_ALTERED_BY_IMPLICT_ALUA)) {
-		printk(KERN_ERR "Illegal ALUA access status: 0x%02x\n",
+		pr_err("Illegal ALUA access status: 0x%02x\n",
 				new_status);
 		return -EINVAL;
 	}
@@ -2407,12 +2407,12 @@
 
 	ret = strict_strtoul(page, 0, &tmp);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract alua_write_metadata\n");
+		pr_err("Unable to extract alua_write_metadata\n");
 		return -EINVAL;
 	}
 
 	if ((tmp != 0) && (tmp != 1)) {
-		printk(KERN_ERR "Illegal value for alua_write_metadata:"
+		pr_err("Illegal value for alua_write_metadata:"
 			" %lu\n", tmp);
 		return -EINVAL;
 	}
@@ -2494,7 +2494,7 @@
 	struct t10_alua_tg_pt_gp *tg_pt_gp,
 	char *page)
 {
-	if (!(tg_pt_gp->tg_pt_gp_valid_id))
+	if (!tg_pt_gp->tg_pt_gp_valid_id)
 		return 0;
 
 	return sprintf(page, "%hu\n", tg_pt_gp->tg_pt_gp_id);
@@ -2511,12 +2511,12 @@
 
 	ret = strict_strtoul(page, 0, &tg_pt_gp_id);
 	if (ret < 0) {
-		printk(KERN_ERR "strict_strtoul() returned %d for"
+		pr_err("strict_strtoul() returned %d for"
 			" tg_pt_gp_id\n", ret);
 		return -EINVAL;
 	}
 	if (tg_pt_gp_id > 0x0000ffff) {
-		printk(KERN_ERR "ALUA tg_pt_gp_id: %lu exceeds maximum:"
+		pr_err("ALUA tg_pt_gp_id: %lu exceeds maximum:"
 			" 0x0000ffff\n", tg_pt_gp_id);
 		return -EINVAL;
 	}
@@ -2525,7 +2525,7 @@
 	if (ret < 0)
 		return -EINVAL;
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Set ALUA Target Port Group: "
+	pr_debug("Target_Core_ConfigFS: Set ALUA Target Port Group: "
 		"core/alua/tg_pt_gps/%s to ID: %hu\n",
 		config_item_name(&alua_tg_pt_gp_cg->cg_item),
 		tg_pt_gp->tg_pt_gp_id);
@@ -2566,7 +2566,7 @@
 		cur_len++; /* Extra byte for NULL terminator */
 
 		if ((cur_len + len) > PAGE_SIZE) {
-			printk(KERN_WARNING "Ran out of lu_gp_show_attr"
+			pr_warn("Ran out of lu_gp_show_attr"
 				"_members buffer\n");
 			break;
 		}
@@ -2632,7 +2632,7 @@
 	struct config_item *alua_tg_pt_gp_ci = NULL;
 
 	tg_pt_gp = core_alua_allocate_tg_pt_gp(su_dev, name, 0);
-	if (!(tg_pt_gp))
+	if (!tg_pt_gp)
 		return NULL;
 
 	alua_tg_pt_gp_cg = &tg_pt_gp->tg_pt_gp_group;
@@ -2641,7 +2641,7 @@
 	config_group_init_type_name(alua_tg_pt_gp_cg, name,
 			&target_core_alua_tg_pt_gp_cit);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Allocated ALUA Target Port"
+	pr_debug("Target_Core_ConfigFS: Allocated ALUA Target Port"
 		" Group: alua/tg_pt_gps/%s\n",
 		config_item_name(alua_tg_pt_gp_ci));
 
@@ -2655,7 +2655,7 @@
 	struct t10_alua_tg_pt_gp *tg_pt_gp = container_of(to_config_group(item),
 			struct t10_alua_tg_pt_gp, tg_pt_gp_group);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Releasing ALUA Target Port"
+	pr_debug("Target_Core_ConfigFS: Releasing ALUA Target Port"
 		" Group: alua/tg_pt_gps/%s, ID: %hu\n",
 		config_item_name(item), tg_pt_gp->tg_pt_gp_id);
 	/*
@@ -2746,7 +2746,7 @@
 
 	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
 	if (!se_dev) {
-		printk(KERN_ERR "Unable to allocate memory for"
+		pr_err("Unable to allocate memory for"
 				" struct se_subsystem_dev\n");
 		goto unlock;
 	}
@@ -2770,7 +2770,7 @@
 
 	dev_cg->default_groups = kzalloc(sizeof(struct config_group) * 7,
 			GFP_KERNEL);
-	if (!(dev_cg->default_groups))
+	if (!dev_cg->default_groups)
 		goto out;
 	/*
 	 * Set se_dev_su_ptr from struct se_subsystem_api returned void ptr
@@ -2781,8 +2781,8 @@
 	 * configfs tree for device object's struct config_group.
 	 */
 	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, name);
-	if (!(se_dev->se_dev_su_ptr)) {
-		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+	if (!se_dev->se_dev_su_ptr) {
+		pr_err("Unable to locate subsystem dependent pointer"
 			" from allocate_virtdevice()\n");
 		goto out;
 	}
@@ -2813,14 +2813,14 @@
 	 * Add core/$HBA/$DEV/alua/default_tg_pt_gp
 	 */
 	tg_pt_gp = core_alua_allocate_tg_pt_gp(se_dev, "default_tg_pt_gp", 1);
-	if (!(tg_pt_gp))
+	if (!tg_pt_gp)
 		goto out;
 
 	tg_pt_gp_cg = &se_dev->t10_alua.alua_tg_pt_gps_group;
 	tg_pt_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
-	if (!(tg_pt_gp_cg->default_groups)) {
-		printk(KERN_ERR "Unable to allocate tg_pt_gp_cg->"
+	if (!tg_pt_gp_cg->default_groups) {
+		pr_err("Unable to allocate tg_pt_gp_cg->"
 				"default_groups\n");
 		goto out;
 	}
@@ -2837,12 +2837,12 @@
 	dev_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 4,
 				GFP_KERNEL);
 	if (!dev_stat_grp->default_groups) {
-		printk(KERN_ERR "Unable to allocate dev_stat_grp->default_groups\n");
+		pr_err("Unable to allocate dev_stat_grp->default_groups\n");
 		goto out;
 	}
 	target_stat_setup_dev_default_groups(se_dev);
 
-	printk(KERN_INFO "Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
+	pr_debug("Target_Core_ConfigFS: Allocated struct se_subsystem_dev:"
 		" %p se_dev_su_ptr: %p\n", se_dev, se_dev->se_dev_su_ptr);
 
 	mutex_unlock(&hba->hba_access_mutex);
@@ -2975,13 +2975,13 @@
 
 	ret = strict_strtoul(page, 0, &mode_flag);
 	if (ret < 0) {
-		printk(KERN_ERR "Unable to extract hba mode flag: %d\n", ret);
+		pr_err("Unable to extract hba mode flag: %d\n", ret);
 		return -EINVAL;
 	}
 
 	spin_lock(&hba->device_lock);
-	if (!(list_empty(&hba->hba_dev_list))) {
-		printk(KERN_ERR "Unable to set hba_mode with active devices\n");
+	if (!list_empty(&hba->hba_dev_list)) {
+		pr_err("Unable to set hba_mode with active devices\n");
 		spin_unlock(&hba->device_lock);
 		return -EINVAL;
 	}
@@ -3040,7 +3040,7 @@
 
 	memset(buf, 0, TARGET_CORE_NAME_MAX_LEN);
 	if (strlen(name) >= TARGET_CORE_NAME_MAX_LEN) {
-		printk(KERN_ERR "Passed *name strlen(): %d exceeds"
+		pr_err("Passed *name strlen(): %d exceeds"
 			" TARGET_CORE_NAME_MAX_LEN: %d\n", (int)strlen(name),
 			TARGET_CORE_NAME_MAX_LEN);
 		return ERR_PTR(-ENAMETOOLONG);
@@ -3048,8 +3048,8 @@
 	snprintf(buf, TARGET_CORE_NAME_MAX_LEN, "%s", name);
 
 	str = strstr(buf, "_");
-	if (!(str)) {
-		printk(KERN_ERR "Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
+	if (!str) {
+		pr_err("Unable to locate \"_\" for $SUBSYSTEM_PLUGIN_$HOST_ID\n");
 		return ERR_PTR(-EINVAL);
 	}
 	se_plugin_str = buf;
@@ -3058,7 +3058,7 @@
 	 * Namely rd_direct and rd_mcp..
 	 */
 	str2 = strstr(str+1, "_");
-	if ((str2)) {
+	if (str2) {
 		*str2 = '\0'; /* Terminate for *se_plugin_str */
 		str2++; /* Skip to start of plugin dependent ID */
 		str = str2;
@@ -3069,7 +3069,7 @@
 
 	ret = strict_strtoul(str, 0, &plugin_dep_id);
 	if (ret < 0) {
-		printk(KERN_ERR "strict_strtoul() returned %d for"
+		pr_err("strict_strtoul() returned %d for"
 				" plugin_dep_id\n", ret);
 		return ERR_PTR(-EINVAL);
 	}
@@ -3122,7 +3122,7 @@
 	struct t10_alua_lu_gp *lu_gp;
 	int ret;
 
-	printk(KERN_INFO "TARGET_CORE[0]: Loading Generic Kernel Storage"
+	pr_debug("TARGET_CORE[0]: Loading Generic Kernel Storage"
 		" Engine: %s on %s/%s on "UTS_RELEASE"\n",
 		TARGET_CORE_VERSION, utsname()->sysname, utsname()->machine);
 
@@ -3142,8 +3142,8 @@
 	target_cg = &subsys->su_group;
 	target_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
-	if (!(target_cg->default_groups)) {
-		printk(KERN_ERR "Unable to allocate target_cg->default_groups\n");
+	if (!target_cg->default_groups) {
+		pr_err("Unable to allocate target_cg->default_groups\n");
 		goto out_global;
 	}
 
@@ -3157,8 +3157,8 @@
 	hba_cg = &target_core_hbagroup;
 	hba_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
-	if (!(hba_cg->default_groups)) {
-		printk(KERN_ERR "Unable to allocate hba_cg->default_groups\n");
+	if (!hba_cg->default_groups) {
+		pr_err("Unable to allocate hba_cg->default_groups\n");
 		goto out_global;
 	}
 	config_group_init_type_name(&alua_group,
@@ -3172,8 +3172,8 @@
 	alua_cg = &alua_group;
 	alua_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 			GFP_KERNEL);
-	if (!(alua_cg->default_groups)) {
-		printk(KERN_ERR "Unable to allocate alua_cg->default_groups\n");
+	if (!alua_cg->default_groups) {
+		pr_err("Unable to allocate alua_cg->default_groups\n");
 		goto out_global;
 	}
 
@@ -3191,8 +3191,8 @@
 	lu_gp_cg = &alua_lu_gps_group;
 	lu_gp_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 			GFP_KERNEL);
-	if (!(lu_gp_cg->default_groups)) {
-		printk(KERN_ERR "Unable to allocate lu_gp_cg->default_groups\n");
+	if (!lu_gp_cg->default_groups) {
+		pr_err("Unable to allocate lu_gp_cg->default_groups\n");
 		goto out_global;
 	}
 
@@ -3206,11 +3206,11 @@
 	 */
 	ret = configfs_register_subsystem(subsys);
 	if (ret < 0) {
-		printk(KERN_ERR "Error %d while registering subsystem %s\n",
+		pr_err("Error %d while registering subsystem %s\n",
 			ret, subsys->su_group.cg_item.ci_namebuf);
 		goto out_global;
 	}
-	printk(KERN_INFO "TARGET_CORE[0]: Initialized ConfigFS Fabric"
+	pr_debug("TARGET_CORE[0]: Initialized ConfigFS Fabric"
 		" Infrastructure: "TARGET_CORE_CONFIGFS_VERSION" on %s/%s"
 		" on "UTS_RELEASE"\n", utsname()->sysname, utsname()->machine);
 	/*
@@ -3290,7 +3290,7 @@
 	core_alua_free_lu_gp(default_lu_gp);
 	default_lu_gp = NULL;
 
-	printk(KERN_INFO "TARGET_CORE[0]: Released ConfigFS Fabric"
+	pr_debug("TARGET_CORE[0]: Released ConfigFS Fabric"
 			" Infrastructure\n");
 
 	core_dev_release_virtual_lun0();
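
A note on the printk(KERN_INFO ...) to pr_debug() conversions above: pr_debug()
produces no output unless DEBUG is defined for the file or CONFIG_DYNAMIC_DEBUG
is enabled, in which case individual call sites can be switched on at runtime.
The sketch below is illustrative only -- the pr_fmt() prefix and the example
function are not part of this patch -- and shows the usual idiom:

/* Illustrative only: defining pr_fmt() before any includes gives every
 * pr_err()/pr_debug() in the file a common prefix. */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt

#include <linux/kernel.h>

static void pr_debug_example(void)
{
	/* Compiled away unless DEBUG is defined, or emitted on demand
	 * when CONFIG_DYNAMIC_DEBUG is enabled. */
	pr_debug("example debug message\n");
}

/*
 * With CONFIG_DYNAMIC_DEBUG the call sites can be enabled at runtime,
 * e.g. (assuming the module is named target_core_mod):
 *
 *   echo 'module target_core_mod +p' > /sys/kernel/debug/dynamic_debug/control
 */
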
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index 1185c3b..81860dd 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -84,7 +84,7 @@
 		    (deve->lun_flags & TRANSPORT_LUNFLAGS_READ_ONLY)) {
 			se_cmd->scsi_sense_reason = TCM_WRITE_PROTECTED;
 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-			printk("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
+			pr_err("TARGET_CORE[%s]: Detected WRITE_PROTECTED LUN"
 				" Access for 0x%08x\n",
 				se_cmd->se_tfo->get_fabric_name(),
 				unpacked_lun);
@@ -117,7 +117,7 @@
 		if (unpacked_lun != 0) {
 			se_cmd->scsi_sense_reason = TCM_NON_EXISTENT_LUN;
 			se_cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
-			printk("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+			pr_err("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 				" Access for 0x%08x\n",
 				se_cmd->se_tfo->get_fabric_name(),
 				unpacked_lun);
@@ -204,7 +204,7 @@
 	spin_unlock_irqrestore(&se_sess->se_node_acl->device_list_lock, flags);
 
 	if (!se_lun) {
-		printk(KERN_INFO "TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
+		pr_debug("TARGET_CORE[%s]: Detected NON_EXISTENT_LUN"
 			" Access for 0x%08x\n",
 			se_cmd->se_tfo->get_fabric_name(),
 			unpacked_lun);
@@ -255,15 +255,15 @@
 			continue;
 
 		lun = deve->se_lun;
-		if (!(lun)) {
-			printk(KERN_ERR "%s device entries device pointer is"
+		if (!lun) {
+			pr_err("%s device entries device pointer is"
 				" NULL, but Initiator has access.\n",
 				tpg->se_tpg_tfo->get_fabric_name());
 			continue;
 		}
 		port = lun->lun_sep;
-		if (!(port)) {
-			printk(KERN_ERR "%s device entries device pointer is"
+		if (!port) {
+			pr_err("%s device entries device pointer is"
 				" NULL, but Initiator has access.\n",
 				tpg->se_tpg_tfo->get_fabric_name());
 			continue;
@@ -301,7 +301,7 @@
 			continue;
 
 		if (!deve->se_lun) {
-			printk(KERN_ERR "%s device entries device pointer is"
+			pr_err("%s device entries device pointer is"
 				" NULL, but Initiator has access.\n",
 				tpg->se_tpg_tfo->get_fabric_name());
 			continue;
@@ -372,7 +372,7 @@
 	 * struct se_dev_entry pointers below as logic in
 	 * core_alua_do_transition_tg_pt() depends on these being present.
 	 */
-	if (!(enable)) {
+	if (!enable) {
 		/*
 		 * deve->se_lun_acl will be NULL for demo-mode created LUNs
 		 * that have not been explicitly concerted to MappedLUNs ->
@@ -395,14 +395,14 @@
 		 */
 		if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 			if (deve->se_lun_acl != NULL) {
-				printk(KERN_ERR "struct se_dev_entry->se_lun_acl"
+				pr_err("struct se_dev_entry->se_lun_acl"
 					" already set for demo mode -> explict"
 					" LUN ACL transition\n");
 				spin_unlock_irq(&nacl->device_list_lock);
 				return -EINVAL;
 			}
 			if (deve->se_lun != lun) {
-				printk(KERN_ERR "struct se_dev_entry->se_lun does"
+				pr_err("struct se_dev_entry->se_lun does"
 					" match passed struct se_lun for demo mode"
 					" -> explict LUN ACL transition\n");
 				spin_unlock_irq(&nacl->device_list_lock);
@@ -501,8 +501,8 @@
 	struct se_port *port, *port_tmp;
 
 	port = kzalloc(sizeof(struct se_port), GFP_KERNEL);
-	if (!(port)) {
-		printk(KERN_ERR "Unable to allocate struct se_port\n");
+	if (!port) {
+		pr_err("Unable to allocate struct se_port\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	INIT_LIST_HEAD(&port->sep_alua_list);
@@ -513,7 +513,7 @@
 
 	spin_lock(&dev->se_port_lock);
 	if (dev->dev_port_count == 0x0000ffff) {
-		printk(KERN_WARNING "Reached dev->dev_port_count =="
+		pr_warn("Reached dev->dev_port_count =="
 				" 0x0000ffff\n");
 		spin_unlock(&dev->se_port_lock);
 		return ERR_PTR(-ENOSPC);
@@ -532,7 +532,7 @@
 	 * 3h to FFFFh    Relative port 3 through 65 535
 	 */
 	port->sep_rtpi = dev->dev_rpti_counter++;
-	if (!(port->sep_rtpi))
+	if (!port->sep_rtpi)
 		goto again;
 
 	list_for_each_entry(port_tmp, &dev->dev_sep_list, sep_list) {
@@ -570,7 +570,7 @@
 	if (su_dev->t10_alua.alua_type == SPC3_ALUA_EMULATED) {
 		tg_pt_gp_mem = core_alua_allocate_tg_pt_gp_mem(port);
 		if (IS_ERR(tg_pt_gp_mem) || !tg_pt_gp_mem) {
-			printk(KERN_ERR "Unable to allocate t10_alua_tg_pt"
+			pr_err("Unable to allocate t10_alua_tg_pt"
 					"_gp_member_t\n");
 			return;
 		}
@@ -578,7 +578,7 @@
 		__core_alua_attach_tg_pt_gp_mem(tg_pt_gp_mem,
 			su_dev->t10_alua.default_tg_pt_gp);
 		spin_unlock(&tg_pt_gp_mem->tg_pt_gp_mem_lock);
-		printk(KERN_INFO "%s/%s: Adding to default ALUA Target Port"
+		pr_debug("%s/%s: Adding to default ALUA Target Port"
 			" Group: alua/default_tg_pt_gp\n",
 			dev->transport->name, tpg->se_tpg_tfo->get_fabric_name());
 	}
@@ -663,8 +663,8 @@
 	list_for_each_entry(se_task, &se_cmd->t_task_list, t_list)
 		break;
 
-	if (!(se_task)) {
-		printk(KERN_ERR "Unable to locate struct se_task for struct se_cmd\n");
+	if (!se_task) {
+		pr_err("Unable to locate struct se_task for struct se_cmd\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 
@@ -675,7 +675,7 @@
 	 * coming via a target_core_mod PASSTHROUGH op, and not through
 	 * a $FABRIC_MOD.  In that case, report LUN=0 only.
 	 */
-	if (!(se_sess)) {
+	if (!se_sess) {
 		int_to_scsilun(0, (struct scsi_lun *)&buf[offset]);
 		lun_count = 1;
 		goto done;
@@ -893,12 +893,12 @@
 int se_dev_set_task_timeout(struct se_device *dev, u32 task_timeout)
 {
 	if (task_timeout > DA_TASK_TIMEOUT_MAX) {
-		printk(KERN_ERR "dev[%p]: Passed task_timeout: %u larger then"
+		pr_err("dev[%p]: Passed task_timeout: %u larger then"
 			" DA_TASK_TIMEOUT_MAX\n", dev, task_timeout);
 		return -EINVAL;
 	} else {
 		dev->se_sub_dev->se_dev_attrib.task_timeout = task_timeout;
-		printk(KERN_INFO "dev[%p]: Set SE Device task_timeout: %u\n",
+		pr_debug("dev[%p]: Set SE Device task_timeout: %u\n",
 			dev, task_timeout);
 	}
 
@@ -910,7 +910,7 @@
 	u32 max_unmap_lba_count)
 {
 	dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count = max_unmap_lba_count;
-	printk(KERN_INFO "dev[%p]: Set max_unmap_lba_count: %u\n",
+	pr_debug("dev[%p]: Set max_unmap_lba_count: %u\n",
 			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_lba_count);
 	return 0;
 }
@@ -921,7 +921,7 @@
 {
 	dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count =
 		max_unmap_block_desc_count;
-	printk(KERN_INFO "dev[%p]: Set max_unmap_block_desc_count: %u\n",
+	pr_debug("dev[%p]: Set max_unmap_block_desc_count: %u\n",
 			dev, dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count);
 	return 0;
 }
@@ -931,7 +931,7 @@
 	u32 unmap_granularity)
 {
 	dev->se_sub_dev->se_dev_attrib.unmap_granularity = unmap_granularity;
-	printk(KERN_INFO "dev[%p]: Set unmap_granularity: %u\n",
+	pr_debug("dev[%p]: Set unmap_granularity: %u\n",
 			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity);
 	return 0;
 }
@@ -941,7 +941,7 @@
 	u32 unmap_granularity_alignment)
 {
 	dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment = unmap_granularity_alignment;
-	printk(KERN_INFO "dev[%p]: Set unmap_granularity_alignment: %u\n",
+	pr_debug("dev[%p]: Set unmap_granularity_alignment: %u\n",
 			dev, dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment);
 	return 0;
 }
@@ -949,19 +949,19 @@
 int se_dev_set_emulate_dpo(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	if (dev->transport->dpo_emulated == NULL) {
-		printk(KERN_ERR "dev->transport->dpo_emulated is NULL\n");
+		pr_err("dev->transport->dpo_emulated is NULL\n");
 		return -EINVAL;
 	}
 	if (dev->transport->dpo_emulated(dev) == 0) {
-		printk(KERN_ERR "dev->transport->dpo_emulated not supported\n");
+		pr_err("dev->transport->dpo_emulated not supported\n");
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_dpo = flag;
-	printk(KERN_INFO "dev[%p]: SE Device Page Out (DPO) Emulation"
+	pr_debug("dev[%p]: SE Device Page Out (DPO) Emulation"
 			" bit: %d\n", dev, dev->se_sub_dev->se_dev_attrib.emulate_dpo);
 	return 0;
 }
@@ -969,19 +969,19 @@
 int se_dev_set_emulate_fua_write(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	if (dev->transport->fua_write_emulated == NULL) {
-		printk(KERN_ERR "dev->transport->fua_write_emulated is NULL\n");
+		pr_err("dev->transport->fua_write_emulated is NULL\n");
 		return -EINVAL;
 	}
 	if (dev->transport->fua_write_emulated(dev) == 0) {
-		printk(KERN_ERR "dev->transport->fua_write_emulated not supported\n");
+		pr_err("dev->transport->fua_write_emulated not supported\n");
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_fua_write = flag;
-	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
+	pr_debug("dev[%p]: SE Device Forced Unit Access WRITEs: %d\n",
 			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_write);
 	return 0;
 }
@@ -989,19 +989,19 @@
 int se_dev_set_emulate_fua_read(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	if (dev->transport->fua_read_emulated == NULL) {
-		printk(KERN_ERR "dev->transport->fua_read_emulated is NULL\n");
+		pr_err("dev->transport->fua_read_emulated is NULL\n");
 		return -EINVAL;
 	}
 	if (dev->transport->fua_read_emulated(dev) == 0) {
-		printk(KERN_ERR "dev->transport->fua_read_emulated not supported\n");
+		pr_err("dev->transport->fua_read_emulated not supported\n");
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_fua_read = flag;
-	printk(KERN_INFO "dev[%p]: SE Device Forced Unit Access READs: %d\n",
+	pr_debug("dev[%p]: SE Device Forced Unit Access READs: %d\n",
 			dev, dev->se_sub_dev->se_dev_attrib.emulate_fua_read);
 	return 0;
 }
@@ -1009,19 +1009,19 @@
 int se_dev_set_emulate_write_cache(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	if (dev->transport->write_cache_emulated == NULL) {
-		printk(KERN_ERR "dev->transport->write_cache_emulated is NULL\n");
+		pr_err("dev->transport->write_cache_emulated is NULL\n");
 		return -EINVAL;
 	}
 	if (dev->transport->write_cache_emulated(dev) == 0) {
-		printk(KERN_ERR "dev->transport->write_cache_emulated not supported\n");
+		pr_err("dev->transport->write_cache_emulated not supported\n");
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_write_cache = flag;
-	printk(KERN_INFO "dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
+	pr_debug("dev[%p]: SE Device WRITE_CACHE_EMULATION flag: %d\n",
 			dev, dev->se_sub_dev->se_dev_attrib.emulate_write_cache);
 	return 0;
 }
@@ -1029,19 +1029,19 @@
 int se_dev_set_emulate_ua_intlck_ctrl(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1) && (flag != 2)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+		pr_err("dev[%p]: Unable to change SE Device"
 			" UA_INTRLCK_CTRL while dev_export_obj: %d count"
 			" exists\n", dev,
 			atomic_read(&dev->dev_export_obj.obj_access_count));
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl = flag;
-	printk(KERN_INFO "dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
+	pr_debug("dev[%p]: SE Device UA_INTRLCK_CTRL flag: %d\n",
 		dev, dev->se_sub_dev->se_dev_attrib.emulate_ua_intlck_ctrl);
 
 	return 0;
@@ -1050,18 +1050,18 @@
 int se_dev_set_emulate_tas(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_ERR "dev[%p]: Unable to change SE Device TAS while"
+		pr_err("dev[%p]: Unable to change SE Device TAS while"
 			" dev_export_obj: %d count exists\n", dev,
 			atomic_read(&dev->dev_export_obj.obj_access_count));
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
-	printk(KERN_INFO "dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
+	pr_debug("dev[%p]: SE Device TASK_ABORTED status bit: %s\n",
 		dev, (dev->se_sub_dev->se_dev_attrib.emulate_tas) ? "Enabled" : "Disabled");
 
 	return 0;
@@ -1070,20 +1070,20 @@
 int se_dev_set_emulate_tpu(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	/*
 	 * We expect this value to be non-zero when generic Block Layer
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
-	if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
-		printk(KERN_ERR "Generic Block Discard not supported\n");
+	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+		pr_err("Generic Block Discard not supported\n");
 		return -ENOSYS;
 	}
 
 	dev->se_sub_dev->se_dev_attrib.emulate_tpu = flag;
-	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
+	pr_debug("dev[%p]: SE Device Thin Provisioning UNMAP bit: %d\n",
 				dev, flag);
 	return 0;
 }
@@ -1091,20 +1091,20 @@
 int se_dev_set_emulate_tpws(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	/*
 	 * We expect this value to be non-zero when generic Block Layer
 	 * Discard supported is detected iblock_create_virtdevice().
 	 */
-	if (!(dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count)) {
-		printk(KERN_ERR "Generic Block Discard not supported\n");
+	if (!dev->se_sub_dev->se_dev_attrib.max_unmap_block_desc_count) {
+		pr_err("Generic Block Discard not supported\n");
 		return -ENOSYS;
 	}
 
 	dev->se_sub_dev->se_dev_attrib.emulate_tpws = flag;
-	printk(KERN_INFO "dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
+	pr_debug("dev[%p]: SE Device Thin Provisioning WRITE_SAME: %d\n",
 				dev, flag);
 	return 0;
 }
@@ -1112,11 +1112,11 @@
 int se_dev_set_enforce_pr_isids(struct se_device *dev, int flag)
 {
 	if ((flag != 0) && (flag != 1)) {
-		printk(KERN_ERR "Illegal value %d\n", flag);
+		pr_err("Illegal value %d\n", flag);
 		return -EINVAL;
 	}
 	dev->se_sub_dev->se_dev_attrib.enforce_pr_isids = flag;
-	printk(KERN_INFO "dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
+	pr_debug("dev[%p]: SE Device enforce_pr_isids bit: %s\n", dev,
 		(dev->se_sub_dev->se_dev_attrib.enforce_pr_isids) ? "Enabled" : "Disabled");
 	return 0;
 }
@@ -1141,20 +1141,20 @@
 	u32 orig_queue_depth = dev->queue_depth;
 
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_ERR "dev[%p]: Unable to change SE Device TCQ while"
+		pr_err("dev[%p]: Unable to change SE Device TCQ while"
 			" dev_export_obj: %d count exists\n", dev,
 			atomic_read(&dev->dev_export_obj.obj_access_count));
 		return -EINVAL;
 	}
-	if (!(queue_depth)) {
-		printk(KERN_ERR "dev[%p]: Illegal ZERO value for queue"
+	if (!queue_depth) {
+		pr_err("dev[%p]: Illegal ZERO value for queue"
 			"_depth\n", dev);
 		return -EINVAL;
 	}
 
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
 		if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
-			printk(KERN_ERR "dev[%p]: Passed queue_depth: %u"
+			pr_err("dev[%p]: Passed queue_depth: %u"
 				" exceeds TCM/SE_Device TCQ: %u\n",
 				dev, queue_depth,
 				dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@@ -1163,7 +1163,7 @@
 	} else {
 		if (queue_depth > dev->se_sub_dev->se_dev_attrib.queue_depth) {
 			if (queue_depth > dev->se_sub_dev->se_dev_attrib.hw_queue_depth) {
-				printk(KERN_ERR "dev[%p]: Passed queue_depth:"
+				pr_err("dev[%p]: Passed queue_depth:"
 					" %u exceeds TCM/SE_Device MAX"
 					" TCQ: %u\n", dev, queue_depth,
 					dev->se_sub_dev->se_dev_attrib.hw_queue_depth);
@@ -1178,7 +1178,7 @@
 	else if (queue_depth < orig_queue_depth)
 		atomic_sub(orig_queue_depth - queue_depth, &dev->depth_left);
 
-	printk(KERN_INFO "dev[%p]: SE Device TCQ Depth changed to: %u\n",
+	pr_debug("dev[%p]: SE Device TCQ Depth changed to: %u\n",
 			dev, queue_depth);
 	return 0;
 }
@@ -1188,41 +1188,41 @@
 	int force = 0; /* Force setting for VDEVS */
 
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+		pr_err("dev[%p]: Unable to change SE Device"
 			" max_sectors while dev_export_obj: %d count exists\n",
 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
 		return -EINVAL;
 	}
-	if (!(max_sectors)) {
-		printk(KERN_ERR "dev[%p]: Illegal ZERO value for"
+	if (!max_sectors) {
+		pr_err("dev[%p]: Illegal ZERO value for"
 			" max_sectors\n", dev);
 		return -EINVAL;
 	}
 	if (max_sectors < DA_STATUS_MAX_SECTORS_MIN) {
-		printk(KERN_ERR "dev[%p]: Passed max_sectors: %u less than"
+		pr_err("dev[%p]: Passed max_sectors: %u less than"
 			" DA_STATUS_MAX_SECTORS_MIN: %u\n", dev, max_sectors,
 				DA_STATUS_MAX_SECTORS_MIN);
 		return -EINVAL;
 	}
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
 		if (max_sectors > dev->se_sub_dev->se_dev_attrib.hw_max_sectors) {
-			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+			pr_err("dev[%p]: Passed max_sectors: %u"
 				" greater than TCM/SE_Device max_sectors:"
 				" %u\n", dev, max_sectors,
 				dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
 			 return -EINVAL;
 		}
 	} else {
-		if (!(force) && (max_sectors >
+		if (!force && (max_sectors >
 				 dev->se_sub_dev->se_dev_attrib.hw_max_sectors)) {
-			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+			pr_err("dev[%p]: Passed max_sectors: %u"
 				" greater than TCM/SE_Device max_sectors"
 				": %u, use force=1 to override.\n", dev,
 				max_sectors, dev->se_sub_dev->se_dev_attrib.hw_max_sectors);
 			return -EINVAL;
 		}
 		if (max_sectors > DA_STATUS_MAX_SECTORS_MAX) {
-			printk(KERN_ERR "dev[%p]: Passed max_sectors: %u"
+			pr_err("dev[%p]: Passed max_sectors: %u"
 				" greater than DA_STATUS_MAX_SECTORS_MAX:"
 				" %u\n", dev, max_sectors,
 				DA_STATUS_MAX_SECTORS_MAX);
@@ -1231,7 +1231,7 @@
 	}
 
 	dev->se_sub_dev->se_dev_attrib.max_sectors = max_sectors;
-	printk("dev[%p]: SE Device max_sectors changed to %u\n",
+	pr_debug("dev[%p]: SE Device max_sectors changed to %u\n",
 			dev, max_sectors);
 	return 0;
 }
@@ -1239,25 +1239,25 @@
 int se_dev_set_optimal_sectors(struct se_device *dev, u32 optimal_sectors)
 {
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_ERR "dev[%p]: Unable to change SE Device"
+		pr_err("dev[%p]: Unable to change SE Device"
 			" optimal_sectors while dev_export_obj: %d count exists\n",
 			dev, atomic_read(&dev->dev_export_obj.obj_access_count));
 		return -EINVAL;
 	}
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-		printk(KERN_ERR "dev[%p]: Passed optimal_sectors cannot be"
+		pr_err("dev[%p]: Passed optimal_sectors cannot be"
 				" changed for TCM/pSCSI\n", dev);
 		return -EINVAL;
 	}
 	if (optimal_sectors > dev->se_sub_dev->se_dev_attrib.max_sectors) {
-		printk(KERN_ERR "dev[%p]: Passed optimal_sectors %u cannot be"
+		pr_err("dev[%p]: Passed optimal_sectors %u cannot be"
 			" greater than max_sectors: %u\n", dev,
 			optimal_sectors, dev->se_sub_dev->se_dev_attrib.max_sectors);
 		return -EINVAL;
 	}
 
 	dev->se_sub_dev->se_dev_attrib.optimal_sectors = optimal_sectors;
-	printk(KERN_INFO "dev[%p]: SE Device optimal_sectors changed to %u\n",
+	pr_debug("dev[%p]: SE Device optimal_sectors changed to %u\n",
 			dev, optimal_sectors);
 	return 0;
 }
@@ -1265,7 +1265,7 @@
 int se_dev_set_block_size(struct se_device *dev, u32 block_size)
 {
 	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
-		printk(KERN_ERR "dev[%p]: Unable to change SE Device block_size"
+		pr_err("dev[%p]: Unable to change SE Device block_size"
 			" while dev_export_obj: %d count exists\n", dev,
 			atomic_read(&dev->dev_export_obj.obj_access_count));
 		return -EINVAL;
@@ -1275,21 +1275,21 @@
 	    (block_size != 1024) &&
 	    (block_size != 2048) &&
 	    (block_size != 4096)) {
-		printk(KERN_ERR "dev[%p]: Illegal value for block_device: %u"
+		pr_err("dev[%p]: Illegal value for block_device: %u"
 			" for SE device, must be 512, 1024, 2048 or 4096\n",
 			dev, block_size);
 		return -EINVAL;
 	}
 
 	if (dev->transport->transport_type == TRANSPORT_PLUGIN_PHBA_PDEV) {
-		printk(KERN_ERR "dev[%p]: Not allowed to change block_size for"
+		pr_err("dev[%p]: Not allowed to change block_size for"
 			" Physical Device, use for Linux/SCSI to change"
 			" block_size for underlying hardware\n", dev);
 		return -EINVAL;
 	}
 
 	dev->se_sub_dev->se_dev_attrib.block_size = block_size;
-	printk(KERN_INFO "dev[%p]: SE Device block_size changed to %u\n",
+	pr_debug("dev[%p]: SE Device block_size changed to %u\n",
 			dev, block_size);
 	return 0;
 }
@@ -1304,13 +1304,13 @@
 	u32 lun_access = 0;
 
 	if (atomic_read(&dev->dev_access_obj.obj_access_count) != 0) {
-		printk(KERN_ERR "Unable to export struct se_device while dev_access_obj: %d\n",
+		pr_err("Unable to export struct se_device while dev_access_obj: %d\n",
 			atomic_read(&dev->dev_access_obj.obj_access_count));
 		return NULL;
 	}
 
 	lun_p = core_tpg_pre_addlun(tpg, lun);
-	if ((IS_ERR(lun_p)) || !(lun_p))
+	if (IS_ERR(lun_p) || !lun_p)
 		return NULL;
 
 	if (dev->dev_flags & DF_READ_ONLY)
@@ -1321,7 +1321,7 @@
 	if (core_tpg_post_addlun(tpg, lun_p, lun_access, dev) < 0)
 		return NULL;
 
-	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
+	pr_debug("%s_TPG[%u]_LUN[%u] - Activated %s Logical Unit from"
 		" CORE HBA: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun_p->unpacked_lun,
 		tpg->se_tpg_tfo->get_fabric_name(), hba->hba_id);
@@ -1357,12 +1357,12 @@
 	int ret = 0;
 
 	lun = core_tpg_pre_dellun(tpg, unpacked_lun, &ret);
-	if (!(lun))
+	if (!lun)
 		return ret;
 
 	core_tpg_post_dellun(tpg, lun);
 
-	printk(KERN_INFO "%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
+	pr_debug("%s_TPG[%u]_LUN[%u] - Deactivated %s Logical Unit from"
 		" device object\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun,
 		tpg->se_tpg_tfo->get_fabric_name());
@@ -1376,7 +1376,7 @@
 
 	spin_lock(&tpg->tpg_lun_lock);
 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS"
 			"_PER_TPG-1: %u for Target Portal Group: %hu\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -1387,7 +1387,7 @@
 	lun = &tpg->tpg_lun_list[unpacked_lun];
 
 	if (lun->lun_status != TRANSPORT_LUN_STATUS_FREE) {
-		printk(KERN_ERR "%s Logical Unit Number: %u is not free on"
+		pr_err("%s Logical Unit Number: %u is not free on"
 			" Target Portal Group: %hu, ignoring request.\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1409,7 +1409,7 @@
 
 	spin_lock(&tpg->tpg_lun_lock);
 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER"
 			"_TPG-1: %u for Target Portal Group: %hu\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -1420,7 +1420,7 @@
 	lun = &tpg->tpg_lun_list[unpacked_lun];
 
 	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
-		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+		pr_err("%s Logical Unit Number: %u is not active on"
 			" Target Portal Group: %hu, ignoring request.\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1442,19 +1442,19 @@
 	struct se_node_acl *nacl;
 
 	if (strlen(initiatorname) >= TRANSPORT_IQN_LEN) {
-		printk(KERN_ERR "%s InitiatorName exceeds maximum size.\n",
+		pr_err("%s InitiatorName exceeds maximum size.\n",
 			tpg->se_tpg_tfo->get_fabric_name());
 		*ret = -EOVERFLOW;
 		return NULL;
 	}
 	nacl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if (!(nacl)) {
+	if (!nacl) {
 		*ret = -EINVAL;
 		return NULL;
 	}
 	lacl = kzalloc(sizeof(struct se_lun_acl), GFP_KERNEL);
-	if (!(lacl)) {
-		printk(KERN_ERR "Unable to allocate memory for struct se_lun_acl.\n");
+	if (!lacl) {
+		pr_err("Unable to allocate memory for struct se_lun_acl.\n");
 		*ret = -ENOMEM;
 		return NULL;
 	}
@@ -1477,8 +1477,8 @@
 	struct se_node_acl *nacl;
 
 	lun = core_dev_get_lun(tpg, unpacked_lun);
-	if (!(lun)) {
-		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+	if (!lun) {
+		pr_err("%s Logical Unit Number: %u is not active on"
 			" Target Portal Group: %hu, ignoring request.\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -1486,7 +1486,7 @@
 	}
 
 	nacl = lacl->se_lun_nacl;
-	if (!(nacl))
+	if (!nacl)
 		return -EINVAL;
 
 	if ((lun->lun_access & TRANSPORT_LUNFLAGS_READ_ONLY) &&
@@ -1505,7 +1505,7 @@
 	smp_mb__after_atomic_inc();
 	spin_unlock(&lun->lun_acl_lock);
 
-	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
+	pr_debug("%s_TPG[%hu]_LUN[%u->%u] - Added %s ACL for "
 		" InitiatorNode: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), unpacked_lun, lacl->mapped_lun,
 		(lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) ? "RW" : "RO",
@@ -1530,7 +1530,7 @@
 	struct se_node_acl *nacl;
 
 	nacl = lacl->se_lun_nacl;
-	if (!(nacl))
+	if (!nacl)
 		return -EINVAL;
 
 	spin_lock(&lun->lun_acl_lock);
@@ -1544,7 +1544,7 @@
 
 	lacl->se_lun = NULL;
 
-	printk(KERN_INFO "%s_TPG[%hu]_LUN[%u] - Removed ACL for"
+	pr_debug("%s_TPG[%hu]_LUN[%u] - Removed ACL for"
 		" InitiatorNode: %s Mapped LUN: %u\n",
 		tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -1557,7 +1557,7 @@
 	struct se_portal_group *tpg,
 	struct se_lun_acl *lacl)
 {
-	printk("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
+	pr_debug("%s_TPG[%hu] - Freeing ACL for %s InitiatorNode: %s"
 		" Mapped LUN: %u\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg),
 		tpg->se_tpg_tfo->get_fabric_name(),
@@ -1575,7 +1575,7 @@
 	char buf[16];
 	int ret;
 
-	hba = core_alloc_hba("rd_dr", 0, HBA_FLAGS_INTERNAL_USE);
+	hba = core_alloc_hba("rd_mcp", 0, HBA_FLAGS_INTERNAL_USE);
 	if (IS_ERR(hba))
 		return PTR_ERR(hba);
 
@@ -1583,8 +1583,8 @@
 	t = hba->transport;
 
 	se_dev = kzalloc(sizeof(struct se_subsystem_dev), GFP_KERNEL);
-	if (!(se_dev)) {
-		printk(KERN_ERR "Unable to allocate memory for"
+	if (!se_dev) {
+		pr_err("Unable to allocate memory for"
 				" struct se_subsystem_dev\n");
 		ret = -ENOMEM;
 		goto out;
@@ -1606,8 +1606,8 @@
 	se_dev->se_dev_hba = hba;
 
 	se_dev->se_dev_su_ptr = t->allocate_virtdevice(hba, "virt_lun0");
-	if (!(se_dev->se_dev_su_ptr)) {
-		printk(KERN_ERR "Unable to locate subsystem dependent pointer"
+	if (!se_dev->se_dev_su_ptr) {
+		pr_err("Unable to locate subsystem dependent pointer"
 			" from allocate_virtdevice()\n");
 		ret = -ENOMEM;
 		goto out;
@@ -1643,7 +1643,7 @@
 	struct se_hba *hba = lun0_hba;
 	struct se_subsystem_dev *su_dev = lun0_su_dev;
 
-	if (!(hba))
+	if (!hba)
 		return;
 
 	if (g_lun0_dev)
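
The se_dev_set_*() hunks above all share one shape after the conversion:
invalid input is rejected with pr_err() and -EINVAL, and the accepted value is
reported with pr_debug(), which stays silent unless dynamic debug enables it.
A condensed sketch of that pattern, modeled loosely on se_dev_set_emulate_tas()
above (the function name below is made up for illustration and assumes the
target core headers for struct se_device):

/* Condensed illustration of the attribute-setter pattern in
 * target_core_device.c; not a verbatim copy of any one function. */
static int example_set_flag_attrib(struct se_device *dev, int flag)
{
	if (flag != 0 && flag != 1) {
		/* bad input: loud pr_err() and -EINVAL */
		pr_err("Illegal value %d\n", flag);
		return -EINVAL;
	}
	if (atomic_read(&dev->dev_export_obj.obj_access_count)) {
		/* refuse to change attributes while fabric exports exist */
		pr_err("dev[%p]: unable to change attribute while"
			" dev_export_obj count exists\n", dev);
		return -EINVAL;
	}
	dev->se_sub_dev->se_dev_attrib.emulate_tas = flag;
	/* success path: quiet by default, visible via dynamic debug */
	pr_debug("dev[%p]: attribute set to %d\n", dev, flag);
	return 0;
}
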
diff --git a/drivers/target/target_core_fabric_configfs.c b/drivers/target/target_core_fabric_configfs.c
index 0b1659d..f165469 100644
--- a/drivers/target/target_core_fabric_configfs.c
+++ b/drivers/target/target_core_fabric_configfs.c
@@ -60,7 +60,7 @@
 	cit->ct_group_ops = _group_ops;					\
 	cit->ct_attrs = _attrs;						\
 	cit->ct_owner = tf->tf_module;					\
-	printk("Setup generic %s\n", __stringify(_name));		\
+	pr_debug("Setup generic %s\n", __stringify(_name));		\
 }
 
 /* Start of tfc_tpg_mappedlun_cit */
@@ -80,8 +80,8 @@
 	/*
 	 * Ensure that the source port exists
 	 */
-	if (!(lun->lun_sep) || !(lun->lun_sep->sep_tpg)) {
-		printk(KERN_ERR "Source se_lun->lun_sep or lun->lun_sep->sep"
+	if (!lun->lun_sep || !lun->lun_sep->sep_tpg) {
+		pr_err("Source se_lun->lun_sep or lun->lun_sep->sep"
 				"_tpg does not exist\n");
 		return -EINVAL;
 	}
@@ -96,12 +96,12 @@
 	 * Make sure the SymLink is going to the same $FABRIC/$WWN/tpgt_$TPGT
 	 */
 	if (strcmp(config_item_name(wwn_ci), config_item_name(wwn_ci_s))) {
-		printk(KERN_ERR "Illegal Initiator ACL SymLink outside of %s\n",
+		pr_err("Illegal Initiator ACL SymLink outside of %s\n",
 			config_item_name(wwn_ci));
 		return -EINVAL;
 	}
 	if (strcmp(config_item_name(tpg_ci), config_item_name(tpg_ci_s))) {
-		printk(KERN_ERR "Illegal Initiator ACL Symlink outside of %s"
+		pr_err("Illegal Initiator ACL Symlink outside of %s"
 			" TPGT: %s\n", config_item_name(wwn_ci),
 			config_item_name(tpg_ci));
 		return -EINVAL;
@@ -147,7 +147,7 @@
 	/*
 	 * Determine if the underlying MappedLUN has already been released..
 	 */
-	if (!(deve->se_lun))
+	if (!deve->se_lun)
 		return 0;
 
 	lun = container_of(to_config_group(lun_ci), struct se_lun, lun_group);
@@ -202,7 +202,7 @@
 			TRANSPORT_LUNFLAGS_READ_WRITE,
 			lacl->se_lun_nacl);
 
-	printk(KERN_INFO "%s_ConfigFS: Changed Initiator ACL: %s"
+	pr_debug("%s_ConfigFS: Changed Initiator ACL: %s"
 		" Mapped LUN: %u Write Protect bit to %s\n",
 		se_tpg->se_tpg_tfo->get_fabric_name(),
 		lacl->initiatorname, lacl->mapped_lun, (op) ? "ON" : "OFF");
@@ -327,14 +327,14 @@
 	int ret = 0;
 
 	acl_ci = &group->cg_item;
-	if (!(acl_ci)) {
-		printk(KERN_ERR "Unable to locatel acl_ci\n");
+	if (!acl_ci) {
+		pr_err("Unable to locatel acl_ci\n");
 		return NULL;
 	}
 
 	buf = kzalloc(strlen(name) + 1, GFP_KERNEL);
-	if (!(buf)) {
-		printk(KERN_ERR "Unable to allocate memory for name buf\n");
+	if (!buf) {
+		pr_err("Unable to allocate memory for name buf\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	snprintf(buf, strlen(name) + 1, "%s", name);
@@ -342,7 +342,7 @@
 	 * Make sure user is creating iscsi/$IQN/$TPGT/acls/$INITIATOR/lun_$ID.
 	 */
 	if (strstr(buf, "lun_") != buf) {
-		printk(KERN_ERR "Unable to locate \"lun_\" from buf: %s"
+		pr_err("Unable to locate \"lun_\" from buf: %s"
 			" name: %s\n", buf, name);
 		ret = -EINVAL;
 		goto out;
@@ -358,7 +358,7 @@
 
 	lacl = core_dev_init_initiator_node_lun_acl(se_tpg, mapped_lun,
 			config_item_name(acl_ci), &ret);
-	if (!(lacl)) {
+	if (!lacl) {
 		ret = -EINVAL;
 		goto out;
 	}
@@ -367,7 +367,7 @@
 	lacl_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
 	if (!lacl_cg->default_groups) {
-		printk(KERN_ERR "Unable to allocate lacl_cg->default_groups\n");
+		pr_err("Unable to allocate lacl_cg->default_groups\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -383,7 +383,7 @@
 	ml_stat_grp->default_groups = kzalloc(sizeof(struct config_group) * 3,
 				GFP_KERNEL);
 	if (!ml_stat_grp->default_groups) {
-		printk(KERN_ERR "Unable to allocate ml_stat_grp->default_groups\n");
+		pr_err("Unable to allocate ml_stat_grp->default_groups\n");
 		ret = -ENOMEM;
 		goto out;
 	}
@@ -474,8 +474,8 @@
 	struct se_node_acl *se_nacl;
 	struct config_group *nacl_cg;
 
-	if (!(tf->tf_ops.fabric_make_nodeacl)) {
-		printk(KERN_ERR "tf->tf_ops.fabric_make_nodeacl is NULL\n");
+	if (!tf->tf_ops.fabric_make_nodeacl) {
+		pr_err("tf->tf_ops.fabric_make_nodeacl is NULL\n");
 		return ERR_PTR(-ENOSYS);
 	}
 
@@ -572,13 +572,13 @@
 	struct target_fabric_configfs *tf = se_tpg->se_tpg_wwn->wwn_tf;
 	struct se_tpg_np *se_tpg_np;
 
-	if (!(tf->tf_ops.fabric_make_np)) {
-		printk(KERN_ERR "tf->tf_ops.fabric_make_np is NULL\n");
+	if (!tf->tf_ops.fabric_make_np) {
+		pr_err("tf->tf_ops.fabric_make_np is NULL\n");
 		return ERR_PTR(-ENOSYS);
 	}
 
 	se_tpg_np = tf->tf_ops.fabric_make_np(se_tpg, group, name);
-	if (!(se_tpg_np) || IS_ERR(se_tpg_np))
+	if (!se_tpg_np || IS_ERR(se_tpg_np))
 		return ERR_PTR(-EINVAL);
 
 	se_tpg_np->tpg_np_parent = se_tpg;
@@ -627,10 +627,7 @@
 	struct se_lun *lun,
 	char *page)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_show_tg_pt_gp_info(lun->lun_sep, page);
@@ -641,10 +638,7 @@
 	const char *page,
 	size_t count)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_store_tg_pt_gp_info(lun->lun_sep, page, count);
@@ -659,10 +653,7 @@
 	struct se_lun *lun,
 	char *page)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_show_offline_bit(lun, page);
@@ -673,10 +664,7 @@
 	const char *page,
 	size_t count)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_store_offline_bit(lun, page, count);
@@ -691,10 +679,7 @@
 	struct se_lun *lun,
 	char *page)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_show_secondary_status(lun, page);
@@ -705,10 +690,7 @@
 	const char *page,
 	size_t count)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_store_secondary_status(lun, page, count);
@@ -723,10 +705,7 @@
 	struct se_lun *lun,
 	char *page)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_show_secondary_write_metadata(lun, page);
@@ -737,10 +716,7 @@
 	const char *page,
 	size_t count)
 {
-	if (!(lun))
-		return -ENODEV;
-
-	if (!(lun->lun_sep))
+	if (!lun || !lun->lun_sep)
 		return -ENODEV;
 
 	return core_alua_store_secondary_write_metadata(lun, page, count);
@@ -781,13 +757,13 @@
 	tf = se_tpg->se_tpg_wwn->wwn_tf;
 
 	if (lun->lun_se_dev !=  NULL) {
-		printk(KERN_ERR "Port Symlink already exists\n");
+		pr_err("Port Symlink already exists\n");
 		return -EEXIST;
 	}
 
 	dev = se_dev->se_dev_ptr;
-	if (!(dev)) {
-		printk(KERN_ERR "Unable to locate struct se_device pointer from"
+	if (!dev) {
+		pr_err("Unable to locate struct se_device pointer from"
 			" %s\n", config_item_name(se_dev_ci));
 		ret = -ENODEV;
 		goto out;
@@ -795,8 +771,8 @@
 
 	lun_p = core_dev_add_lun(se_tpg, dev->se_hba, dev,
 				lun->unpacked_lun);
-	if ((IS_ERR(lun_p)) || !(lun_p)) {
-		printk(KERN_ERR "core_dev_add_lun() failed\n");
+	if (IS_ERR(lun_p) || !lun_p) {
+		pr_err("core_dev_add_lun() failed\n");
 		ret = -EINVAL;
 		goto out;
 	}
@@ -888,7 +864,7 @@
 	int errno;
 
 	if (strstr(name, "lun_") != name) {
-		printk(KERN_ERR "Unable to locate \'_\" in"
+		pr_err("Unable to locate \"_\" in"
 				" \"lun_$LUN_NUMBER\"\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -896,14 +872,14 @@
 		return ERR_PTR(-EINVAL);
 
 	lun = core_get_lun_from_tpg(se_tpg, unpacked_lun);
-	if (!(lun))
+	if (!lun)
 		return ERR_PTR(-EINVAL);
 
 	lun_cg = &lun->lun_group;
 	lun_cg->default_groups = kzalloc(sizeof(struct config_group) * 2,
 				GFP_KERNEL);
 	if (!lun_cg->default_groups) {
-		printk(KERN_ERR "Unable to allocate lun_cg->default_groups\n");
+		pr_err("Unable to allocate lun_cg->default_groups\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -918,7 +894,7 @@
 	port_stat_grp->default_groups =  kzalloc(sizeof(struct config_group) * 3,
 				GFP_KERNEL);
 	if (!port_stat_grp->default_groups) {
-		printk(KERN_ERR "Unable to allocate port_stat_grp->default_groups\n");
+		pr_err("Unable to allocate port_stat_grp->default_groups\n");
 		errno = -ENOMEM;
 		goto out;
 	}
@@ -1031,13 +1007,13 @@
 	struct target_fabric_configfs *tf = wwn->wwn_tf;
 	struct se_portal_group *se_tpg;
 
-	if (!(tf->tf_ops.fabric_make_tpg)) {
-		printk(KERN_ERR "tf->tf_ops.fabric_make_tpg is NULL\n");
+	if (!tf->tf_ops.fabric_make_tpg) {
+		pr_err("tf->tf_ops.fabric_make_tpg is NULL\n");
 		return ERR_PTR(-ENOSYS);
 	}
 
 	se_tpg = tf->tf_ops.fabric_make_tpg(wwn, group, name);
-	if (!(se_tpg) || IS_ERR(se_tpg))
+	if (!se_tpg || IS_ERR(se_tpg))
 		return ERR_PTR(-EINVAL);
 	/*
 	 * Setup default groups from pre-allocated se_tpg->tpg_default_groups
@@ -1130,13 +1106,13 @@
 				struct target_fabric_configfs, tf_group);
 	struct se_wwn *wwn;
 
-	if (!(tf->tf_ops.fabric_make_wwn)) {
-		printk(KERN_ERR "tf->tf_ops.fabric_make_wwn is NULL\n");
+	if (!tf->tf_ops.fabric_make_wwn) {
+		pr_err("tf->tf_ops.fabric_make_wwn is NULL\n");
 		return ERR_PTR(-ENOSYS);
 	}
 
 	wwn = tf->tf_ops.fabric_make_wwn(tf, group, name);
-	if (!(wwn) || IS_ERR(wwn))
+	if (!wwn || IS_ERR(wwn))
 		return ERR_PTR(-EINVAL);
 
 	wwn->wwn_tf = tf;
diff --git a/drivers/target/target_core_fabric_lib.c b/drivers/target/target_core_fabric_lib.c
index 1e193f3..a299688 100644
--- a/drivers/target/target_core_fabric_lib.c
+++ b/drivers/target/target_core_fabric_lib.c
@@ -172,7 +172,7 @@
 	ptr = &se_nacl->initiatorname[0];
 
 	for (i = 0; i < 24; ) {
-		if (!(strncmp(&ptr[i], ":", 1))) {
+		if (!strncmp(&ptr[i], ":", 1)) {
 			i++;
 			continue;
 		}
@@ -386,7 +386,7 @@
 	 *            Reserved
 	 */
 	if ((format_code != 0x00) && (format_code != 0x40)) {
-		printk(KERN_ERR "Illegal format code: 0x%02x for iSCSI"
+		pr_err("Illegal format code: 0x%02x for iSCSI"
 			" Initiator Transport ID\n", format_code);
 		return NULL;
 	}
@@ -406,7 +406,7 @@
 			tid_len += padding;
 
 		if ((add_len + 4) != tid_len) {
-			printk(KERN_INFO "LIO-Target Extracted add_len: %hu "
+			pr_debug("LIO-Target Extracted add_len: %hu "
 				"does not match calculated tid_len: %u,"
 				" using tid_len instead\n", add_len+4, tid_len);
 			*out_tid_len = tid_len;
@@ -420,8 +420,8 @@
 	 */
 	if (format_code == 0x40) {
 		p = strstr((char *)&buf[4], ",i,0x");
-		if (!(p)) {
-			printk(KERN_ERR "Unable to locate \",i,0x\" seperator"
+		if (!p) {
+			pr_err("Unable to locate \",i,0x\" separator"
 				" for Initiator port identifier: %s\n",
 				(char *)&buf[4]);
 			return NULL;
diff --git a/drivers/target/target_core_file.c b/drivers/target/target_core_file.c
index 5c47f42..bc1b336 100644
--- a/drivers/target/target_core_file.c
+++ b/drivers/target/target_core_file.c
@@ -42,18 +42,6 @@
 
 #include "target_core_file.h"
 
-#if 1
-#define DEBUG_FD_CACHE(x...) printk(x)
-#else
-#define DEBUG_FD_CACHE(x...)
-#endif
-
-#if 1
-#define DEBUG_FD_FUA(x...) printk(x)
-#else
-#define DEBUG_FD_FUA(x...)
-#endif
-
 static struct se_subsystem_api fileio_template;
 
 /*	fd_attach_hba(): (Part of se_subsystem_api_t template)
@@ -65,8 +53,8 @@
 	struct fd_host *fd_host;
 
 	fd_host = kzalloc(sizeof(struct fd_host), GFP_KERNEL);
-	if (!(fd_host)) {
-		printk(KERN_ERR "Unable to allocate memory for struct fd_host\n");
+	if (!fd_host) {
+		pr_err("Unable to allocate memory for struct fd_host\n");
 		return -ENOMEM;
 	}
 
@@ -74,10 +62,10 @@
 
 	hba->hba_ptr = fd_host;
 
-	printk(KERN_INFO "CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
+	pr_debug("CORE_HBA[%d] - TCM FILEIO HBA Driver %s on Generic"
 		" Target Core Stack %s\n", hba->hba_id, FD_VERSION,
 		TARGET_CORE_MOD_VERSION);
-	printk(KERN_INFO "CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
+	pr_debug("CORE_HBA[%d] - Attached FILEIO HBA: %u to Generic"
 		" MaxSectors: %u\n",
 		hba->hba_id, fd_host->fd_host_id, FD_MAX_SECTORS);
 
@@ -88,7 +76,7 @@
 {
 	struct fd_host *fd_host = hba->hba_ptr;
 
-	printk(KERN_INFO "CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
+	pr_debug("CORE_HBA[%d] - Detached FILEIO HBA: %u from Generic"
 		" Target Core\n", hba->hba_id, fd_host->fd_host_id);
 
 	kfree(fd_host);
@@ -101,14 +89,14 @@
 	struct fd_host *fd_host = (struct fd_host *) hba->hba_ptr;
 
 	fd_dev = kzalloc(sizeof(struct fd_dev), GFP_KERNEL);
-	if (!(fd_dev)) {
-		printk(KERN_ERR "Unable to allocate memory for struct fd_dev\n");
+	if (!fd_dev) {
+		pr_err("Unable to allocate memory for struct fd_dev\n");
 		return NULL;
 	}
 
 	fd_dev->fd_host = fd_host;
 
-	printk(KERN_INFO "FILEIO: Allocated fd_dev for %p\n", name);
+	pr_debug("FILEIO: Allocated fd_dev for %p\n", name);
 
 	return fd_dev;
 }
@@ -141,7 +129,7 @@
 	set_fs(old_fs);
 
 	if (IS_ERR(dev_p)) {
-		printk(KERN_ERR "getname(%s) failed: %lu\n",
+		pr_err("getname(%s) failed: %lu\n",
 			fd_dev->fd_dev_name, IS_ERR(dev_p));
 		ret = PTR_ERR(dev_p);
 		goto fail;
@@ -164,12 +152,12 @@
 
 	file = filp_open(dev_p, flags, 0600);
 	if (IS_ERR(file)) {
-		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+		pr_err("filp_open(%s) failed\n", dev_p);
 		ret = PTR_ERR(file);
 		goto fail;
 	}
 	if (!file || !file->f_dentry) {
-		printk(KERN_ERR "filp_open(%s) failed\n", dev_p);
+		pr_err("filp_open(%s) failed\n", dev_p);
 		goto fail;
 	}
 	fd_dev->fd_file = file;
@@ -199,14 +187,14 @@
 		fd_dev->fd_dev_size = (i_size_read(file->f_mapping->host) -
 				       fd_dev->fd_block_size);
 
-		printk(KERN_INFO "FILEIO: Using size: %llu bytes from struct"
+		pr_debug("FILEIO: Using size: %llu bytes from struct"
 			" block_device blocks: %llu logical_block_size: %d\n",
 			fd_dev->fd_dev_size,
 			div_u64(fd_dev->fd_dev_size, fd_dev->fd_block_size),
 			fd_dev->fd_block_size);
 	} else {
 		if (!(fd_dev->fbd_flags & FBDF_HAS_SIZE)) {
-			printk(KERN_ERR "FILEIO: Missing fd_dev_size="
+			pr_err("FILEIO: Missing fd_dev_size="
 				" parameter, and no backing struct"
 				" block_device\n");
 			goto fail;
@@ -225,13 +213,13 @@
 	dev = transport_add_device_to_core_hba(hba, &fileio_template,
 				se_dev, dev_flags, fd_dev,
 				&dev_limits, "FILEIO", FD_VERSION);
-	if (!(dev))
+	if (!dev)
 		goto fail;
 
 	fd_dev->fd_dev_id = fd_host->fd_host_dev_id_count++;
 	fd_dev->fd_queue_depth = dev->queue_depth;
 
-	printk(KERN_INFO "CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
+	pr_debug("CORE_FILE[%u] - Added TCM FILEIO Device ID: %u at %s,"
 		" %llu total bytes\n", fd_host->fd_host_id, fd_dev->fd_dev_id,
 			fd_dev->fd_dev_name, fd_dev->fd_dev_size);
 
@@ -269,25 +257,24 @@
 
 
 static struct se_task *
-fd_alloc_task(struct se_cmd *cmd)
+fd_alloc_task(unsigned char *cdb)
 {
 	struct fd_request *fd_req;
 
 	fd_req = kzalloc(sizeof(struct fd_request), GFP_KERNEL);
-	if (!(fd_req)) {
-		printk(KERN_ERR "Unable to allocate struct fd_request\n");
+	if (!fd_req) {
+		pr_err("Unable to allocate struct fd_request\n");
 		return NULL;
 	}
 
-	fd_req->fd_dev = cmd->se_dev->dev_ptr;
-
 	return &fd_req->fd_task;
 }
 
 static int fd_do_readv(struct se_task *task)
 {
 	struct fd_request *req = FILE_REQ(task);
-	struct file *fd = req->fd_dev->fd_file;
+	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+	struct file *fd = dev->fd_file;
 	struct scatterlist *sg = task->task_sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
@@ -295,20 +282,20 @@
 		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret = 0, i;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
-	if (!(iov)) {
-		printk(KERN_ERR "Unable to allocate fd_do_readv iov[]\n");
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	if (!iov) {
+		pr_err("Unable to allocate fd_do_readv iov[]\n");
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < task->task_sg_num; i++) {
+	for (i = 0; i < task->task_sg_nents; i++) {
 		iov[i].iov_len = sg[i].length;
 		iov[i].iov_base = sg_virt(&sg[i]);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_readv(fd, &iov[0], task->task_sg_num, &pos);
+	ret = vfs_readv(fd, &iov[0], task->task_sg_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
@@ -319,14 +306,14 @@
 	 */
 	if (S_ISBLK(fd->f_dentry->d_inode->i_mode)) {
 		if (ret < 0 || ret != task->task_size) {
-			printk(KERN_ERR "vfs_readv() returned %d,"
+			pr_err("vfs_readv() returned %d,"
 				" expecting %d for S_ISBLK\n", ret,
 				(int)task->task_size);
 			return (ret < 0 ? ret : -EINVAL);
 		}
 	} else {
 		if (ret < 0) {
-			printk(KERN_ERR "vfs_readv() returned %d for non"
+			pr_err("vfs_readv() returned %d for non"
 				" S_ISBLK\n", ret);
 			return ret;
 		}
@@ -338,7 +325,8 @@
 static int fd_do_writev(struct se_task *task)
 {
 	struct fd_request *req = FILE_REQ(task);
-	struct file *fd = req->fd_dev->fd_file;
+	struct fd_dev *dev = req->fd_task.se_dev->dev_ptr;
+	struct file *fd = dev->fd_file;
 	struct scatterlist *sg = task->task_sg;
 	struct iovec *iov;
 	mm_segment_t old_fs;
@@ -346,26 +334,26 @@
 		      task->se_dev->se_sub_dev->se_dev_attrib.block_size);
 	int ret, i = 0;
 
-	iov = kzalloc(sizeof(struct iovec) * task->task_sg_num, GFP_KERNEL);
-	if (!(iov)) {
-		printk(KERN_ERR "Unable to allocate fd_do_writev iov[]\n");
+	iov = kzalloc(sizeof(struct iovec) * task->task_sg_nents, GFP_KERNEL);
+	if (!iov) {
+		pr_err("Unable to allocate fd_do_writev iov[]\n");
 		return -ENOMEM;
 	}
 
-	for (i = 0; i < task->task_sg_num; i++) {
+	for (i = 0; i < task->task_sg_nents; i++) {
 		iov[i].iov_len = sg[i].length;
 		iov[i].iov_base = sg_virt(&sg[i]);
 	}
 
 	old_fs = get_fs();
 	set_fs(get_ds());
-	ret = vfs_writev(fd, &iov[0], task->task_sg_num, &pos);
+	ret = vfs_writev(fd, &iov[0], task->task_sg_nents, &pos);
 	set_fs(old_fs);
 
 	kfree(iov);
 
 	if (ret < 0 || ret != task->task_size) {
-		printk(KERN_ERR "vfs_writev() returned %d\n", ret);
+		pr_err("vfs_writev() returned %d\n", ret);
 		return (ret < 0 ? ret : -EINVAL);
 	}
 
@@ -404,7 +392,7 @@
 
 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 	if (ret != 0)
-		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 
 	if (!immed)
 		transport_complete_sync_cache(cmd, ret == 0);
@@ -449,12 +437,12 @@
 	loff_t end = start + task->task_size;
 	int ret;
 
-	DEBUG_FD_CACHE("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
+	pr_debug("FILEIO: FUA WRITE LBA: %llu, bytes: %u\n",
 			task->task_lba, task->task_size);
 
 	ret = vfs_fsync_range(fd_dev->fd_file, start, end, 1);
 	if (ret != 0)
-		printk(KERN_ERR "FILEIO: vfs_fsync_range() failed: %d\n", ret);
+		pr_err("FILEIO: vfs_fsync_range() failed: %d\n", ret);
 }
 
 static int fd_do_task(struct se_task *task)
@@ -548,7 +536,7 @@
 			snprintf(fd_dev->fd_dev_name, FD_MAX_DEV_NAME,
 					"%s", arg_p);
 			kfree(arg_p);
-			printk(KERN_INFO "FILEIO: Referencing Path: %s\n",
+			pr_debug("FILEIO: Referencing Path: %s\n",
 					fd_dev->fd_dev_name);
 			fd_dev->fbd_flags |= FBDF_HAS_PATH;
 			break;
@@ -561,23 +549,23 @@
 			ret = strict_strtoull(arg_p, 0, &fd_dev->fd_dev_size);
 			kfree(arg_p);
 			if (ret < 0) {
-				printk(KERN_ERR "strict_strtoull() failed for"
+				pr_err("strict_strtoull() failed for"
 						" fd_dev_size=\n");
 				goto out;
 			}
-			printk(KERN_INFO "FILEIO: Referencing Size: %llu"
+			pr_debug("FILEIO: Referencing Size: %llu"
 					" bytes\n", fd_dev->fd_dev_size);
 			fd_dev->fbd_flags |= FBDF_HAS_SIZE;
 			break;
 		case Opt_fd_buffered_io:
 			match_int(args, &arg);
 			if (arg != 1) {
-				printk(KERN_ERR "bogus fd_buffered_io=%d value\n", arg);
+				pr_err("bogus fd_buffered_io=%d value\n", arg);
 				ret = -EINVAL;
 				goto out;
 			}
 
-			printk(KERN_INFO "FILEIO: Using buffered I/O"
+			pr_debug("FILEIO: Using buffered I/O"
 				" operations for struct fd_dev\n");
 
 			fd_dev->fbd_flags |= FDBD_USE_BUFFERED_IO;
@@ -597,7 +585,7 @@
 	struct fd_dev *fd_dev = (struct fd_dev *) se_dev->se_dev_su_ptr;
 
 	if (!(fd_dev->fbd_flags & FBDF_HAS_PATH)) {
-		printk(KERN_ERR "Missing fd_dev_name=\n");
+		pr_err("Missing fd_dev_name=\n");
 		return -EINVAL;
 	}
 
diff --git a/drivers/target/target_core_file.h b/drivers/target/target_core_file.h
index 6386d3f..daebd71 100644
--- a/drivers/target/target_core_file.h
+++ b/drivers/target/target_core_file.h
@@ -16,8 +16,6 @@
 	struct se_task	fd_task;
 	/* SCSI CDB from iSCSI Command PDU */
 	unsigned char	fd_scsi_cdb[TCM_MAX_COMMAND_SIZE];
-	/* FILEIO device */
-	struct fd_dev	*fd_dev;
 } ____cacheline_aligned;
 
 #define FBDF_HAS_PATH		0x01
diff --git a/drivers/target/target_core_hba.c b/drivers/target/target_core_hba.c
index bd9da25..0639b97 100644
--- a/drivers/target/target_core_hba.c
+++ b/drivers/target/target_core_hba.c
@@ -58,8 +58,8 @@
 
 	mutex_lock(&subsystem_mutex);
 	list_for_each_entry(s, &subsystem_list, sub_api_list) {
-		if (!(strcmp(s->name, sub_api->name))) {
-			printk(KERN_ERR "%p is already registered with"
+		if (!strcmp(s->name, sub_api->name)) {
+			pr_err("%p is already registered with"
 				" duplicate name %s, unable to process"
 				" request\n", s, s->name);
 			mutex_unlock(&subsystem_mutex);
@@ -69,7 +69,7 @@
 	list_add_tail(&sub_api->sub_api_list, &subsystem_list);
 	mutex_unlock(&subsystem_mutex);
 
-	printk(KERN_INFO "TCM: Registered subsystem plugin: %s struct module:"
+	pr_debug("TCM: Registered subsystem plugin: %s struct module:"
 			" %p\n", sub_api->name, sub_api->owner);
 	return 0;
 }
@@ -109,7 +109,7 @@
 
 	hba = kzalloc(sizeof(*hba), GFP_KERNEL);
 	if (!hba) {
-		printk(KERN_ERR "Unable to allocate struct se_hba\n");
+		pr_err("Unable to allocate struct se_hba\n");
 		return ERR_PTR(-ENOMEM);
 	}
 
@@ -135,7 +135,7 @@
 	list_add_tail(&hba->hba_node, &hba_list);
 	spin_unlock(&hba_lock);
 
-	printk(KERN_INFO "CORE_HBA[%d] - Attached HBA to Generic Target"
+	pr_debug("CORE_HBA[%d] - Attached HBA to Generic Target"
 			" Core\n", hba->hba_id);
 
 	return hba;
@@ -161,7 +161,7 @@
 	list_del(&hba->hba_node);
 	spin_unlock(&hba_lock);
 
-	printk(KERN_INFO "CORE_HBA[%d] - Detached HBA from Generic Target"
+	pr_debug("CORE_HBA[%d] - Detached HBA from Generic Target"
 			" Core\n", hba->hba_id);
 
 	if (hba->transport->owner)
diff --git a/drivers/target/target_core_iblock.c b/drivers/target/target_core_iblock.c
index 164b721..251fc66 100644
--- a/drivers/target/target_core_iblock.c
+++ b/drivers/target/target_core_iblock.c
@@ -47,12 +47,6 @@
 
 #include "target_core_iblock.h"
 
-#if 0
-#define DEBUG_IBLOCK(x...) printk(x)
-#else
-#define DEBUG_IBLOCK(x...)
-#endif
-
 static struct se_subsystem_api iblock_template;
 
 static void iblock_bio_done(struct bio *, int);
@@ -66,8 +60,8 @@
 	struct iblock_hba *ib_host;
 
 	ib_host = kzalloc(sizeof(struct iblock_hba), GFP_KERNEL);
-	if (!(ib_host)) {
-		printk(KERN_ERR "Unable to allocate memory for"
+	if (!ib_host) {
+		pr_err("Unable to allocate memory for"
 				" struct iblock_hba\n");
 		return -ENOMEM;
 	}
@@ -76,11 +70,11 @@
 
 	hba->hba_ptr = ib_host;
 
-	printk(KERN_INFO "CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
+	pr_debug("CORE_HBA[%d] - TCM iBlock HBA Driver %s on"
 		" Generic Target Core Stack %s\n", hba->hba_id,
 		IBLOCK_VERSION, TARGET_CORE_MOD_VERSION);
 
-	printk(KERN_INFO "CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
+	pr_debug("CORE_HBA[%d] - Attached iBlock HBA: %u to Generic\n",
 		hba->hba_id, ib_host->iblock_host_id);
 
 	return 0;
@@ -90,7 +84,7 @@
 {
 	struct iblock_hba *ib_host = hba->hba_ptr;
 
-	printk(KERN_INFO "CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
+	pr_debug("CORE_HBA[%d] - Detached iBlock HBA: %u from Generic"
 		" Target Core\n", hba->hba_id, ib_host->iblock_host_id);
 
 	kfree(ib_host);
@@ -103,13 +97,13 @@
 	struct iblock_hba *ib_host = hba->hba_ptr;
 
 	ib_dev = kzalloc(sizeof(struct iblock_dev), GFP_KERNEL);
-	if (!(ib_dev)) {
-		printk(KERN_ERR "Unable to allocate struct iblock_dev\n");
+	if (!ib_dev) {
+		pr_err("Unable to allocate struct iblock_dev\n");
 		return NULL;
 	}
 	ib_dev->ibd_host = ib_host;
 
-	printk(KERN_INFO  "IBLOCK: Allocated ib_dev for %s\n", name);
+	pr_debug("IBLOCK: Allocated ib_dev for %s\n", name);
 
 	return ib_dev;
 }
@@ -128,8 +122,8 @@
 	u32 dev_flags = 0;
 	int ret = -EINVAL;
 
-	if (!(ib_dev)) {
-		printk(KERN_ERR "Unable to locate struct iblock_dev parameter\n");
+	if (!ib_dev) {
+		pr_err("Unable to locate struct iblock_dev parameter\n");
 		return ERR_PTR(ret);
 	}
 	memset(&dev_limits, 0, sizeof(struct se_dev_limits));
@@ -137,16 +131,16 @@
 	 * These settings need to be made tunable..
 	 */
 	ib_dev->ibd_bio_set = bioset_create(32, 64);
-	if (!(ib_dev->ibd_bio_set)) {
-		printk(KERN_ERR "IBLOCK: Unable to create bioset()\n");
+	if (!ib_dev->ibd_bio_set) {
+		pr_err("IBLOCK: Unable to create bioset()\n");
 		return ERR_PTR(-ENOMEM);
 	}
-	printk(KERN_INFO "IBLOCK: Created bio_set()\n");
+	pr_debug("IBLOCK: Created bio_set()\n");
 	/*
 	 * iblock_check_configfs_dev_params() ensures that ib_dev->ibd_udev_path
 	 * must already have been set in order for echo 1 > $HBA/$DEV/enable to run.
 	 */
-	printk(KERN_INFO  "IBLOCK: Claiming struct block_device: %s\n",
+	pr_debug("IBLOCK: Claiming struct block_device: %s\n",
 			ib_dev->ibd_udev_path);
 
 	bd = blkdev_get_by_path(ib_dev->ibd_udev_path,
@@ -172,7 +166,7 @@
 	dev = transport_add_device_to_core_hba(hba,
 			&iblock_template, se_dev, dev_flags, ib_dev,
 			&dev_limits, "IBLOCK", IBLOCK_VERSION);
-	if (!(dev))
+	if (!dev)
 		goto failed;
 
 	/*
@@ -192,7 +186,7 @@
 		dev->se_sub_dev->se_dev_attrib.unmap_granularity_alignment =
 				q->limits.discard_alignment;
 
-		printk(KERN_INFO "IBLOCK: BLOCK Discard support available,"
+		pr_debug("IBLOCK: BLOCK Discard support available,"
 				" disabled by default\n");
 	}
 
@@ -227,17 +221,16 @@
 }
 
 static struct se_task *
-iblock_alloc_task(struct se_cmd *cmd)
+iblock_alloc_task(unsigned char *cdb)
 {
 	struct iblock_req *ib_req;
 
 	ib_req = kzalloc(sizeof(struct iblock_req), GFP_KERNEL);
-	if (!(ib_req)) {
-		printk(KERN_ERR "Unable to allocate memory for struct iblock_req\n");
+	if (!ib_req) {
+		pr_err("Unable to allocate memory for struct iblock_req\n");
 		return NULL;
 	}
 
-	ib_req->ib_dev = cmd->se_dev->dev_ptr;
 	atomic_set(&ib_req->ib_bio_cnt, 0);
 	return &ib_req->ib_task;
 }
@@ -345,7 +338,7 @@
 	 */
 	ret = blkdev_issue_flush(ib_dev->ibd_bd, GFP_KERNEL, &error_sector);
 	if (ret != 0) {
-		printk(KERN_ERR "IBLOCK: block_issue_flush() failed: %d "
+		pr_err("IBLOCK: blkdev_issue_flush() failed: %d"
 			" error_sector: %llu\n", ret,
 			(unsigned long long)error_sector);
 	}
@@ -409,8 +402,9 @@
 	while (bio) {
 		nbio = bio->bi_next;
 		bio->bi_next = NULL;
-		DEBUG_IBLOCK("Calling submit_bio() task: %p bio: %p"
-			" bio->bi_sector: %llu\n", task, bio, bio->bi_sector);
+		pr_debug("Calling submit_bio() task: %p bio: %p"
+			" bio->bi_sector: %llu\n", task, bio,
+			 (unsigned long long)bio->bi_sector);
 
 		submit_bio(rw, bio);
 		bio = nbio;
@@ -480,7 +474,7 @@
 		switch (token) {
 		case Opt_udev_path:
 			if (ib_dev->ibd_bd) {
-				printk(KERN_ERR "Unable to set udev_path= while"
+				pr_err("Unable to set udev_path= while"
 					" ib_dev->ibd_bd exists\n");
 				ret = -EEXIST;
 				goto out;
@@ -493,7 +487,7 @@
 			snprintf(ib_dev->ibd_udev_path, SE_UDEV_PATH_LEN,
 					"%s", arg_p);
 			kfree(arg_p);
-			printk(KERN_INFO "IBLOCK: Referencing UDEV path: %s\n",
+			pr_debug("IBLOCK: Referencing UDEV path: %s\n",
 					ib_dev->ibd_udev_path);
 			ib_dev->ibd_flags |= IBDF_HAS_UDEV_PATH;
 			break;
@@ -516,7 +510,7 @@
 	struct iblock_dev *ibd = se_dev->se_dev_su_ptr;
 
 	if (!(ibd->ibd_flags & IBDF_HAS_UDEV_PATH)) {
-		printk(KERN_ERR "Missing udev_path= parameters for IBLOCK\n");
+		pr_err("Missing udev_path= parameters for IBLOCK\n");
 		return -EINVAL;
 	}
 
@@ -574,15 +568,15 @@
 	struct bio *bio;
 
 	bio = bio_alloc_bioset(GFP_NOIO, sg_num, ib_dev->ibd_bio_set);
-	if (!(bio)) {
-		printk(KERN_ERR "Unable to allocate memory for bio\n");
+	if (!bio) {
+		pr_err("Unable to allocate memory for bio\n");
 		*ret = PYX_TRANSPORT_OUT_OF_MEMORY_RESOURCES;
 		return NULL;
 	}
 
-	DEBUG_IBLOCK("Allocated bio: %p task_sg_num: %u using ibd_bio_set:"
-		" %p\n", bio, task->task_sg_num, ib_dev->ibd_bio_set);
-	DEBUG_IBLOCK("Allocated bio: %p task_size: %u\n", bio, task->task_size);
+	pr_debug("Allocated bio: %p task_sg_nents: %u using ibd_bio_set:"
+		" %p\n", bio, task->task_sg_nents, ib_dev->ibd_bio_set);
+	pr_debug("Allocated bio: %p task_size: %u\n", bio, task->task_size);
 
 	bio->bi_bdev = ib_dev->ibd_bd;
 	bio->bi_private = task;
@@ -591,8 +585,8 @@
 	bio->bi_sector = lba;
 	atomic_inc(&ib_req->ib_bio_cnt);
 
-	DEBUG_IBLOCK("Set bio->bi_sector: %llu\n", bio->bi_sector);
-	DEBUG_IBLOCK("Set ib_req->ib_bio_cnt: %d\n",
+	pr_debug("Set bio->bi_sector: %llu\n", (unsigned long long)bio->bi_sector);
+	pr_debug("Set ib_req->ib_bio_cnt: %d\n",
 			atomic_read(&ib_req->ib_bio_cnt));
 	return bio;
 }
@@ -606,7 +600,7 @@
 	struct bio *bio = NULL, *hbio = NULL, *tbio = NULL;
 	struct scatterlist *sg;
 	int ret = 0;
-	u32 i, sg_num = task->task_sg_num;
+	u32 i, sg_num = task->task_sg_nents;
 	sector_t block_lba;
 	/*
 	 * Do starting conversion up from non 512-byte blocksize with
@@ -621,13 +615,13 @@
 	else if (dev->se_sub_dev->se_dev_attrib.block_size == 512)
 		block_lba = task->task_lba;
 	else {
-		printk(KERN_ERR "Unsupported SCSI -> BLOCK LBA conversion:"
+		pr_err("Unsupported SCSI -> BLOCK LBA conversion:"
 				" %u\n", dev->se_sub_dev->se_dev_attrib.block_size);
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 
 	bio = iblock_get_bio(task, ib_req, ib_dev, &ret, block_lba, sg_num);
-	if (!(bio))
+	if (!bio)
 		return ret;
 
 	ib_req->ib_bio = bio;
@@ -636,41 +630,41 @@
 	 * Use fs/bio.c:bio_add_pages() to setup the bio_vec maplist
 	 * from task->task_sg -> struct scatterlist memory.
 	 */
-	for_each_sg(task->task_sg, sg, task->task_sg_num, i) {
-		DEBUG_IBLOCK("task: %p bio: %p Calling bio_add_page(): page:"
+	for_each_sg(task->task_sg, sg, task->task_sg_nents, i) {
+		pr_debug("task: %p bio: %p Calling bio_add_page(): page:"
 			" %p len: %u offset: %u\n", task, bio, sg_page(sg),
 				sg->length, sg->offset);
 again:
 		ret = bio_add_page(bio, sg_page(sg), sg->length, sg->offset);
 		if (ret != sg->length) {
 
-			DEBUG_IBLOCK("*** Set bio->bi_sector: %llu\n",
-					bio->bi_sector);
-			DEBUG_IBLOCK("** task->task_size: %u\n",
+			pr_debug("*** Set bio->bi_sector: %llu\n",
+				 (unsigned long long)bio->bi_sector);
+			pr_debug("** task->task_size: %u\n",
 					task->task_size);
-			DEBUG_IBLOCK("*** bio->bi_max_vecs: %u\n",
+			pr_debug("*** bio->bi_max_vecs: %u\n",
 					bio->bi_max_vecs);
-			DEBUG_IBLOCK("*** bio->bi_vcnt: %u\n",
+			pr_debug("*** bio->bi_vcnt: %u\n",
 					bio->bi_vcnt);
 
 			bio = iblock_get_bio(task, ib_req, ib_dev, &ret,
 						block_lba, sg_num);
-			if (!(bio))
+			if (!bio)
 				goto fail;
 
 			tbio = tbio->bi_next = bio;
-			DEBUG_IBLOCK("-----------------> Added +1 bio: %p to"
+			pr_debug("-----------------> Added +1 bio: %p to"
 				" list, Going to again\n", bio);
 			goto again;
 		}
 		/* Always in 512 byte units for Linux/Block */
 		block_lba += sg->length >> IBLOCK_LBA_SHIFT;
 		sg_num--;
-		DEBUG_IBLOCK("task: %p bio-add_page() passed!, decremented"
+		pr_debug("task: %p bio_add_page() passed!, decremented"
 			" sg_num to %u\n", task, sg_num);
-		DEBUG_IBLOCK("task: %p bio_add_page() passed!, increased lba"
-				" to %llu\n", task, block_lba);
-		DEBUG_IBLOCK("task: %p bio_add_page() passed!, bio->bi_vcnt:"
+		pr_debug("task: %p bio_add_page() passed!, increased lba"
+			 " to %llu\n", task, (unsigned long long)block_lba);
+		pr_debug("task: %p bio_add_page() passed!, bio->bi_vcnt:"
 				" %u\n", task, bio->bi_vcnt);
 	}
 
@@ -716,11 +710,11 @@
 	/*
 	 * Set -EIO if !BIO_UPTODATE and the passed is still err=0
 	 */
-	if (!(test_bit(BIO_UPTODATE, &bio->bi_flags)) && !(err))
+	if (!test_bit(BIO_UPTODATE, &bio->bi_flags) && !err)
 		err = -EIO;
 
 	if (err != 0) {
-		printk(KERN_ERR "test_bit(BIO_UPTODATE) failed for bio: %p,"
+		pr_err("test_bit(BIO_UPTODATE) failed for bio: %p,"
 			" err: %d\n", bio, err);
 		/*
 		 * Bump the ib_bio_err_cnt and release bio.
@@ -731,15 +725,15 @@
 		/*
 		 * Wait to complete the task until the last bio as completed.
 		 */
-		if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+		if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
 			return;
 
 		ibr->ib_bio = NULL;
 		transport_complete_task(task, 0);
 		return;
 	}
-	DEBUG_IBLOCK("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
-		task, bio, task->task_lba, bio->bi_sector, err);
+	pr_debug("done[%p] bio: %p task_lba: %llu bio_lba: %llu err=%d\n",
+		 task, bio, task->task_lba, (unsigned long long)bio->bi_sector, err);
 	/*
 	 * bio_put() will call iblock_bio_destructor() to release the bio back
 	 * to ibr->ib_bio_set.
@@ -748,7 +742,7 @@
 	/*
 	 * Wait to complete the task until the last bio as completed.
 	 */
-	if (!(atomic_dec_and_test(&ibr->ib_bio_cnt)))
+	if (!atomic_dec_and_test(&ibr->ib_bio_cnt))
 		return;
 	/*
 	 * Return GOOD status for task if zero ib_bio_err_cnt exists.
diff --git a/drivers/target/target_core_iblock.h b/drivers/target/target_core_iblock.h
index 2aa1d27..a121cd1 100644
--- a/drivers/target/target_core_iblock.h
+++ b/drivers/target/target_core_iblock.h
@@ -12,7 +12,6 @@
 	atomic_t ib_bio_cnt;
 	atomic_t ib_bio_err_cnt;
 	struct bio *ib_bio;
-	struct iblock_dev *ib_dev;
 } ____cacheline_aligned;
 
 #define IBDF_HAS_UDEV_PATH		0x01
diff --git a/drivers/target/target_core_pr.c b/drivers/target/target_core_pr.c
index 3342843..1c1b849 100644
--- a/drivers/target/target_core_pr.c
+++ b/drivers/target/target_core_pr.c
@@ -62,7 +62,7 @@
 	char *buf,
 	u32 size)
 {
-	if (!(pr_reg->isid_present_at_reg))
+	if (!pr_reg->isid_present_at_reg)
 		return 0;
 
 	snprintf(buf, size, ",i,0x%s", &pr_reg->pr_reg_isid[0]);
@@ -95,7 +95,7 @@
 	struct se_session *sess = cmd->se_sess;
 	int ret;
 
-	if (!(sess))
+	if (!sess)
 		return 0;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -123,7 +123,7 @@
 	struct se_session *sess = cmd->se_sess;
 	struct se_portal_group *tpg = sess->se_tpg;
 
-	if (!(sess) || !(tpg))
+	if (!sess || !tpg)
 		return 0;
 
 	spin_lock(&dev->dev_reservation_lock);
@@ -142,7 +142,7 @@
 		dev->dev_res_bin_isid = 0;
 		dev->dev_flags &= ~DF_SPC2_RESERVATIONS_WITH_ISID;
 	}
-	printk(KERN_INFO "SCSI-2 Released reservation for %s LUN: %u ->"
+	pr_debug("SCSI-2 Released reservation for %s LUN: %u ->"
 		" MAPPED LUN: %u for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
 		sess->se_node_acl->initiatorname);
@@ -159,7 +159,7 @@
 
 	if ((cmd->t_task_cdb[1] & 0x01) &&
 	    (cmd->t_task_cdb[1] & 0x02)) {
-		printk(KERN_ERR "LongIO and Obselete Bits set, returning"
+		pr_err("LongIO and Obsolete Bits set, returning"
 				" ILLEGAL_REQUEST\n");
 		return PYX_TRANSPORT_ILLEGAL_REQUEST;
 	}
@@ -167,18 +167,18 @@
 	 * This is currently the case for target_core_mod passthrough struct se_cmd
 	 * ops
 	 */
-	if (!(sess) || !(tpg))
+	if (!sess || !tpg)
 		return 0;
 
 	spin_lock(&dev->dev_reservation_lock);
 	if (dev->dev_reserved_node_acl &&
 	   (dev->dev_reserved_node_acl != sess->se_node_acl)) {
-		printk(KERN_ERR "SCSI-2 RESERVATION CONFLIFT for %s fabric\n",
+		pr_err("SCSI-2 RESERVATION CONFLICT for %s fabric\n",
 			tpg->se_tpg_tfo->get_fabric_name());
-		printk(KERN_ERR "Original reserver LUN: %u %s\n",
+		pr_err("Original reserver LUN: %u %s\n",
 			cmd->se_lun->unpacked_lun,
 			dev->dev_reserved_node_acl->initiatorname);
-		printk(KERN_ERR "Current attempt - LUN: %u -> MAPPED LUN: %u"
+		pr_err("Current attempt - LUN: %u -> MAPPED LUN: %u"
 			" from %s \n", cmd->se_lun->unpacked_lun,
 			cmd->se_deve->mapped_lun,
 			sess->se_node_acl->initiatorname);
@@ -192,7 +192,7 @@
 		dev->dev_res_bin_isid = sess->sess_bin_isid;
 		dev->dev_flags |= DF_SPC2_RESERVATIONS_WITH_ISID;
 	}
-	printk(KERN_INFO "SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
+	pr_debug("SCSI-2 Reserved %s LUN: %u -> MAPPED LUN: %u"
 		" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		cmd->se_lun->unpacked_lun, cmd->se_deve->mapped_lun,
 		sess->se_node_acl->initiatorname);
@@ -220,10 +220,10 @@
 	int crh = (su_dev->t10_pr.res_type == SPC3_PERSISTENT_RESERVATIONS);
 	int conflict = 0;
 
-	if (!(se_sess))
+	if (!se_sess)
 		return 0;
 
-	if (!(crh))
+	if (!crh)
 		goto after_crh;
 
 	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
@@ -280,7 +280,7 @@
 	}
 
 	if (conflict) {
-		printk(KERN_ERR "Received legacy SPC-2 RESERVE/RELEASE"
+		pr_err("Received legacy SPC-2 RESERVE/RELEASE"
 			" while active SPC-3 registrations exist,"
 			" returning RESERVATION_CONFLICT\n");
 		return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -412,7 +412,7 @@
 			ret = (registered_nexus) ? 0 : 1;
 			break;
 		default:
-			printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+			pr_err("Unknown PERSISTENT_RESERVE_OUT service"
 				" action: 0x%02x\n", cdb[1] & 0x1f);
 			return -EINVAL;
 		}
@@ -459,7 +459,7 @@
 			ret = 0; /* Allowed */
 			break;
 		default:
-			printk(KERN_ERR "Unknown MI Service Action: 0x%02x\n",
+			pr_err("Unknown MI Service Action: 0x%02x\n",
 				(cdb[1] & 0x1f));
 			return -EINVAL;
 		}
@@ -481,9 +481,9 @@
 	 * Case where the CDB is explicitly allowed in the above switch
 	 * statement.
 	 */
-	if (!(ret) && !(other_cdb)) {
+	if (!ret && !other_cdb) {
 #if 0
-		printk(KERN_INFO "Allowing explict CDB: 0x%02x for %s"
+		pr_debug("Allowing explicit CDB: 0x%02x for %s"
 			" reservation holder\n", cdb[0],
 			core_scsi3_pr_dump_type(pr_reg_type));
 #endif
@@ -498,7 +498,7 @@
 			/*
 			 * Conflict for write exclusive
 			 */
-			printk(KERN_INFO "%s Conflict for unregistered nexus"
+			pr_debug("%s Conflict for unregistered nexus"
 				" %s CDB: 0x%02x to %s reservation\n",
 				transport_dump_cmd_direction(cmd),
 				se_sess->se_node_acl->initiatorname, cdb[0],
@@ -515,8 +515,8 @@
 			 * nexuses to issue CDBs.
 			 */
 #if 0
-			if (!(registered_nexus)) {
-				printk(KERN_INFO "Allowing implict CDB: 0x%02x"
+			if (!registered_nexus) {
+				pr_debug("Allowing implicit CDB: 0x%02x"
 					" for %s reservation on unregistered"
 					" nexus\n", cdb[0],
 					core_scsi3_pr_dump_type(pr_reg_type));
@@ -531,14 +531,14 @@
 			 * allow commands from registered nexuses.
 			 */
 #if 0
-			printk(KERN_INFO "Allowing implict CDB: 0x%02x for %s"
+			pr_debug("Allowing implicit CDB: 0x%02x for %s"
 				" reservation\n", cdb[0],
 				core_scsi3_pr_dump_type(pr_reg_type));
 #endif
 			return 0;
 		}
 	}
-	printk(KERN_INFO "%s Conflict for %sregistered nexus %s CDB: 0x%2x"
+	pr_debug("%s Conflict for %sregistered nexus %s CDB: 0x%2x"
 		" for %s reservation\n", transport_dump_cmd_direction(cmd),
 		(registered_nexus) ? "" : "un",
 		se_sess->se_node_acl->initiatorname, cdb[0],
@@ -575,7 +575,7 @@
 	struct se_session *sess = cmd->se_sess;
 	int ret;
 
-	if (!(sess))
+	if (!sess)
 		return 0;
 	/*
 	 * A legacy SPC-2 reservation is being held.
@@ -584,7 +584,7 @@
 		return core_scsi2_reservation_check(cmd, pr_reg_type);
 
 	spin_lock(&dev->dev_reservation_lock);
-	if (!(dev->dev_pr_res_holder)) {
+	if (!dev->dev_pr_res_holder) {
 		spin_unlock(&dev->dev_reservation_lock);
 		return 0;
 	}
@@ -594,7 +594,7 @@
 		spin_unlock(&dev->dev_reservation_lock);
 		return -EINVAL;
 	}
-	if (!(dev->dev_pr_res_holder->isid_present_at_reg)) {
+	if (!dev->dev_pr_res_holder->isid_present_at_reg) {
 		spin_unlock(&dev->dev_reservation_lock);
 		return 0;
 	}
@@ -624,15 +624,15 @@
 	struct t10_pr_registration *pr_reg;
 
 	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_ATOMIC);
-	if (!(pr_reg)) {
-		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+	if (!pr_reg) {
+		pr_err("Unable to allocate struct t10_pr_registration\n");
 		return NULL;
 	}
 
 	pr_reg->pr_aptpl_buf = kzalloc(su_dev->t10_pr.pr_aptpl_buf_len,
 					GFP_ATOMIC);
-	if (!(pr_reg->pr_aptpl_buf)) {
-		printk(KERN_ERR "Unable to allocate pr_reg->pr_aptpl_buf\n");
+	if (!pr_reg->pr_aptpl_buf) {
+		pr_err("Unable to allocate pr_reg->pr_aptpl_buf\n");
 		kmem_cache_free(t10_pr_reg_cache, pr_reg);
 		return NULL;
 	}
@@ -692,12 +692,12 @@
 	 */
 	pr_reg = __core_scsi3_do_alloc_registration(dev, nacl, deve, isid,
 			sa_res_key, all_tg_pt, aptpl);
-	if (!(pr_reg))
+	if (!pr_reg)
 		return NULL;
 	/*
 	 * Return pointer to pr_reg for ALL_TG_PT=0
 	 */
-	if (!(all_tg_pt))
+	if (!all_tg_pt)
 		return pr_reg;
 	/*
 	 * Create list of matching SCSI Initiator Port registrations
@@ -717,7 +717,7 @@
 			 * that have not been make explict via a ConfigFS
 			 * MappedLUN group for the SCSI Initiator Node ACL.
 			 */
-			if (!(deve_tmp->se_lun_acl))
+			if (!deve_tmp->se_lun_acl)
 				continue;
 
 			nacl_tmp = deve_tmp->se_lun_acl->se_lun_nacl;
@@ -751,7 +751,7 @@
 			 */
 			ret = core_scsi3_lunacl_depend_item(deve_tmp);
 			if (ret < 0) {
-				printk(KERN_ERR "core_scsi3_lunacl_depend"
+				pr_err("core_scsi3_lunacl_depend"
 						"_item() failed\n");
 				atomic_dec(&port->sep_tg_pt_ref_cnt);
 				smp_mb__after_atomic_dec();
@@ -769,7 +769,7 @@
 			pr_reg_atp = __core_scsi3_do_alloc_registration(dev,
 						nacl_tmp, deve_tmp, NULL,
 						sa_res_key, all_tg_pt, aptpl);
-			if (!(pr_reg_atp)) {
+			if (!pr_reg_atp) {
 				atomic_dec(&port->sep_tg_pt_ref_cnt);
 				smp_mb__after_atomic_dec();
 				atomic_dec(&deve_tmp->pr_ref_count);
@@ -817,14 +817,14 @@
 {
 	struct t10_pr_registration *pr_reg;
 
-	if (!(i_port) || !(t_port) || !(sa_res_key)) {
-		printk(KERN_ERR "Illegal parameters for APTPL registration\n");
+	if (!i_port || !t_port || !sa_res_key) {
+		pr_err("Illegal parameters for APTPL registration\n");
 		return -EINVAL;
 	}
 
 	pr_reg = kmem_cache_zalloc(t10_pr_reg_cache, GFP_KERNEL);
-	if (!(pr_reg)) {
-		printk(KERN_ERR "Unable to allocate struct t10_pr_registration\n");
+	if (!pr_reg) {
+		pr_err("Unable to allocate struct t10_pr_registration\n");
 		return -ENOMEM;
 	}
 	pr_reg->pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len, GFP_KERNEL);
@@ -869,7 +869,7 @@
 	pr_reg->pr_res_holder = res_holder;
 
 	list_add_tail(&pr_reg->pr_reg_aptpl_list, &pr_tmpl->aptpl_reg_list);
-	printk(KERN_INFO "SPC-3 PR APTPL Successfully added registration%s from"
+	pr_debug("SPC-3 PR APTPL Successfully added registration%s from"
 			" metadata\n", (res_holder) ? "+reservation" : "");
 	return 0;
 }
@@ -891,12 +891,12 @@
 	dev->dev_pr_res_holder = pr_reg;
 	spin_unlock(&dev->dev_reservation_lock);
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: APTPL RESERVE created"
+	pr_debug("SPC-3 PR [%s] Service Action: APTPL RESERVE created"
 		" new reservation holder TYPE: %s ALL_TG_PT: %d\n",
 		tpg->se_tpg_tfo->get_fabric_name(),
 		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
 		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
-	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
 		tpg->se_tpg_tfo->get_fabric_name(), node_acl->initiatorname,
 		(prf_isid) ? &i_buf[0] : "");
 }
@@ -936,7 +936,7 @@
 	spin_lock(&pr_tmpl->aptpl_reg_lock);
 	list_for_each_entry_safe(pr_reg, pr_reg_tmp, &pr_tmpl->aptpl_reg_list,
 				pr_reg_aptpl_list) {
-		if (!(strcmp(pr_reg->pr_iport, i_port)) &&
+		if (!strcmp(pr_reg->pr_iport, i_port) &&
 		     (pr_reg->pr_res_mapped_lun == deve->mapped_lun) &&
 		    !(strcmp(pr_reg->pr_tport, t_port)) &&
 		     (pr_reg->pr_reg_tpgt == tpgt) &&
@@ -1006,19 +1006,19 @@
 	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
 				PR_REG_ISID_ID_LEN);
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
+	pr_debug("SPC-3 PR [%s] Service Action: REGISTER%s Initiator"
 		" Node: %s%s\n", tfo->get_fabric_name(), (register_type == 2) ?
 		"_AND_MOVE" : (register_type == 1) ?
 		"_AND_IGNORE_EXISTING_KEY" : "", nacl->initiatorname,
 		(prf_isid) ? i_buf : "");
-	printk(KERN_INFO "SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
+	pr_debug("SPC-3 PR [%s] registration on Target Port: %s,0x%04x\n",
 		 tfo->get_fabric_name(), tfo->tpg_get_wwn(se_tpg),
 		tfo->tpg_get_tag(se_tpg));
-	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
 		" Port(s)\n",  tfo->get_fabric_name(),
 		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
 		dev->transport->name);
-	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
 		" 0x%08x  APTPL: %d\n", tfo->get_fabric_name(),
 		pr_reg->pr_res_key, pr_reg->pr_res_generation,
 		pr_reg->pr_reg_aptpl);
@@ -1062,7 +1062,7 @@
 	/*
 	 * Skip extra processing for ALL_TG_PT=0 or REGISTER_AND_MOVE.
 	 */
-	if (!(pr_reg->pr_reg_all_tg_pt) || (register_move))
+	if (!pr_reg->pr_reg_all_tg_pt || register_move)
 		return;
 	/*
 	 * Walk pr_reg->pr_reg_atp_list and add registrations for ALL_TG_PT=1
@@ -1106,7 +1106,7 @@
 
 	pr_reg = __core_scsi3_alloc_registration(dev, nacl, deve, isid,
 			sa_res_key, all_tg_pt, aptpl);
-	if (!(pr_reg))
+	if (!pr_reg)
 		return -EPERM;
 
 	__core_scsi3_add_registration(dev, nacl, pr_reg,
@@ -1137,7 +1137,7 @@
 		 * If this registration does NOT contain a fabric provided
 		 * ISID, then we have found a match.
 		 */
-		if (!(pr_reg->isid_present_at_reg)) {
+		if (!pr_reg->isid_present_at_reg) {
 			/*
 			 * Determine if this SCSI device server requires that
 			 * SCSI Intiatior TransportID w/ ISIDs is enforced
@@ -1157,7 +1157,7 @@
 		 * SCSI Initiator Port TransportIDs, then we expect a valid
 		 * matching ISID to be provided by the local SCSI Initiator Port.
 		 */
-		if (!(isid))
+		if (!isid)
 			continue;
 		if (strcmp(isid, pr_reg->pr_reg_isid))
 			continue;
@@ -1206,7 +1206,7 @@
 
 	spin_lock(&dev->dev_reservation_lock);
 	pr_res_holder = dev->dev_pr_res_holder;
-	if (!(pr_res_holder)) {
+	if (!pr_res_holder) {
 		spin_unlock(&dev->dev_reservation_lock);
 		return ret;
 	}
@@ -1236,7 +1236,7 @@
 		  (!strcmp(pr_res_holder->pr_reg_nacl->initiatorname,
 			  pr_reg->pr_reg_nacl->initiatorname)) &&
 		  (pr_res_holder->pr_res_key == pr_reg->pr_res_key)) {
-		printk(KERN_ERR "SPC-3 PR: Unable to perform ALL_TG_PT=1"
+		pr_err("SPC-3 PR: Unable to perform ALL_TG_PT=1"
 			" UNREGISTER while existing reservation with matching"
 			" key 0x%016Lx is present from another SCSI Initiator"
 			" Port\n", pr_reg->pr_res_key);
@@ -1283,25 +1283,25 @@
 	 */
 	while (atomic_read(&pr_reg->pr_res_holders) != 0) {
 		spin_unlock(&pr_tmpl->registration_lock);
-		printk("SPC-3 PR [%s] waiting for pr_res_holders\n",
+		pr_debug("SPC-3 PR [%s] waiting for pr_res_holders\n",
 				tfo->get_fabric_name());
 		cpu_relax();
 		spin_lock(&pr_tmpl->registration_lock);
 	}
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
+	pr_debug("SPC-3 PR [%s] Service Action: UNREGISTER Initiator"
 		" Node: %s%s\n", tfo->get_fabric_name(),
 		pr_reg->pr_reg_nacl->initiatorname,
 		(prf_isid) ? &i_buf[0] : "");
-	printk(KERN_INFO "SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
+	pr_debug("SPC-3 PR [%s] for %s TCM Subsystem %s Object Target"
 		" Port(s)\n", tfo->get_fabric_name(),
 		(pr_reg->pr_reg_all_tg_pt) ? "ALL" : "SINGLE",
 		dev->transport->name);
-	printk(KERN_INFO "SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
+	pr_debug("SPC-3 PR [%s] SA Res Key: 0x%016Lx PRgeneration:"
 		" 0x%08x\n", tfo->get_fabric_name(), pr_reg->pr_res_key,
 		pr_reg->pr_res_generation);
 
-	if (!(preempt_and_abort_list)) {
+	if (!preempt_and_abort_list) {
 		pr_reg->pr_reg_deve = NULL;
 		pr_reg->pr_reg_nacl = NULL;
 		kfree(pr_reg->pr_aptpl_buf);
@@ -1430,7 +1430,7 @@
 	/*
 	 * For nacl->dynamic_node_acl=1
 	 */
-	if (!(lun_acl))
+	if (!lun_acl)
 		return 0;
 
 	nacl = lun_acl->se_lun_nacl;
@@ -1448,7 +1448,7 @@
 	/*
 	 * For nacl->dynamic_node_acl=1
 	 */
-	if (!(lun_acl)) {
+	if (!lun_acl) {
 		atomic_dec(&se_deve->pr_ref_count);
 		smp_mb__after_atomic_dec();
 		return;
@@ -1500,8 +1500,8 @@
 	 * processing in the loop of tid_dest_list below.
 	 */
 	tidh_new = kzalloc(sizeof(struct pr_transport_id_holder), GFP_KERNEL);
-	if (!(tidh_new)) {
-		printk(KERN_ERR "Unable to allocate tidh_new\n");
+	if (!tidh_new) {
+		pr_err("Unable to allocate tidh_new\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 	INIT_LIST_HEAD(&tidh_new->dest_list);
@@ -1512,7 +1512,7 @@
 	local_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
 				se_sess->se_node_acl, local_se_deve, l_isid,
 				sa_res_key, all_tg_pt, aptpl);
-	if (!(local_pr_reg)) {
+	if (!local_pr_reg) {
 		kfree(tidh_new);
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
@@ -1537,7 +1537,7 @@
 	tpdl |= buf[27] & 0xff;
 
 	if ((tpdl + 28) != cmd->data_length) {
-		printk(KERN_ERR "SPC-3 PR: Illegal tpdl: %u + 28 byte header"
+		pr_err("SPC-3 PR: Illegal tpdl: %u + 28 byte header"
 			" does not equal CDB data_length: %u\n", tpdl,
 			cmd->data_length);
 		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -1557,13 +1557,13 @@
 		spin_lock(&dev->se_port_lock);
 		list_for_each_entry(tmp_port, &dev->dev_sep_list, sep_list) {
 			tmp_tpg = tmp_port->sep_tpg;
-			if (!(tmp_tpg))
+			if (!tmp_tpg)
 				continue;
 			tmp_tf_ops = tmp_tpg->se_tpg_tfo;
-			if (!(tmp_tf_ops))
+			if (!tmp_tf_ops)
 				continue;
-			if (!(tmp_tf_ops->get_fabric_proto_ident) ||
-			    !(tmp_tf_ops->tpg_parse_pr_out_transport_id))
+			if (!tmp_tf_ops->get_fabric_proto_ident ||
+			    !tmp_tf_ops->tpg_parse_pr_out_transport_id)
 				continue;
 			/*
 			 * Look for the matching proto_ident provided by
@@ -1577,7 +1577,7 @@
 			i_str = tmp_tf_ops->tpg_parse_pr_out_transport_id(
 					tmp_tpg, (const char *)ptr, &tid_len,
 					&iport_ptr);
-			if (!(i_str))
+			if (!i_str)
 				continue;
 
 			atomic_inc(&tmp_tpg->tpg_pr_ref_count);
@@ -1586,7 +1586,7 @@
 
 			ret = core_scsi3_tpg_depend_item(tmp_tpg);
 			if (ret != 0) {
-				printk(KERN_ERR " core_scsi3_tpg_depend_item()"
+				pr_err(" core_scsi3_tpg_depend_item()"
 					" for tmp_tpg\n");
 				atomic_dec(&tmp_tpg->tpg_pr_ref_count);
 				smp_mb__after_atomic_dec();
@@ -1607,7 +1607,7 @@
 			}
 			spin_unlock_bh(&tmp_tpg->acl_node_lock);
 
-			if (!(dest_node_acl)) {
+			if (!dest_node_acl) {
 				core_scsi3_tpg_undepend_item(tmp_tpg);
 				spin_lock(&dev->se_port_lock);
 				continue;
@@ -1615,7 +1615,7 @@
 
 			ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
 			if (ret != 0) {
-				printk(KERN_ERR "configfs_depend_item() failed"
+				pr_err("configfs_depend_item() failed"
 					" for dest_node_acl->acl_group\n");
 				atomic_dec(&dest_node_acl->acl_pr_ref_count);
 				smp_mb__after_atomic_dec();
@@ -1625,7 +1625,7 @@
 			}
 
 			dest_tpg = tmp_tpg;
-			printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node:"
+			pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node:"
 				" %s Port RTPI: %hu\n",
 				dest_tpg->se_tpg_tfo->get_fabric_name(),
 				dest_node_acl->initiatorname, dest_rtpi);
@@ -1635,20 +1635,20 @@
 		}
 		spin_unlock(&dev->se_port_lock);
 
-		if (!(dest_tpg)) {
-			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Unable to locate"
+		if (!dest_tpg) {
+			pr_err("SPC-3 PR SPEC_I_PT: Unable to locate"
 					" dest_tpg\n");
 			ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 			goto out;
 		}
 #if 0
-		printk("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
+		pr_debug("SPC-3 PR SPEC_I_PT: Got %s data_length: %u tpdl: %u"
 			" tid_len: %d for %s + %s\n",
 			dest_tpg->se_tpg_tfo->get_fabric_name(), cmd->data_length,
 			tpdl, tid_len, i_str, iport_ptr);
 #endif
 		if (tid_len > tpdl) {
-			printk(KERN_ERR "SPC-3 PR SPEC_I_PT: Illegal tid_len:"
+			pr_err("SPC-3 PR SPEC_I_PT: Illegal tid_len:"
 				" %u for Transport ID: %s\n", tid_len, ptr);
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1662,8 +1662,8 @@
 		 */
 		dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl,
 					dest_rtpi);
-		if (!(dest_se_deve)) {
-			printk(KERN_ERR "Unable to locate %s dest_se_deve"
+		if (!dest_se_deve) {
+			pr_err("Unable to locate %s dest_se_deve"
 				" from destination RTPI: %hu\n",
 				dest_tpg->se_tpg_tfo->get_fabric_name(),
 				dest_rtpi);
@@ -1676,7 +1676,7 @@
 
 		ret = core_scsi3_lunacl_depend_item(dest_se_deve);
 		if (ret < 0) {
-			printk(KERN_ERR "core_scsi3_lunacl_depend_item()"
+			pr_err("core_scsi3_lunacl_depend_item()"
 					" failed\n");
 			atomic_dec(&dest_se_deve->pr_ref_count);
 			smp_mb__after_atomic_dec();
@@ -1686,7 +1686,7 @@
 			goto out;
 		}
 #if 0
-		printk(KERN_INFO "SPC-3 PR SPEC_I_PT: Located %s Node: %s"
+		pr_debug("SPC-3 PR SPEC_I_PT: Located %s Node: %s"
 			" dest_se_deve mapped_lun: %u\n",
 			dest_tpg->se_tpg_tfo->get_fabric_name(),
 			dest_node_acl->initiatorname, dest_se_deve->mapped_lun);
@@ -1714,8 +1714,8 @@
 		 */
 		tidh_new = kzalloc(sizeof(struct pr_transport_id_holder),
 				GFP_KERNEL);
-		if (!(tidh_new)) {
-			printk(KERN_ERR "Unable to allocate tidh_new\n");
+		if (!tidh_new) {
+			pr_err("Unable to allocate tidh_new\n");
 			core_scsi3_lunacl_undepend_item(dest_se_deve);
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1746,7 +1746,7 @@
 		dest_pr_reg = __core_scsi3_alloc_registration(cmd->se_dev,
 				dest_node_acl, dest_se_deve, iport_ptr,
 				sa_res_key, all_tg_pt, aptpl);
-		if (!(dest_pr_reg)) {
+		if (!dest_pr_reg) {
 			core_scsi3_lunacl_undepend_item(dest_se_deve);
 			core_scsi3_nodeacl_undepend_item(dest_node_acl);
 			core_scsi3_tpg_undepend_item(dest_tpg);
@@ -1795,7 +1795,7 @@
 		__core_scsi3_add_registration(cmd->se_dev, dest_node_acl,
 					dest_pr_reg, 0, 0);
 
-		printk(KERN_INFO "SPC-3 PR [%s] SPEC_I_PT: Successfully"
+		pr_debug("SPC-3 PR [%s] SPEC_I_PT: Successfully"
 			" registered Transport ID for Node: %s%s Mapped LUN:"
 			" %u\n", dest_tpg->se_tpg_tfo->get_fabric_name(),
 			dest_node_acl->initiatorname, (prf_isid) ?
@@ -1923,7 +1923,7 @@
 		}
 
 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-			printk(KERN_ERR "Unable to update renaming"
+			pr_err("Unable to update renaming"
 				" APTPL metadata\n");
 			spin_unlock(&su_dev->t10_pr.registration_lock);
 			return -EMSGSIZE;
@@ -1941,7 +1941,7 @@
 			lun->lun_sep->sep_rtpi, lun->unpacked_lun, reg_count);
 
 		if ((len + strlen(tmp) >= pr_aptpl_buf_len)) {
-			printk(KERN_ERR "Unable to update renaming"
+			pr_err("Unable to update renaming"
 				" APTPL metadata\n");
 			spin_unlock(&su_dev->t10_pr.registration_lock);
 			return -EMSGSIZE;
@@ -1951,7 +1951,7 @@
 	}
 	spin_unlock(&su_dev->t10_pr.registration_lock);
 
-	if (!(reg_count))
+	if (!reg_count)
 		len += sprintf(buf+len, "No Registrations or Reservations");
 
 	return 0;
@@ -1993,7 +1993,7 @@
 	memset(path, 0, 512);
 
 	if (strlen(&wwn->unit_serial[0]) >= 512) {
-		printk(KERN_ERR "WWN value for struct se_device does not fit"
+		pr_err("WWN value for struct se_device does not fit"
 			" into path buffer\n");
 		return -EMSGSIZE;
 	}
@@ -2001,13 +2001,13 @@
 	snprintf(path, 512, "/var/target/pr/aptpl_%s", &wwn->unit_serial[0]);
 	file = filp_open(path, flags, 0600);
 	if (IS_ERR(file) || !file || !file->f_dentry) {
-		printk(KERN_ERR "filp_open(%s) for APTPL metadata"
+		pr_err("filp_open(%s) for APTPL metadata"
 			" failed\n", path);
 		return (PTR_ERR(file) < 0 ? PTR_ERR(file) : -ENOENT);
 	}
 
 	iov[0].iov_base = &buf[0];
-	if (!(pr_aptpl_buf_len))
+	if (!pr_aptpl_buf_len)
 		iov[0].iov_len = (strlen(&buf[0]) + 1); /* Add extra for NULL */
 	else
 		iov[0].iov_len = pr_aptpl_buf_len;
@@ -2018,7 +2018,7 @@
 	set_fs(old_fs);
 
 	if (ret < 0) {
-		printk("Error writing APTPL metadata file: %s\n", path);
+		pr_err("Error writing APTPL metadata file: %s\n", path);
 		filp_close(file, NULL);
 		return -EIO;
 	}
@@ -2038,7 +2038,7 @@
 	/*
 	 * Can be called with a NULL pointer from PROUT service action CLEAR
 	 */
-	if (!(in_buf)) {
+	if (!in_buf) {
 		memset(null_buf, 0, 64);
 		buf = &null_buf[0];
 		/*
@@ -2088,8 +2088,8 @@
 	unsigned char isid_buf[PR_REG_ISID_LEN], *isid_ptr = NULL;
 	int pr_holder = 0, ret = 0, type;
 
-	if (!(se_sess) || !(se_lun)) {
-		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 	se_tpg = se_sess->se_tpg;
@@ -2105,19 +2105,19 @@
 	 * Follow logic from spc4r17 Section 5.7.7, Register Behaviors Table 47
 	 */
 	pr_reg_e = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
-	if (!(pr_reg_e)) {
+	if (!pr_reg_e) {
 		if (res_key) {
-			printk(KERN_WARNING "SPC-3 PR: Reservation Key non-zero"
+			pr_warn("SPC-3 PR: Reservation Key non-zero"
 				" for SA REGISTER, returning CONFLICT\n");
 			return PYX_TRANSPORT_RESERVATION_CONFLICT;
 		}
 		/*
 		 * Do nothing but return GOOD status.
 		 */
-		if (!(sa_res_key))
+		if (!sa_res_key)
 			return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 
-		if (!(spec_i_pt)) {
+		if (!spec_i_pt) {
 			/*
 			 * Perform the Service Action REGISTER on the Initiator
 			 * Port Endpoint that the PRO was received from on the
@@ -2128,7 +2128,7 @@
 					sa_res_key, all_tg_pt, aptpl,
 					ignore_key, 0);
 			if (ret != 0) {
-				printk(KERN_ERR "Unable to allocate"
+				pr_err("Unable to allocate"
 					" struct t10_pr_registration\n");
 				return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 			}
@@ -2149,10 +2149,10 @@
 		/*
 		 * Nothing left to do for the APTPL=0 case.
 		 */
-		if (!(aptpl)) {
+		if (!aptpl) {
 			pr_tmpl->pr_aptpl_active = 0;
 			core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
-			printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+			pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
 					" REGISTER\n");
 			return 0;
 		}
@@ -2167,9 +2167,9 @@
 		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
-		if (!(ret)) {
+		if (!ret) {
 			pr_tmpl->pr_aptpl_active = 1;
-			printk("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
+			pr_debug("SPC-3 PR: Set APTPL Bit Activated for REGISTER\n");
 		}
 
 		core_scsi3_put_pr_reg(pr_reg);
@@ -2181,9 +2181,9 @@
 		pr_reg = pr_reg_e;
 		type = pr_reg->pr_res_type;
 
-		if (!(ignore_key)) {
+		if (!ignore_key) {
 			if (res_key != pr_reg->pr_res_key) {
-				printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+				pr_err("SPC-3 PR REGISTER: Received"
 					" res_key: 0x%016Lx does not match"
 					" existing SA REGISTER res_key:"
 					" 0x%016Lx\n", res_key,
@@ -2193,7 +2193,7 @@
 			}
 		}
 		if (spec_i_pt) {
-			printk(KERN_ERR "SPC-3 PR UNREGISTER: SPEC_I_PT"
+			pr_err("SPC-3 PR UNREGISTER: SPEC_I_PT"
 				" set while sa_res_key=0\n");
 			core_scsi3_put_pr_reg(pr_reg);
 			return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -2203,7 +2203,7 @@
 		 * must also set ALL_TG_PT=1 in the incoming PROUT.
 		 */
 		if (pr_reg->pr_reg_all_tg_pt && !(all_tg_pt)) {
-			printk(KERN_ERR "SPC-3 PR UNREGISTER: ALL_TG_PT=1"
+			pr_err("SPC-3 PR UNREGISTER: ALL_TG_PT=1"
 				" registration exists, but ALL_TG_PT=1 bit not"
 				" present in received PROUT\n");
 			core_scsi3_put_pr_reg(pr_reg);
@@ -2215,8 +2215,8 @@
 		if (aptpl) {
 			pr_aptpl_buf = kzalloc(pr_tmpl->pr_aptpl_buf_len,
 						GFP_KERNEL);
-			if (!(pr_aptpl_buf)) {
-				printk(KERN_ERR "Unable to allocate"
+			if (!pr_aptpl_buf) {
+				pr_err("Unable to allocate"
 					" pr_aptpl_buf\n");
 				core_scsi3_put_pr_reg(pr_reg);
 				return PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -2227,7 +2227,7 @@
 		 * Nexus sa_res_key=1 Change Reservation Key for registered I_T
 		 * Nexus.
 		 */
-		if (!(sa_res_key)) {
+		if (!sa_res_key) {
 			pr_holder = core_scsi3_check_implict_release(
 					cmd->se_dev, pr_reg);
 			if (pr_holder < 0) {
@@ -2246,7 +2246,7 @@
 						&pr_tmpl->registration_list,
 						pr_reg_list) {
 
-					if (!(pr_reg_p->pr_reg_all_tg_pt))
+					if (!pr_reg_p->pr_reg_all_tg_pt)
 						continue;
 
 					if (pr_reg_p->pr_res_key != res_key)
@@ -2295,10 +2295,10 @@
 			}
 			spin_unlock(&pr_tmpl->registration_lock);
 
-			if (!(aptpl)) {
+			if (!aptpl) {
 				pr_tmpl->pr_aptpl_active = 0;
 				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
-				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+				pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
 						" for UNREGISTER\n");
 				return 0;
 			}
@@ -2306,9 +2306,9 @@
 			ret = core_scsi3_update_and_write_aptpl(dev,
 					&pr_aptpl_buf[0],
 					pr_tmpl->pr_aptpl_buf_len);
-			if (!(ret)) {
+			if (!ret) {
 				pr_tmpl->pr_aptpl_active = 1;
-				printk("SPC-3 PR: Set APTPL Bit Activated"
+				pr_debug("SPC-3 PR: Set APTPL Bit Activated"
 						" for UNREGISTER\n");
 			}
 
@@ -2323,18 +2323,18 @@
 			pr_reg->pr_res_generation = core_scsi3_pr_generation(
 							cmd->se_dev);
 			pr_reg->pr_res_key = sa_res_key;
-			printk("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
+			pr_debug("SPC-3 PR [%s] REGISTER%s: Changed Reservation"
 				" Key for %s to: 0x%016Lx PRgeneration:"
 				" 0x%08x\n", cmd->se_tfo->get_fabric_name(),
 				(ignore_key) ? "_AND_IGNORE_EXISTING_KEY" : "",
 				pr_reg->pr_reg_nacl->initiatorname,
 				pr_reg->pr_res_key, pr_reg->pr_res_generation);
 
-			if (!(aptpl)) {
+			if (!aptpl) {
 				pr_tmpl->pr_aptpl_active = 0;
 				core_scsi3_update_and_write_aptpl(dev, NULL, 0);
 				core_scsi3_put_pr_reg(pr_reg);
-				printk("SPC-3 PR: Set APTPL Bit Deactivated"
+				pr_debug("SPC-3 PR: Set APTPL Bit Deactivated"
 						" for REGISTER\n");
 				return 0;
 			}
@@ -2342,9 +2342,9 @@
 			ret = core_scsi3_update_and_write_aptpl(dev,
 					&pr_aptpl_buf[0],
 					pr_tmpl->pr_aptpl_buf_len);
-			if (!(ret)) {
+			if (!ret) {
 				pr_tmpl->pr_aptpl_active = 1;
-				printk("SPC-3 PR: Set APTPL Bit Activated"
+				pr_debug("SPC-3 PR: Set APTPL Bit Activated"
 						" for REGISTER\n");
 			}
 
@@ -2395,8 +2395,8 @@
 
 	memset(i_buf, 0, PR_REG_ISID_ID_LEN);
 
-	if (!(se_sess) || !(se_lun)) {
-		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 	se_tpg = se_sess->se_tpg;
@@ -2406,8 +2406,8 @@
 	 */
 	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
-	if (!(pr_reg)) {
-		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate"
 			" PR_REGISTERED *pr_reg for RESERVE\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
@@ -2421,7 +2421,7 @@
 	 * 	 registered with the logical unit for the I_T nexus; and
 	 */
 	if (res_key != pr_reg->pr_res_key) {
-		printk(KERN_ERR "SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
+		pr_err("SPC-3 PR RESERVE: Received res_key: 0x%016Lx"
 			" does not match existing SA REGISTER res_key:"
 			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
 		core_scsi3_put_pr_reg(pr_reg);
@@ -2438,7 +2438,7 @@
 	 * and that persistent reservation has a scope of LU_SCOPE.
 	 */
 	if (scope != PR_SCOPE_LU_SCOPE) {
-		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
 		core_scsi3_put_pr_reg(pr_reg);
 		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 	}
@@ -2462,7 +2462,7 @@
 		 */
 		if (pr_res_holder != pr_reg) {
 			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
-			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+			pr_err("SPC-3 PR: Attempted RESERVE from"
 				" [%s]: %s while reservation already held by"
 				" [%s]: %s, returning RESERVATION_CONFLICT\n",
 				cmd->se_tfo->get_fabric_name(),
@@ -2484,7 +2484,7 @@
 		if ((pr_res_holder->pr_res_type != type) ||
 		    (pr_res_holder->pr_res_scope != scope)) {
 			struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
-			printk(KERN_ERR "SPC-3 PR: Attempted RESERVE from"
+			pr_err("SPC-3 PR: Attempted RESERVE from"
 				" [%s]: %s trying to change TYPE and/or SCOPE,"
 				" while reservation already held by [%s]: %s,"
 				" returning RESERVATION_CONFLICT\n",
@@ -2522,11 +2522,11 @@
 	prf_isid = core_pr_dump_initiator_port(pr_reg, &i_buf[0],
 				PR_REG_ISID_ID_LEN);
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: RESERVE created new"
+	pr_debug("SPC-3 PR [%s] Service Action: RESERVE created new"
 		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
 		cmd->se_tfo->get_fabric_name(), core_scsi3_pr_dump_type(type),
 		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
-	printk(KERN_INFO "SPC-3 PR [%s] RESERVE Node: %s%s\n",
+	pr_debug("SPC-3 PR [%s] RESERVE Node: %s%s\n",
 			cmd->se_tfo->get_fabric_name(),
 			se_sess->se_node_acl->initiatorname,
 			(prf_isid) ? &i_buf[0] : "");
@@ -2536,8 +2536,8 @@
 		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
-		if (!(ret))
-			printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+		if (!ret)
+			pr_debug("SPC-3 PR: Updated APTPL metadata"
 					" for RESERVE\n");
 	}
 
@@ -2564,7 +2564,7 @@
 		ret = core_scsi3_pro_reserve(cmd, dev, type, scope, res_key);
 		break;
 	default:
-		printk(KERN_ERR "SPC-3 PR: Unknown Service Action RESERVE Type:"
+		pr_err("SPC-3 PR: Unknown Service Action RESERVE Type:"
 			" 0x%02x\n", type);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -2593,12 +2593,12 @@
 	 */
 	dev->dev_pr_res_holder = NULL;
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: %s RELEASE cleared"
+	pr_debug("SPC-3 PR [%s] Service Action: %s RELEASE cleared"
 		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
 		tfo->get_fabric_name(), (explict) ? "explict" : "implict",
 		core_scsi3_pr_dump_type(pr_reg->pr_res_type),
 		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
-	printk(KERN_INFO "SPC-3 PR [%s] RELEASE Node: %s%s\n",
+	pr_debug("SPC-3 PR [%s] RELEASE Node: %s%s\n",
 		tfo->get_fabric_name(), se_nacl->initiatorname,
 		(prf_isid) ? &i_buf[0] : "");
 	/*
@@ -2620,16 +2620,16 @@
 	struct t10_reservation *pr_tmpl = &dev->se_sub_dev->t10_pr;
 	int ret, all_reg = 0;
 
-	if (!(se_sess) || !(se_lun)) {
-		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 	/*
 	 * Locate the existing *pr_reg via struct se_node_acl pointers
 	 */
 	pr_reg = core_scsi3_locate_pr_reg(dev, se_sess->se_node_acl, se_sess);
-	if (!(pr_reg)) {
-		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate"
 			" PR_REGISTERED *pr_reg for RELEASE\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
@@ -2647,7 +2647,7 @@
 	 */
 	spin_lock(&dev->dev_reservation_lock);
 	pr_res_holder = dev->dev_pr_res_holder;
-	if (!(pr_res_holder)) {
+	if (!pr_res_holder) {
 		/*
 		 * No persistent reservation, return GOOD status.
 		 */
@@ -2684,7 +2684,7 @@
 	 *	  that is registered with the logical unit for the I_T nexus;
 	 */
 	if (res_key != pr_reg->pr_res_key) {
-		printk(KERN_ERR "SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
+		pr_err("SPC-3 PR RELEASE: Received res_key: 0x%016Lx"
 			" does not match existing SA REGISTER res_key:"
 			" 0x%016Lx\n", res_key, pr_reg->pr_res_key);
 		spin_unlock(&dev->dev_reservation_lock);
@@ -2700,7 +2700,7 @@
 	if ((pr_res_holder->pr_res_type != type) ||
 	    (pr_res_holder->pr_res_scope != scope)) {
 		struct se_node_acl *pr_res_nacl = pr_res_holder->pr_reg_nacl;
-		printk(KERN_ERR "SPC-3 PR RELEASE: Attempted to release"
+		pr_err("SPC-3 PR RELEASE: Attempted to release"
 			" reservation from [%s]: %s with different TYPE "
 			"and/or SCOPE  while reservation already held by"
 			" [%s]: %s, returning RESERVATION_CONFLICT\n",
@@ -2767,8 +2767,8 @@
 		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
-		if (!(ret))
-			printk("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
+		if (!ret)
+			pr_debug("SPC-3 PR: Updated APTPL metadata for RELEASE\n");
 	}
 
 	core_scsi3_put_pr_reg(pr_reg);
@@ -2791,8 +2791,8 @@
 	 */
 	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev,
 			se_sess->se_node_acl, se_sess);
-	if (!(pr_reg_n)) {
-		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
 			" PR_REGISTERED *pr_reg for CLEAR\n");
 			return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
@@ -2808,7 +2808,7 @@
 	 * 	   that is registered with the logical unit for the I_T nexus.
 	 */
 	if (res_key != pr_reg_n->pr_res_key) {
-		printk(KERN_ERR "SPC-3 PR REGISTER: Received"
+		pr_err("SPC-3 PR REGISTER: Received"
 			" res_key: 0x%016Lx does not match"
 			" existing SA REGISTER res_key:"
 			" 0x%016Lx\n", res_key, pr_reg_n->pr_res_key);
@@ -2845,18 +2845,18 @@
 		 *    command with CLEAR service action was received, with the
 		 *    additional sense code set to RESERVATIONS PREEMPTED.
 		 */
-		if (!(calling_it_nexus))
+		if (!calling_it_nexus)
 			core_scsi3_ua_allocate(pr_reg_nacl, pr_res_mapped_lun,
 				0x2A, ASCQ_2AH_RESERVATIONS_PREEMPTED);
 	}
 	spin_unlock(&pr_tmpl->registration_lock);
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: CLEAR complete\n",
+	pr_debug("SPC-3 PR [%s] Service Action: CLEAR complete\n",
 		cmd->se_tfo->get_fabric_name());
 
 	if (pr_tmpl->pr_aptpl_active) {
 		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
-		printk(KERN_INFO "SPC-3 PR: Updated APTPL metadata"
+		pr_debug("SPC-3 PR: Updated APTPL metadata"
 				" for CLEAR\n");
 	}
 
@@ -2895,12 +2895,12 @@
 	pr_reg->pr_res_type = type;
 	pr_reg->pr_res_scope = scope;
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: PREEMPT%s created new"
+	pr_debug("SPC-3 PR [%s] Service Action: PREEMPT%s created new"
 		" reservation holder TYPE: %s ALL_TG_PT: %d\n",
 		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
 		core_scsi3_pr_dump_type(type),
 		(pr_reg->pr_reg_all_tg_pt) ? 1 : 0);
-	printk(KERN_INFO "SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
+	pr_debug("SPC-3 PR [%s] PREEMPT%s from Node: %s%s\n",
 		tfo->get_fabric_name(), (abort) ? "_AND_ABORT" : "",
 		nacl->initiatorname, (prf_isid) ? &i_buf[0] : "");
 	/*
@@ -2926,7 +2926,7 @@
 		if (pr_reg_holder == pr_reg)
 			continue;
 		if (pr_reg->pr_res_holder) {
-			printk(KERN_WARNING "pr_reg->pr_res_holder still set\n");
+			pr_warn("pr_reg->pr_res_holder still set\n");
 			continue;
 		}
 
@@ -2971,14 +2971,14 @@
 	int all_reg = 0, calling_it_nexus = 0, released_regs = 0;
 	int prh_type = 0, prh_scope = 0, ret;
 
-	if (!(se_sess))
+	if (!se_sess)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
 	se_deve = &se_sess->se_node_acl->device_list[cmd->orig_fe_lun];
 	pr_reg_n = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
-	if (!(pr_reg_n)) {
-		printk(KERN_ERR "SPC-3 PR: Unable to locate"
+	if (!pr_reg_n) {
+		pr_err("SPC-3 PR: Unable to locate"
 			" PR_REGISTERED *pr_reg for PREEMPT%s\n",
 			(abort) ? "_AND_ABORT" : "");
 		return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -2988,7 +2988,7 @@
 		return PYX_TRANSPORT_RESERVATION_CONFLICT;
 	}
 	if (scope != PR_SCOPE_LU_SCOPE) {
-		printk(KERN_ERR "SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
+		pr_err("SPC-3 PR: Illegal SCOPE: 0x%02x\n", scope);
 		core_scsi3_put_pr_reg(pr_reg_n);
 		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 	}
@@ -3001,7 +3001,7 @@
 	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)))
 		all_reg = 1;
 
-	if (!(all_reg) && !(sa_res_key)) {
+	if (!all_reg && !sa_res_key) {
 		spin_unlock(&dev->dev_reservation_lock);
 		core_scsi3_put_pr_reg(pr_reg_n);
 		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3015,7 +3015,7 @@
 	 * server shall perform a preempt by doing the following in an
 	 * uninterrupted series of actions. (See below..)
 	 */
-	if (!(pr_res_holder) || (pr_res_holder->pr_res_key != sa_res_key)) {
+	if (!pr_res_holder || (pr_res_holder->pr_res_key != sa_res_key)) {
 		/*
 		 * No existing or SA Reservation Key matching reservations..
 		 *
@@ -3042,7 +3042,7 @@
 			 *    was received, with the additional sense code set
 			 *    to REGISTRATIONS PREEMPTED.
 			 */
-			if (!(all_reg)) {
+			if (!all_reg) {
 				if (pr_reg->pr_res_key != sa_res_key)
 					continue;
 
@@ -3082,7 +3082,7 @@
 						NULL, 0);
 				released_regs++;
 			}
-			if (!(calling_it_nexus))
+			if (!calling_it_nexus)
 				core_scsi3_ua_allocate(pr_reg_nacl,
 					pr_res_mapped_lun, 0x2A,
 					ASCQ_2AH_RESERVATIONS_PREEMPTED);
@@ -3095,7 +3095,7 @@
 		 * registered reservation key, then the device server shall
 		 * complete the command with RESERVATION CONFLICT status.
 		 */
-		if (!(released_regs)) {
+		if (!released_regs) {
 			spin_unlock(&dev->dev_reservation_lock);
 			core_scsi3_put_pr_reg(pr_reg_n);
 			return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3120,8 +3120,8 @@
 			ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 					&pr_reg_n->pr_aptpl_buf[0],
 					pr_tmpl->pr_aptpl_buf_len);
-			if (!(ret))
-				printk(KERN_INFO "SPC-3 PR: Updated APTPL"
+			if (!ret)
+				pr_debug("SPC-3 PR: Updated APTPL"
 					" metadata for  PREEMPT%s\n", (abort) ?
 					"_AND_ABORT" : "");
 		}
@@ -3256,8 +3256,8 @@
 		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&pr_reg_n->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
-		if (!(ret))
-			printk("SPC-3 PR: Updated APTPL metadata for PREEMPT"
+		if (!ret)
+			pr_debug("SPC-3 PR: Updated APTPL metadata for PREEMPT"
 				"%s\n", (abort) ? "_AND_ABORT" : "");
 	}
 
@@ -3287,7 +3287,7 @@
 				res_key, sa_res_key, abort);
 		break;
 	default:
-		printk(KERN_ERR "SPC-3 PR: Unknown Service Action PREEMPT%s"
+		pr_err("SPC-3 PR: Unknown Service Action PREEMPT%s"
 			" Type: 0x%02x\n", (abort) ? "_AND_ABORT" : "", type);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -3321,8 +3321,8 @@
 	unsigned short rtpi;
 	unsigned char proto_ident;
 
-	if (!(se_sess) || !(se_lun)) {
-		printk(KERN_ERR "SPC-3 PR: se_sess || struct se_lun is NULL!\n");
+	if (!se_sess || !se_lun) {
+		pr_err("SPC-3 PR: se_sess || struct se_lun is NULL!\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
 	memset(dest_iport, 0, 64);
@@ -3338,8 +3338,8 @@
 	 */
 	pr_reg = core_scsi3_locate_pr_reg(cmd->se_dev, se_sess->se_node_acl,
 				se_sess);
-	if (!(pr_reg)) {
-		printk(KERN_ERR "SPC-3 PR: Unable to locate PR_REGISTERED"
+	if (!pr_reg) {
+		pr_err("SPC-3 PR: Unable to locate PR_REGISTERED"
 			" *pr_reg for REGISTER_AND_MOVE\n");
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
@@ -3348,7 +3348,7 @@
 	 * provided during this initiator's I_T nexus registration.
 	 */
 	if (res_key != pr_reg->pr_res_key) {
-		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received"
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received"
 			" res_key: 0x%016Lx does not match existing SA REGISTER"
 			" res_key: 0x%016Lx\n", res_key, pr_reg->pr_res_key);
 		core_scsi3_put_pr_reg(pr_reg);
@@ -3357,8 +3357,8 @@
 	/*
 	 * The service active reservation key needs to be non zero
 	 */
-	if (!(sa_res_key)) {
-		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Received zero"
+	if (!sa_res_key) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Received zero"
 			" sa_res_key\n");
 		core_scsi3_put_pr_reg(pr_reg);
 		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3380,7 +3380,7 @@
 	buf = NULL;
 
 	if ((tid_len + 24) != cmd->data_length) {
-		printk(KERN_ERR "SPC-3 PR: Illegal tid_len: %u + 24 byte header"
+		pr_err("SPC-3 PR: Illegal tid_len: %u + 24 byte header"
 			" does not equal CDB data_length: %u\n", tid_len,
 			cmd->data_length);
 		core_scsi3_put_pr_reg(pr_reg);
@@ -3392,10 +3392,10 @@
 		if (se_port->sep_rtpi != rtpi)
 			continue;
 		dest_se_tpg = se_port->sep_tpg;
-		if (!(dest_se_tpg))
+		if (!dest_se_tpg)
 			continue;
 		dest_tf_ops = dest_se_tpg->se_tpg_tfo;
-		if (!(dest_tf_ops))
+		if (!dest_tf_ops)
 			continue;
 
 		atomic_inc(&dest_se_tpg->tpg_pr_ref_count);
@@ -3404,7 +3404,7 @@
 
 		ret = core_scsi3_tpg_depend_item(dest_se_tpg);
 		if (ret != 0) {
-			printk(KERN_ERR "core_scsi3_tpg_depend_item() failed"
+			pr_err("core_scsi3_tpg_depend_item() failed"
 				" for dest_se_tpg\n");
 			atomic_dec(&dest_se_tpg->tpg_pr_ref_count);
 			smp_mb__after_atomic_dec();
@@ -3417,8 +3417,8 @@
 	}
 	spin_unlock(&dev->se_port_lock);
 
-	if (!(dest_se_tpg) || (!dest_tf_ops)) {
-		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+	if (!dest_se_tpg || !dest_tf_ops) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
 			" fabric ops from Relative Target Port Identifier:"
 			" %hu\n", rtpi);
 		core_scsi3_put_pr_reg(pr_reg);
@@ -3428,11 +3428,11 @@
 	buf = transport_kmap_first_data_page(cmd);
 	proto_ident = (buf[24] & 0x0f);
 #if 0
-	printk("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Extracted Protocol Identifier:"
 			" 0x%02x\n", proto_ident);
 #endif
 	if (proto_ident != dest_tf_ops->get_fabric_proto_ident(dest_se_tpg)) {
-		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Received"
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Received"
 			" proto_ident: 0x%02x does not match ident: 0x%02x"
 			" from fabric: %s\n", proto_ident,
 			dest_tf_ops->get_fabric_proto_ident(dest_se_tpg),
@@ -3441,7 +3441,7 @@
 		goto out;
 	}
 	if (dest_tf_ops->tpg_parse_pr_out_transport_id == NULL) {
-		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Fabric does not"
 			" containg a valid tpg_parse_pr_out_transport_id"
 			" function pointer\n");
 		ret = PYX_TRANSPORT_LU_COMM_FAILURE;
@@ -3449,8 +3449,8 @@
 	}
 	initiator_str = dest_tf_ops->tpg_parse_pr_out_transport_id(dest_se_tpg,
 			(const char *)&buf[24], &tmp_tid_len, &iport_ptr);
-	if (!(initiator_str)) {
-		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
+	if (!initiator_str) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: Unable to locate"
 			" initiator_str from Transport ID\n");
 		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 		goto out;
@@ -3459,7 +3459,7 @@
 	transport_kunmap_first_data_page(cmd);
 	buf = NULL;
 
-	printk(KERN_INFO "SPC-3 PR [%s] Extracted initiator %s identifier: %s"
+	pr_debug("SPC-3 PR [%s] Extracted initiator %s identifier: %s"
 		" %s\n", dest_tf_ops->get_fabric_name(), (iport_ptr != NULL) ?
 		"port" : "device", initiator_str, (iport_ptr != NULL) ?
 		iport_ptr : "");
@@ -3474,18 +3474,18 @@
 	pr_reg_nacl = pr_reg->pr_reg_nacl;
 	matching_iname = (!strcmp(initiator_str,
 				  pr_reg_nacl->initiatorname)) ? 1 : 0;
-	if (!(matching_iname))
+	if (!matching_iname)
 		goto after_iport_check;
 
-	if (!(iport_ptr) || !(pr_reg->isid_present_at_reg)) {
-		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
+	if (!iport_ptr || !pr_reg->isid_present_at_reg) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s"
 			" matches: %s on received I_T Nexus\n", initiator_str,
 			pr_reg_nacl->initiatorname);
 		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 		goto out;
 	}
-	if (!(strcmp(iport_ptr, pr_reg->pr_reg_isid))) {
-		printk(KERN_ERR "SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
+	if (!strcmp(iport_ptr, pr_reg->pr_reg_isid)) {
+		pr_err("SPC-3 PR REGISTER_AND_MOVE: TransportID: %s %s"
 			" matches: %s %s on received I_T Nexus\n",
 			initiator_str, iport_ptr, pr_reg_nacl->initiatorname,
 			pr_reg->pr_reg_isid);
@@ -3505,8 +3505,8 @@
 	}
 	spin_unlock_bh(&dest_se_tpg->acl_node_lock);
 
-	if (!(dest_node_acl)) {
-		printk(KERN_ERR "Unable to locate %s dest_node_acl for"
+	if (!dest_node_acl) {
+		pr_err("Unable to locate %s dest_node_acl for"
 			" TransportID%s\n", dest_tf_ops->get_fabric_name(),
 			initiator_str);
 		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
@@ -3514,7 +3514,7 @@
 	}
 	ret = core_scsi3_nodeacl_depend_item(dest_node_acl);
 	if (ret != 0) {
-		printk(KERN_ERR "core_scsi3_nodeacl_depend_item() for"
+		pr_err("core_scsi3_nodeacl_depend_item() for"
 			" dest_node_acl\n");
 		atomic_dec(&dest_node_acl->acl_pr_ref_count);
 		smp_mb__after_atomic_dec();
@@ -3523,7 +3523,7 @@
 		goto out;
 	}
 #if 0
-	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Found %s dest_node_acl:"
 		" %s from TransportID\n", dest_tf_ops->get_fabric_name(),
 		dest_node_acl->initiatorname);
 #endif
@@ -3532,8 +3532,8 @@
 	 * PORT IDENTIFIER.
 	 */
 	dest_se_deve = core_get_se_deve_from_rtpi(dest_node_acl, rtpi);
-	if (!(dest_se_deve)) {
-		printk(KERN_ERR "Unable to locate %s dest_se_deve from RTPI:"
+	if (!dest_se_deve) {
+		pr_err("Unable to locate %s dest_se_deve from RTPI:"
 			" %hu\n",  dest_tf_ops->get_fabric_name(), rtpi);
 		ret = PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 		goto out;
@@ -3541,7 +3541,7 @@
 
 	ret = core_scsi3_lunacl_depend_item(dest_se_deve);
 	if (ret < 0) {
-		printk(KERN_ERR "core_scsi3_lunacl_depend_item() failed\n");
+		pr_err("core_scsi3_lunacl_depend_item() failed\n");
 		atomic_dec(&dest_se_deve->pr_ref_count);
 		smp_mb__after_atomic_dec();
 		dest_se_deve = NULL;
@@ -3549,7 +3549,7 @@
 		goto out;
 	}
 #if 0
-	printk(KERN_INFO "SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
+	pr_debug("SPC-3 PR REGISTER_AND_MOVE: Located %s node %s LUN"
 		" ACL for dest_se_deve->mapped_lun: %u\n",
 		dest_tf_ops->get_fabric_name(), dest_node_acl->initiatorname,
 		dest_se_deve->mapped_lun);
@@ -3560,8 +3560,8 @@
 	 */
 	spin_lock(&dev->dev_reservation_lock);
 	pr_res_holder = dev->dev_pr_res_holder;
-	if (!(pr_res_holder)) {
-		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: No reservation"
+	if (!pr_res_holder) {
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: No reservation"
 			" currently held\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		ret = PYX_TRANSPORT_INVALID_CDB_FIELD;
@@ -3574,7 +3574,7 @@
 	 * 	Register behaviors for a REGISTER AND MOVE service action
 	 */
 	if (pr_res_holder != pr_reg) {
-		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Calling I_T"
 			" Nexus is not reservation holder\n");
 		spin_unlock(&dev->dev_reservation_lock);
 		ret = PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -3591,7 +3591,7 @@
 	 */
 	if ((pr_res_holder->pr_res_type == PR_TYPE_WRITE_EXCLUSIVE_ALLREG) ||
 	    (pr_res_holder->pr_res_type == PR_TYPE_EXCLUSIVE_ACCESS_ALLREG)) {
-		printk(KERN_WARNING "SPC-3 PR REGISTER_AND_MOVE: Unable to move"
+		pr_warn("SPC-3 PR REGISTER_AND_MOVE: Unable to move"
 			" reservation for type: %s\n",
 			core_scsi3_pr_dump_type(pr_res_holder->pr_res_type));
 		spin_unlock(&dev->dev_reservation_lock);
@@ -3626,7 +3626,7 @@
 	 */
 	dest_pr_reg = __core_scsi3_locate_pr_reg(dev, dest_node_acl,
 					iport_ptr);
-	if (!(dest_pr_reg)) {
+	if (!dest_pr_reg) {
 		ret = core_scsi3_alloc_registration(cmd->se_dev,
 				dest_node_acl, dest_se_deve, iport_ptr,
 				sa_res_key, 0, aptpl, 2, 1);
@@ -3659,16 +3659,16 @@
 	/*
 	 * Increment PRGeneration for existing registrations..
 	 */
-	if (!(new_reg))
+	if (!new_reg)
 		dest_pr_reg->pr_res_generation = pr_tmpl->pr_generation++;
 	spin_unlock(&dev->dev_reservation_lock);
 
-	printk(KERN_INFO "SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
+	pr_debug("SPC-3 PR [%s] Service Action: REGISTER_AND_MOVE"
 		" created new reservation holder TYPE: %s on object RTPI:"
 		" %hu  PRGeneration: 0x%08x\n", dest_tf_ops->get_fabric_name(),
 		core_scsi3_pr_dump_type(type), rtpi,
 		dest_pr_reg->pr_res_generation);
-	printk(KERN_INFO "SPC-3 PR Successfully moved reservation from"
+	pr_debug("SPC-3 PR Successfully moved reservation from"
 		" %s Fabric Node: %s%s -> %s Fabric Node: %s %s\n",
 		tf_ops->get_fabric_name(), pr_reg_nacl->initiatorname,
 		(prf_isid) ? &i_buf[0] : "", dest_tf_ops->get_fabric_name(),
@@ -3696,18 +3696,18 @@
 	 * Clear the APTPL metadata if APTPL has been disabled, otherwise
 	 * write out the updated metadata to struct file for this SCSI device.
 	 */
-	if (!(aptpl)) {
+	if (!aptpl) {
 		pr_tmpl->pr_aptpl_active = 0;
 		core_scsi3_update_and_write_aptpl(cmd->se_dev, NULL, 0);
-		printk("SPC-3 PR: Set APTPL Bit Deactivated for"
+		pr_debug("SPC-3 PR: Set APTPL Bit Deactivated for"
 				" REGISTER_AND_MOVE\n");
 	} else {
 		pr_tmpl->pr_aptpl_active = 1;
 		ret = core_scsi3_update_and_write_aptpl(cmd->se_dev,
 				&dest_pr_reg->pr_aptpl_buf[0],
 				pr_tmpl->pr_aptpl_buf_len);
-		if (!(ret))
-			printk("SPC-3 PR: Set APTPL Bit Activated for"
+		if (!ret)
+			pr_debug("SPC-3 PR: Set APTPL Bit Activated for"
 					" REGISTER_AND_MOVE\n");
 	}
 
@@ -3750,11 +3750,11 @@
 	 * FIXME: A NULL struct se_session pointer means an this is not coming from
 	 * a $FABRIC_MOD's nexus, but from internal passthrough ops.
 	 */
-	if (!(cmd->se_sess))
+	if (!cmd->se_sess)
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 
 	if (cmd->data_length < 24) {
-		printk(KERN_WARNING "SPC-PR: Received PR OUT parameter list"
+		pr_warn("SPC-PR: Received PR OUT parameter list"
 			" length too small: %u\n", cmd->data_length);
 		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 	}
@@ -3800,9 +3800,9 @@
 	 * the sense key set to ILLEGAL REQUEST, and the additional sense
 	 * code set to PARAMETER LIST LENGTH ERROR.
 	 */
-	if (!(spec_i_pt) && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
+	if (!spec_i_pt && ((cdb[1] & 0x1f) != PRO_REGISTER_AND_MOVE) &&
 	    (cmd->data_length != 24)) {
-		printk(KERN_WARNING "SPC-PR: Received PR OUT illegal parameter"
+		pr_warn("SPC-PR: Received PR OUT illegal parameter"
 			" list length: %u\n", cmd->data_length);
 		return PYX_TRANSPORT_INVALID_PARAMETER_LIST;
 	}
@@ -3836,7 +3836,7 @@
 		return core_scsi3_emulate_pro_register_and_move(cmd, res_key,
 				sa_res_key, aptpl, unreg);
 	default:
-		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_OUT service"
+		pr_err("Unknown PERSISTENT_RESERVE_OUT service"
 			" action: 0x%02x\n", cdb[1] & 0x1f);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -3858,7 +3858,7 @@
 	u32 add_len = 0, off = 8;
 
 	if (cmd->data_length < 8) {
-		printk(KERN_ERR "PRIN SA READ_KEYS SCSI Data Length: %u"
+		pr_err("PRIN SA READ_KEYS SCSI Data Length: %u"
 			" too small\n", cmd->data_length);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -3917,7 +3917,7 @@
 	u32 add_len = 16; /* Hardcoded to 16 when a reservation is held. */
 
 	if (cmd->data_length < 8) {
-		printk(KERN_ERR "PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
+		pr_err("PRIN SA READ_RESERVATIONS SCSI Data Length: %u"
 			" too small\n", cmd->data_length);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -3999,7 +3999,7 @@
 	u16 add_len = 8; /* Hardcoded to 8. */
 
 	if (cmd->data_length < 6) {
-		printk(KERN_ERR "PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
+		pr_err("PRIN SA REPORT_CAPABILITIES SCSI Data Length:"
 			" %u too small\n", cmd->data_length);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -4060,7 +4060,7 @@
 	int format_code = 0;
 
 	if (cmd->data_length < 8) {
-		printk(KERN_ERR "PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
+		pr_err("PRIN SA READ_FULL_STATUS SCSI Data Length: %u"
 			" too small\n", cmd->data_length);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -4091,7 +4091,7 @@
 				se_tpg, se_nacl, pr_reg, &format_code);
 
 		if ((exp_desc_len + add_len) > cmd->data_length) {
-			printk(KERN_WARNING "SPC-3 PRIN READ_FULL_STATUS ran"
+			pr_warn("SPC-3 PRIN READ_FULL_STATUS ran"
 				" out of buffer: %d\n", cmd->data_length);
 			spin_lock(&pr_tmpl->registration_lock);
 			atomic_dec(&pr_reg->pr_res_holders);
@@ -4141,7 +4141,7 @@
 		 * bit is set to one, the contents of the RELATIVE TARGET PORT
 		 * IDENTIFIER field are not defined by this standard.
 		 */
-		if (!(pr_reg->pr_reg_all_tg_pt)) {
+		if (!pr_reg->pr_reg_all_tg_pt) {
 			struct se_port *port = pr_reg->pr_reg_tg_pt_lun->lun_sep;
 
 			buf[off++] = ((port->sep_rtpi >> 8) & 0xff);
@@ -4203,7 +4203,7 @@
 	case PRI_READ_FULL_STATUS:
 		return core_scsi3_pri_read_full_status(cmd);
 	default:
-		printk(KERN_ERR "Unknown PERSISTENT_RESERVE_IN service"
+		pr_err("Unknown PERSISTENT_RESERVE_IN service"
 			" action: 0x%02x\n", cdb[1] & 0x1f);
 		return PYX_TRANSPORT_INVALID_CDB_FIELD;
 	}
@@ -4224,7 +4224,7 @@
 	 * CONFLICT status.
 	 */
 	if (dev->dev_flags & DF_SPC2_RESERVATIONS) {
-		printk(KERN_ERR "Received PERSISTENT_RESERVE CDB while legacy"
+		pr_err("Received PERSISTENT_RESERVE CDB while legacy"
 			" SPC-2 reservation is held, returning"
 			" RESERVATION_CONFLICT\n");
 		return PYX_TRANSPORT_RESERVATION_CONFLICT;
@@ -4263,7 +4263,7 @@
 		rest->res_type = SPC_PASSTHROUGH;
 		rest->pr_ops.t10_reservation_check = &core_pt_reservation_check;
 		rest->pr_ops.t10_seq_non_holder = &core_pt_seq_non_holder;
-		printk(KERN_INFO "%s: Using SPC_PASSTHROUGH, no reservation"
+		pr_debug("%s: Using SPC_PASSTHROUGH, no reservation"
 			" emulation\n", dev->transport->name);
 		return 0;
 	}
@@ -4275,14 +4275,14 @@
 		rest->res_type = SPC3_PERSISTENT_RESERVATIONS;
 		rest->pr_ops.t10_reservation_check = &core_scsi3_pr_reservation_check;
 		rest->pr_ops.t10_seq_non_holder = &core_scsi3_pr_seq_non_holder;
-		printk(KERN_INFO "%s: Using SPC3_PERSISTENT_RESERVATIONS"
+		pr_debug("%s: Using SPC3_PERSISTENT_RESERVATIONS"
 			" emulation\n", dev->transport->name);
 	} else {
 		rest->res_type = SPC2_RESERVATIONS;
 		rest->pr_ops.t10_reservation_check = &core_scsi2_reservation_check;
 		rest->pr_ops.t10_seq_non_holder =
 				&core_scsi2_reservation_seq_non_holder;
-		printk(KERN_INFO "%s: Using SPC2_RESERVATIONS emulation\n",
+		pr_debug("%s: Using SPC2_RESERVATIONS emulation\n",
 			dev->transport->name);
 	}
 
diff --git a/drivers/target/target_core_pscsi.c b/drivers/target/target_core_pscsi.c
index 318ef14..a2ce599 100644
--- a/drivers/target/target_core_pscsi.c
+++ b/drivers/target/target_core_pscsi.c
@@ -65,8 +65,8 @@
 	struct pscsi_hba_virt *phv;
 
 	phv = kzalloc(sizeof(struct pscsi_hba_virt), GFP_KERNEL);
-	if (!(phv)) {
-		printk(KERN_ERR "Unable to allocate struct pscsi_hba_virt\n");
+	if (!phv) {
+		pr_err("Unable to allocate struct pscsi_hba_virt\n");
 		return -ENOMEM;
 	}
 	phv->phv_host_id = host_id;
@@ -74,10 +74,10 @@
 
 	hba->hba_ptr = phv;
 
-	printk(KERN_INFO "CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
+	pr_debug("CORE_HBA[%d] - TCM SCSI HBA Driver %s on"
 		" Generic Target Core Stack %s\n", hba->hba_id,
 		PSCSI_VERSION, TARGET_CORE_MOD_VERSION);
-	printk(KERN_INFO "CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
+	pr_debug("CORE_HBA[%d] - Attached SCSI HBA to Generic\n",
 	       hba->hba_id);
 
 	return 0;
@@ -91,12 +91,12 @@
 	if (scsi_host) {
 		scsi_host_put(scsi_host);
 
-		printk(KERN_INFO "CORE_HBA[%d] - Detached SCSI HBA: %s from"
+		pr_debug("CORE_HBA[%d] - Detached SCSI HBA: %s from"
 			" Generic Target Core\n", hba->hba_id,
 			(scsi_host->hostt->name) ? (scsi_host->hostt->name) :
 			"Unknown");
 	} else
-		printk(KERN_INFO "CORE_HBA[%d] - Detached Virtual SCSI HBA"
+		pr_debug("CORE_HBA[%d] - Detached Virtual SCSI HBA"
 			" from Generic Target Core\n", hba->hba_id);
 
 	kfree(phv);
@@ -110,14 +110,14 @@
 	/*
 	 * Release the struct Scsi_Host
 	 */
-	if (!(mode_flag)) {
-		if (!(sh))
+	if (!mode_flag) {
+		if (!sh)
 			return 0;
 
 		phv->phv_lld_host = NULL;
 		phv->phv_mode = PHV_VIRUTAL_HOST_ID;
 
-		printk(KERN_INFO "CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
+		pr_debug("CORE_HBA[%d] - Disabled pSCSI HBA Passthrough"
 			" %s\n", hba->hba_id, (sh->hostt->name) ?
 			(sh->hostt->name) : "Unknown");
 
@@ -130,7 +130,7 @@
 	 */
 	sh = scsi_host_lookup(phv->phv_host_id);
 	if (IS_ERR(sh)) {
-		printk(KERN_ERR "pSCSI: Unable to locate SCSI Host for"
+		pr_err("pSCSI: Unable to locate SCSI Host for"
 			" phv_host_id: %d\n", phv->phv_host_id);
 		return PTR_ERR(sh);
 	}
@@ -138,7 +138,7 @@
 	phv->phv_lld_host = sh;
 	phv->phv_mode = PHV_LLD_SCSI_HOST_NO;
 
-	printk(KERN_INFO "CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
+	pr_debug("CORE_HBA[%d] - Enabled pSCSI HBA Passthrough %s\n",
 		hba->hba_id, (sh->hostt->name) ? (sh->hostt->name) : "Unknown");
 
 	return 1;
@@ -257,15 +257,15 @@
 		page_83 = &buf[off];
 		ident_len = page_83[3];
 		if (!ident_len) {
-			printk(KERN_ERR "page_83[3]: identifier"
+			pr_err("page_83[3]: identifier"
 					" length zero!\n");
 			break;
 		}
-		printk(KERN_INFO "T10 VPD Identifer Length: %d\n", ident_len);
+		pr_debug("T10 VPD Identifer Length: %d\n", ident_len);
 
 		vpd = kzalloc(sizeof(struct t10_vpd), GFP_KERNEL);
 		if (!vpd) {
-			printk(KERN_ERR "Unable to allocate memory for"
+			pr_err("Unable to allocate memory for"
 					" struct t10_vpd\n");
 			goto out;
 		}
@@ -317,7 +317,7 @@
 	if (!sd->queue_depth) {
 		sd->queue_depth = PSCSI_DEFAULT_QUEUEDEPTH;
 
-		printk(KERN_ERR "Set broken SCSI Device %d:%d:%d"
+		pr_err("Set broken SCSI Device %d:%d:%d"
 			" queue_depth to %d\n", sd->channel, sd->id,
 				sd->lun, sd->queue_depth);
 	}
@@ -355,7 +355,7 @@
 	dev = transport_add_device_to_core_hba(hba, &pscsi_template,
 				se_dev, dev_flags, pdv,
 				&dev_limits, NULL, NULL);
-	if (!(dev)) {
+	if (!dev) {
 		pdv->pdv_sd = NULL;
 		return NULL;
 	}
@@ -385,13 +385,13 @@
 	struct pscsi_dev_virt *pdv;
 
 	pdv = kzalloc(sizeof(struct pscsi_dev_virt), GFP_KERNEL);
-	if (!(pdv)) {
-		printk(KERN_ERR "Unable to allocate memory for struct pscsi_dev_virt\n");
+	if (!pdv) {
+		pr_err("Unable to allocate memory for struct pscsi_dev_virt\n");
 		return NULL;
 	}
 	pdv->pdv_se_hba = hba;
 
-	printk(KERN_INFO "PSCSI: Allocated pdv: %p for %s\n", pdv, name);
+	pr_debug("PSCSI: Allocated pdv: %p for %s\n", pdv, name);
 	return pdv;
 }
 
@@ -412,7 +412,7 @@
 	u32 dev_flags = 0;
 
 	if (scsi_device_get(sd)) {
-		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+		pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
 			sh->host_no, sd->channel, sd->id, sd->lun);
 		spin_unlock_irq(sh->host_lock);
 		return NULL;
@@ -425,19 +425,19 @@
 	bd = blkdev_get_by_path(se_dev->se_dev_udev_path,
 				FMODE_WRITE|FMODE_READ|FMODE_EXCL, pdv);
 	if (IS_ERR(bd)) {
-		printk(KERN_ERR "pSCSI: blkdev_get_by_path() failed\n");
+		pr_err("pSCSI: blkdev_get_by_path() failed\n");
 		scsi_device_put(sd);
 		return NULL;
 	}
 	pdv->pdv_bd = bd;
 
 	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-	if (!(dev)) {
+	if (!dev) {
 		blkdev_put(pdv->pdv_bd, FMODE_WRITE|FMODE_READ|FMODE_EXCL);
 		scsi_device_put(sd);
 		return NULL;
 	}
-	printk(KERN_INFO "CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
+	pr_debug("CORE_PSCSI[%d] - Added TYPE_DISK for %d:%d:%d:%d\n",
 		phv->phv_host_id, sh->host_no, sd->channel, sd->id, sd->lun);
 
 	return dev;
@@ -459,7 +459,7 @@
 	u32 dev_flags = 0;
 
 	if (scsi_device_get(sd)) {
-		printk(KERN_ERR "scsi_device_get() failed for %d:%d:%d:%d\n",
+		pr_err("scsi_device_get() failed for %d:%d:%d:%d\n",
 			sh->host_no, sd->channel, sd->id, sd->lun);
 		spin_unlock_irq(sh->host_lock);
 		return NULL;
@@ -467,11 +467,11 @@
 	spin_unlock_irq(sh->host_lock);
 
 	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-	if (!(dev)) {
+	if (!dev) {
 		scsi_device_put(sd);
 		return NULL;
 	}
-	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
 		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
 		sd->channel, sd->id, sd->lun);
 
@@ -495,10 +495,10 @@
 
 	spin_unlock_irq(sh->host_lock);
 	dev = pscsi_add_device_to_list(hba, se_dev, pdv, sd, dev_flags);
-	if (!(dev))
+	if (!dev)
 		return NULL;
 
-	printk(KERN_INFO "CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
+	pr_debug("CORE_PSCSI[%d] - Added Type: %s for %d:%d:%d:%d\n",
 		phv->phv_host_id, scsi_device_type(sd->type), sh->host_no,
 		sd->channel, sd->id, sd->lun);
 
@@ -517,8 +517,8 @@
 	struct Scsi_Host *sh = phv->phv_lld_host;
 	int legacy_mode_enable = 0;
 
-	if (!(pdv)) {
-		printk(KERN_ERR "Unable to locate struct pscsi_dev_virt"
+	if (!pdv) {
+		pr_err("Unable to locate struct pscsi_dev_virt"
 				" parameter\n");
 		return ERR_PTR(-EINVAL);
 	}
@@ -526,9 +526,9 @@
 	 * If not running in PHV_LLD_SCSI_HOST_NO mode, locate the
 	 * struct Scsi_Host we will need to bring the TCM/pSCSI object online
 	 */
-	if (!(sh)) {
+	if (!sh) {
 		if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
-			printk(KERN_ERR "pSCSI: Unable to locate struct"
+			pr_err("pSCSI: Unable to locate struct"
 				" Scsi_Host for PHV_LLD_SCSI_HOST_NO\n");
 			return ERR_PTR(-ENODEV);
 		}
@@ -537,7 +537,7 @@
 		 * reference, we enforce that udev_path has been set
 		 */
 		if (!(se_dev->su_dev_flags & SDF_USING_UDEV_PATH)) {
-			printk(KERN_ERR "pSCSI: udev_path attribute has not"
+			pr_err("pSCSI: udev_path attribute has not"
 				" been set before ENABLE=1\n");
 			return ERR_PTR(-EINVAL);
 		}
@@ -548,8 +548,8 @@
 		 */
 		if (!(pdv->pdv_flags & PDF_HAS_VIRT_HOST_ID)) {
 			spin_lock(&hba->device_lock);
-			if (!(list_empty(&hba->hba_dev_list))) {
-				printk(KERN_ERR "pSCSI: Unable to set hba_mode"
+			if (!list_empty(&hba->hba_dev_list)) {
+				pr_err("pSCSI: Unable to set hba_mode"
 					" with active devices\n");
 				spin_unlock(&hba->device_lock);
 				return ERR_PTR(-EEXIST);
@@ -565,14 +565,14 @@
 		} else {
 			sh = scsi_host_lookup(pdv->pdv_host_id);
 			if (IS_ERR(sh)) {
-				printk(KERN_ERR "pSCSI: Unable to locate"
+				pr_err("pSCSI: Unable to locate"
 					" pdv_host_id: %d\n", pdv->pdv_host_id);
 				return (struct se_device *) sh;
 			}
 		}
 	} else {
 		if (phv->phv_mode == PHV_VIRUTAL_HOST_ID) {
-			printk(KERN_ERR "pSCSI: PHV_VIRUTAL_HOST_ID set while"
+			pr_err("pSCSI: PHV_VIRUTAL_HOST_ID set while"
 				" struct Scsi_Host exists\n");
 			return ERR_PTR(-EEXIST);
 		}
@@ -601,7 +601,7 @@
 			break;
 		}
 
-		if (!(dev)) {
+		if (!dev) {
 			if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
 				scsi_host_put(sh);
 			else if (legacy_mode_enable) {
@@ -615,7 +615,7 @@
 	}
 	spin_unlock_irq(sh->host_lock);
 
-	printk(KERN_ERR "pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
+	pr_err("pSCSI: Unable to locate %d:%d:%d:%d\n", sh->host_no,
 		pdv->pdv_channel_id,  pdv->pdv_target_id, pdv->pdv_lun_id);
 
 	if (phv->phv_mode == PHV_VIRUTAL_HOST_ID)
@@ -729,8 +729,8 @@
 		u32 blocksize;
 
 		buf = sg_virt(&sg[0]);
-		if (!(buf)) {
-			printk(KERN_ERR "Unable to get buf for scatterlist\n");
+		if (!buf) {
+			pr_err("Unable to get buf for scatterlist\n");
 			goto after_mode_select;
 		}
 
@@ -760,33 +760,19 @@
 }
 
 static struct se_task *
-pscsi_alloc_task(struct se_cmd *cmd)
+pscsi_alloc_task(unsigned char *cdb)
 {
 	struct pscsi_plugin_task *pt;
-	unsigned char *cdb = cmd->t_task_cdb;
-
-	pt = kzalloc(sizeof(struct pscsi_plugin_task), GFP_KERNEL);
-	if (!pt) {
-		printk(KERN_ERR "Unable to allocate struct pscsi_plugin_task\n");
-		return NULL;
-	}
 
 	/*
-	 * If TCM Core is signaling a > TCM_MAX_COMMAND_SIZE allocation,
-	 * allocate the extended CDB buffer for per struct se_task context
-	 * pt->pscsi_cdb now.
+	 * Dynamically alloc cdb space, since it may be larger than
+	 * TCM_MAX_COMMAND_SIZE
 	 */
-	if (cmd->t_task_cdb != cmd->__t_task_cdb) {
-
-		pt->pscsi_cdb = kzalloc(scsi_command_size(cdb), GFP_KERNEL);
-		if (!(pt->pscsi_cdb)) {
-			printk(KERN_ERR "pSCSI: Unable to allocate extended"
-					" pt->pscsi_cdb\n");
-			kfree(pt);
-			return NULL;
-		}
-	} else
-		pt->pscsi_cdb = &pt->__pscsi_cdb[0];
+	pt = kzalloc(sizeof(*pt) + scsi_command_size(cdb), GFP_KERNEL);
+	if (!pt) {
+		pr_err("Unable to allocate struct pscsi_plugin_task\n");
+		return NULL;
+	}
 
 	return &pt->pscsi_task;
 }
@@ -837,8 +823,8 @@
 	pt->pscsi_req = blk_get_request(pdv->pdv_sd->request_queue,
 			(task->task_data_direction == DMA_TO_DEVICE),
 			GFP_KERNEL);
-	if (!(pt->pscsi_req) || IS_ERR(pt->pscsi_req)) {
-		printk(KERN_ERR "PSCSI: blk_get_request() failed: %ld\n",
+	if (!pt->pscsi_req || IS_ERR(pt->pscsi_req)) {
+		pr_err("PSCSI: blk_get_request() failed: %ld\n",
 				IS_ERR(pt->pscsi_req));
 		return PYX_TRANSPORT_LU_COMM_FAILURE;
 	}
@@ -883,15 +869,8 @@
 static void pscsi_free_task(struct se_task *task)
 {
 	struct pscsi_plugin_task *pt = PSCSI_TASK(task);
-	struct se_cmd *cmd = task->task_se_cmd;
 
 	/*
-	 * Release the extended CDB allocation from pscsi_alloc_task()
-	 * if one exists.
-	 */
-	if (cmd->t_task_cdb != cmd->__t_task_cdb)
-		kfree(pt->pscsi_cdb);
-	/*
 	 * We do not release the bio(s) here associated with this task, as
 	 * this is handled by bio_put() and pscsi_bi_endio().
 	 */
@@ -936,7 +915,7 @@
 		switch (token) {
 		case Opt_scsi_host_id:
 			if (phv->phv_mode == PHV_LLD_SCSI_HOST_NO) {
-				printk(KERN_ERR "PSCSI[%d]: Unable to accept"
+				pr_err("PSCSI[%d]: Unable to accept"
 					" scsi_host_id while phv_mode =="
 					" PHV_LLD_SCSI_HOST_NO\n",
 					phv->phv_host_id);
@@ -945,14 +924,14 @@
 			}
 			match_int(args, &arg);
 			pdv->pdv_host_id = arg;
-			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Host ID:"
+			pr_debug("PSCSI[%d]: Referencing SCSI Host ID:"
 				" %d\n", phv->phv_host_id, pdv->pdv_host_id);
 			pdv->pdv_flags |= PDF_HAS_VIRT_HOST_ID;
 			break;
 		case Opt_scsi_channel_id:
 			match_int(args, &arg);
 			pdv->pdv_channel_id = arg;
-			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Channel"
+			pr_debug("PSCSI[%d]: Referencing SCSI Channel"
 				" ID: %d\n",  phv->phv_host_id,
 				pdv->pdv_channel_id);
 			pdv->pdv_flags |= PDF_HAS_CHANNEL_ID;
@@ -960,7 +939,7 @@
 		case Opt_scsi_target_id:
 			match_int(args, &arg);
 			pdv->pdv_target_id = arg;
-			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI Target"
+			pr_debug("PSCSI[%d]: Referencing SCSI Target"
 				" ID: %d\n", phv->phv_host_id,
 				pdv->pdv_target_id);
 			pdv->pdv_flags |= PDF_HAS_TARGET_ID;
@@ -968,7 +947,7 @@
 		case Opt_scsi_lun_id:
 			match_int(args, &arg);
 			pdv->pdv_lun_id = arg;
-			printk(KERN_INFO "PSCSI[%d]: Referencing SCSI LUN ID:"
+			pr_debug("PSCSI[%d]: Referencing SCSI LUN ID:"
 				" %d\n", phv->phv_host_id, pdv->pdv_lun_id);
 			pdv->pdv_flags |= PDF_HAS_LUN_ID;
 			break;
@@ -991,7 +970,7 @@
 	if (!(pdv->pdv_flags & PDF_HAS_CHANNEL_ID) ||
 	    !(pdv->pdv_flags & PDF_HAS_TARGET_ID) ||
 	    !(pdv->pdv_flags & PDF_HAS_LUN_ID)) {
-		printk(KERN_ERR "Missing scsi_channel_id=, scsi_target_id= and"
+		pr_err("Missing scsi_channel_id=, scsi_target_id= and"
 			" scsi_lun_id= parameters\n");
 		return -EINVAL;
 	}
@@ -1061,8 +1040,8 @@
 	 * in block/blk-core.c:blk_make_request()
 	 */
 	bio = bio_kmalloc(GFP_KERNEL, sg_num);
-	if (!(bio)) {
-		printk(KERN_ERR "PSCSI: bio_kmalloc() failed\n");
+	if (!bio) {
+		pr_err("PSCSI: bio_kmalloc() failed\n");
 		return NULL;
 	}
 	bio->bi_end_io = pscsi_bi_endio;
@@ -1070,12 +1049,6 @@
 	return bio;
 }
 
-#if 0
-#define DEBUG_PSCSI(x...) printk(x)
-#else
-#define DEBUG_PSCSI(x...)
-#endif
-
 static int __pscsi_map_task_SG(
 	struct se_task *task,
 	struct scatterlist *task_sg,
@@ -1106,34 +1079,34 @@
 	 * is ported to upstream SCSI passthrough functionality that accepts
 	 * struct scatterlist->page_link or struct page as a paraemeter.
 	 */
-	DEBUG_PSCSI("PSCSI: nr_pages: %d\n", nr_pages);
+	pr_debug("PSCSI: nr_pages: %d\n", nr_pages);
 
 	for_each_sg(task_sg, sg, task_sg_num, i) {
 		page = sg_page(sg);
 		off = sg->offset;
 		len = sg->length;
 
-		DEBUG_PSCSI("PSCSI: i: %d page: %p len: %d off: %d\n", i,
+		pr_debug("PSCSI: i: %d page: %p len: %d off: %d\n", i,
 			page, len, off);
 
 		while (len > 0 && data_len > 0) {
 			bytes = min_t(unsigned int, len, PAGE_SIZE - off);
 			bytes = min(bytes, data_len);
 
-			if (!(bio)) {
+			if (!bio) {
 				nr_vecs = min_t(int, BIO_MAX_PAGES, nr_pages);
 				nr_pages -= nr_vecs;
 				/*
 				 * Calls bio_kmalloc() and sets bio->bi_end_io()
 				 */
 				bio = pscsi_get_bio(nr_vecs);
-				if (!(bio))
+				if (!bio)
 					goto fail;
 
 				if (rw)
 					bio->bi_rw |= REQ_WRITE;
 
-				DEBUG_PSCSI("PSCSI: Allocated bio: %p,"
+				pr_debug("PSCSI: Allocated bio: %p,"
 					" dir: %s nr_vecs: %d\n", bio,
 					(rw) ? "rw" : "r", nr_vecs);
 				/*
@@ -1148,7 +1121,7 @@
 					tbio = tbio->bi_next = bio;
 			}
 
-			DEBUG_PSCSI("PSCSI: Calling bio_add_pc_page() i: %d"
+			pr_debug("PSCSI: Calling bio_add_pc_page() i: %d"
 				" bio: %p page: %p len: %d off: %d\n", i, bio,
 				page, len, off);
 
@@ -1157,11 +1130,11 @@
 			if (rc != bytes)
 				goto fail;
 
-			DEBUG_PSCSI("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
+			pr_debug("PSCSI: bio->bi_vcnt: %d nr_vecs: %d\n",
 				bio->bi_vcnt, nr_vecs);
 
 			if (bio->bi_vcnt > nr_vecs) {
-				DEBUG_PSCSI("PSCSI: Reached bio->bi_vcnt max:"
+				pr_debug("PSCSI: Reached bio->bi_vcnt max:"
 					" %d i: %d bio: %p, allocating another"
 					" bio\n", bio->bi_vcnt, i, bio);
 				/*
@@ -1183,15 +1156,15 @@
 	 * Setup the primary pt->pscsi_req used for non BIDI and BIDI-COMMAND
 	 * primary SCSI WRITE poayload mapped for struct se_task->task_sg[]
 	 */
-	if (!(bidi_read)) {
+	if (!bidi_read) {
 		/*
 		 * Starting with v2.6.31, call blk_make_request() passing in *hbio to
 		 * allocate the pSCSI task a struct request.
 		 */
 		pt->pscsi_req = blk_make_request(pdv->pdv_sd->request_queue,
 					hbio, GFP_KERNEL);
-		if (!(pt->pscsi_req)) {
-			printk(KERN_ERR "pSCSI: blk_make_request() failed\n");
+		if (!pt->pscsi_req) {
+			pr_err("pSCSI: blk_make_request() failed\n");
 			goto fail;
 		}
 		/*
@@ -1200,7 +1173,7 @@
 		 */
 		pscsi_blk_init_request(task, pt, pt->pscsi_req, 0);
 
-		return task->task_sg_num;
+		return task->task_sg_nents;
 	}
 	/*
 	 * Setup the secondary pt->pscsi_req->next_rq used for the extra BIDI-COMMAND
@@ -1208,13 +1181,13 @@
 	 */
 	pt->pscsi_req->next_rq = blk_make_request(pdv->pdv_sd->request_queue,
 					hbio, GFP_KERNEL);
-	if (!(pt->pscsi_req->next_rq)) {
-		printk(KERN_ERR "pSCSI: blk_make_request() failed for BIDI\n");
+	if (!pt->pscsi_req->next_rq) {
+		pr_err("pSCSI: blk_make_request() failed for BIDI\n");
 		goto fail;
 	}
 	pscsi_blk_init_request(task, pt, pt->pscsi_req->next_rq, 1);
 
-	return task->task_sg_num;
+	return task->task_sg_nents;
 fail:
 	while (hbio) {
 		bio = hbio;
@@ -1233,14 +1206,14 @@
 	 * Setup the main struct request for the task->task_sg[] payload
 	 */
 
-	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_num, 0);
+	ret = __pscsi_map_task_SG(task, task->task_sg, task->task_sg_nents, 0);
 	if (ret >= 0 && task->task_sg_bidi) {
 		/*
 		 * If present, set up the extra BIDI-COMMAND SCSI READ
 		 * struct request and payload.
 		 */
 		ret = __pscsi_map_task_SG(task, task->task_sg_bidi,
-					task->task_sg_num, 1);
+					task->task_sg_nents, 1);
 	}
 
 	if (ret < 0)
@@ -1319,9 +1292,9 @@
 	struct pscsi_plugin_task *pt)
 {
 	task->task_scsi_status = status_byte(pt->pscsi_result);
-	if ((task->task_scsi_status)) {
+	if (task->task_scsi_status) {
 		task->task_scsi_status <<= 1;
-		printk(KERN_INFO "PSCSI Status Byte exception at task: %p CDB:"
+		pr_debug("PSCSI Status Byte exception at task: %p CDB:"
 			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
 			pt->pscsi_result);
 	}
@@ -1331,7 +1304,7 @@
 		transport_complete_task(task, (!task->task_scsi_status));
 		break;
 	default:
-		printk(KERN_INFO "PSCSI Host Byte exception at task: %p CDB:"
+		pr_debug("PSCSI Host Byte exception at task: %p CDB:"
 			" 0x%02x Result: 0x%08x\n", task, pt->pscsi_cdb[0],
 			pt->pscsi_result);
 		task->task_scsi_status = SAM_STAT_CHECK_CONDITION;
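
Note (illustrative sketch, not part of the patch): the DEBUG_PSCSI() macro
removed above is replaced by plain pr_debug(), which compiles out unless DEBUG
is defined and becomes a runtime-switchable callsite under
CONFIG_DYNAMIC_DEBUG. A minimal stand-alone example of that pattern, using a
made-up "pscsi_example" pr_fmt() prefix:

	/* Must be defined before the printk headers are pulled in. */
	#define pr_fmt(fmt) "pscsi_example: " fmt

	#include <linux/printk.h>

	static void pscsi_example_log(int result)
	{
		/*
		 * No-op unless DEBUG is set; with CONFIG_DYNAMIC_DEBUG this
		 * callsite can be enabled at runtime through
		 * <debugfs>/dynamic_debug/control.
		 */
		pr_debug("request completed, result: 0x%08x\n", result);

		/* pr_err() always prints, at KERN_ERR level. */
		if (result)
			pr_err("request failed, result: 0x%08x\n", result);
	}
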
diff --git a/drivers/target/target_core_pscsi.h b/drivers/target/target_core_pscsi.h
index 280b689..ebf4f1a 100644
--- a/drivers/target/target_core_pscsi.h
+++ b/drivers/target/target_core_pscsi.h
@@ -23,13 +23,12 @@
 
 struct pscsi_plugin_task {
 	struct se_task pscsi_task;
-	unsigned char *pscsi_cdb;
-	unsigned char __pscsi_cdb[TCM_MAX_COMMAND_SIZE];
 	unsigned char pscsi_sense[SCSI_SENSE_BUFFERSIZE];
 	int	pscsi_direction;
 	int	pscsi_result;
 	u32	pscsi_resid;
 	struct request *pscsi_req;
+	unsigned char pscsi_cdb[0];
 } ____cacheline_aligned;
 
 #define PDF_HAS_CHANNEL_ID	0x01
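
Note (illustrative sketch, not part of the patch): with the zero-length
pscsi_cdb[0] member above, the CDB becomes trailing storage of struct
pscsi_plugin_task, sized by the single
kzalloc(sizeof(*pt) + scsi_command_size(cdb)) in pscsi_alloc_task(). The same
pattern with made-up names, assuming only the standard slab helpers:

	#include <linux/slab.h>
	#include <linux/string.h>
	#include <linux/types.h>

	struct example_task {
		int		direction;
		u32		cdb_len;
		unsigned char	cdb[0];	/* sized at allocation time */
	};

	static struct example_task *example_alloc_task(const unsigned char *cdb,
						       u32 cdb_len)
	{
		struct example_task *t;

		/* One allocation covers the header and the variable-size CDB. */
		t = kzalloc(sizeof(*t) + cdb_len, GFP_KERNEL);
		if (!t)
			return NULL;

		t->cdb_len = cdb_len;
		memcpy(t->cdb, cdb, cdb_len);
		return t;
	}

Teardown is then a single kfree(), which is why pscsi_free_task() above no
longer releases a separate extended pt->pscsi_cdb buffer.
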
diff --git a/drivers/target/target_core_rd.c b/drivers/target/target_core_rd.c
index 4f9416d..3dd81d2 100644
--- a/drivers/target/target_core_rd.c
+++ b/drivers/target/target_core_rd.c
@@ -44,12 +44,8 @@
 
 #include "target_core_rd.h"
 
-static struct se_subsystem_api rd_dr_template;
 static struct se_subsystem_api rd_mcp_template;
 
-/* #define DEBUG_RAMDISK_MCP */
-/* #define DEBUG_RAMDISK_DR */
-
 /*	rd_attach_hba(): (Part of se_subsystem_api_t template)
  *
  *
@@ -59,8 +55,8 @@
 	struct rd_host *rd_host;
 
 	rd_host = kzalloc(sizeof(struct rd_host), GFP_KERNEL);
-	if (!(rd_host)) {
-		printk(KERN_ERR "Unable to allocate memory for struct rd_host\n");
+	if (!rd_host) {
+		pr_err("Unable to allocate memory for struct rd_host\n");
 		return -ENOMEM;
 	}
 
@@ -68,10 +64,10 @@
 
 	hba->hba_ptr = rd_host;
 
-	printk(KERN_INFO "CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
+	pr_debug("CORE_HBA[%d] - TCM Ramdisk HBA Driver %s on"
 		" Generic Target Core Stack %s\n", hba->hba_id,
 		RD_HBA_VERSION, TARGET_CORE_MOD_VERSION);
-	printk(KERN_INFO "CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
+	pr_debug("CORE_HBA[%d] - Attached Ramdisk HBA: %u to Generic"
 		" MaxSectors: %u\n", hba->hba_id,
 		rd_host->rd_host_id, RD_MAX_SECTORS);
 
@@ -82,7 +78,7 @@
 {
 	struct rd_host *rd_host = hba->hba_ptr;
 
-	printk(KERN_INFO "CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
+	pr_debug("CORE_HBA[%d] - Detached Ramdisk HBA: %u from"
 		" Generic Target Core\n", hba->hba_id, rd_host->rd_host_id);
 
 	kfree(rd_host);
@@ -111,7 +107,7 @@
 
 		for (j = 0; j < sg_per_table; j++) {
 			pg = sg_page(&sg[j]);
-			if ((pg)) {
+			if (pg) {
 				__free_page(pg);
 				page_count++;
 			}
@@ -120,7 +116,7 @@
 		kfree(sg);
 	}
 
-	printk(KERN_INFO "CORE_RD[%u] - Released device space for Ramdisk"
+	pr_debug("CORE_RD[%u] - Released device space for Ramdisk"
 		" Device ID: %u, pages %u in %u tables total bytes %lu\n",
 		rd_dev->rd_host->rd_host_id, rd_dev->rd_dev_id, page_count,
 		rd_dev->sg_table_count, (unsigned long)page_count * PAGE_SIZE);
@@ -145,7 +141,7 @@
 	struct scatterlist *sg;
 
 	if (rd_dev->rd_page_count <= 0) {
-		printk(KERN_ERR "Illegal page count: %u for Ramdisk device\n",
+		pr_err("Illegal page count: %u for Ramdisk device\n",
 			rd_dev->rd_page_count);
 		return -EINVAL;
 	}
@@ -154,8 +150,8 @@
 	sg_tables = (total_sg_needed / max_sg_per_table) + 1;
 
 	sg_table = kzalloc(sg_tables * sizeof(struct rd_dev_sg_table), GFP_KERNEL);
-	if (!(sg_table)) {
-		printk(KERN_ERR "Unable to allocate memory for Ramdisk"
+	if (!sg_table) {
+		pr_err("Unable to allocate memory for Ramdisk"
 			" scatterlist tables\n");
 		return -ENOMEM;
 	}
@@ -169,13 +165,13 @@
 
 		sg = kzalloc(sg_per_table * sizeof(struct scatterlist),
 				GFP_KERNEL);
-		if (!(sg)) {
-			printk(KERN_ERR "Unable to allocate scatterlist array"
+		if (!sg) {
+			pr_err("Unable to allocate scatterlist array"
 				" for struct rd_dev\n");
 			return -ENOMEM;
 		}
 
-		sg_init_table((struct scatterlist *)&sg[0], sg_per_table);
+		sg_init_table(sg, sg_per_table);
 
 		sg_table[i].sg_table = sg;
 		sg_table[i].rd_sg_count = sg_per_table;
@@ -185,8 +181,8 @@
 
 		for (j = 0; j < sg_per_table; j++) {
 			pg = alloc_pages(GFP_KERNEL, 0);
-			if (!(pg)) {
-				printk(KERN_ERR "Unable to allocate scatterlist"
+			if (!pg) {
+				pr_err("Unable to allocate scatterlist"
 					" pages for struct rd_dev_sg_table\n");
 				return -ENOMEM;
 			}
@@ -198,7 +194,7 @@
 		total_sg_needed -= sg_per_table;
 	}
 
-	printk(KERN_INFO "CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
+	pr_debug("CORE_RD[%u] - Built Ramdisk Device ID: %u space of"
 		" %u pages in %u tables\n", rd_dev->rd_host->rd_host_id,
 		rd_dev->rd_dev_id, rd_dev->rd_page_count,
 		rd_dev->sg_table_count);
@@ -215,8 +211,8 @@
 	struct rd_host *rd_host = hba->hba_ptr;
 
 	rd_dev = kzalloc(sizeof(struct rd_dev), GFP_KERNEL);
-	if (!(rd_dev)) {
-		printk(KERN_ERR "Unable to allocate memory for struct rd_dev\n");
+	if (!rd_dev) {
+		pr_err("Unable to allocate memory for struct rd_dev\n");
 		return NULL;
 	}
 
@@ -226,11 +222,6 @@
 	return rd_dev;
 }
 
-static void *rd_DIRECT_allocate_virtdevice(struct se_hba *hba, const char *name)
-{
-	return rd_allocate_virtdevice(hba, name, 1);
-}
-
 static void *rd_MEMCPY_allocate_virtdevice(struct se_hba *hba, const char *name)
 {
 	return rd_allocate_virtdevice(hba, name, 0);
@@ -270,16 +261,15 @@
 	dev_limits.queue_depth = RD_DEVICE_QUEUE_DEPTH;
 
 	dev = transport_add_device_to_core_hba(hba,
-			(rd_dev->rd_direct) ? &rd_dr_template :
 			&rd_mcp_template, se_dev, dev_flags, rd_dev,
 			&dev_limits, prod, rev);
-	if (!(dev))
+	if (!dev)
 		goto fail;
 
 	rd_dev->rd_dev_id = rd_host->rd_host_dev_id_count++;
 	rd_dev->rd_queue_depth = dev->queue_depth;
 
-	printk(KERN_INFO "CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
+	pr_debug("CORE_RD[%u] - Added TCM %s Ramdisk Device ID: %u of"
 		" %u pages in %u tables, %lu total bytes\n",
 		rd_host->rd_host_id, (!rd_dev->rd_direct) ? "MEMCPY" :
 		"DIRECT", rd_dev->rd_dev_id, rd_dev->rd_page_count,
@@ -293,14 +283,6 @@
 	return ERR_PTR(ret);
 }
 
-static struct se_device *rd_DIRECT_create_virtdevice(
-	struct se_hba *hba,
-	struct se_subsystem_dev *se_dev,
-	void *p)
-{
-	return rd_create_virtdevice(hba, se_dev, p, 1);
-}
-
 static struct se_device *rd_MEMCPY_create_virtdevice(
 	struct se_hba *hba,
 	struct se_subsystem_dev *se_dev,
@@ -327,16 +309,15 @@
 }
 
 static struct se_task *
-rd_alloc_task(struct se_cmd *cmd)
+rd_alloc_task(unsigned char *cdb)
 {
 	struct rd_request *rd_req;
 
 	rd_req = kzalloc(sizeof(struct rd_request), GFP_KERNEL);
 	if (!rd_req) {
-		printk(KERN_ERR "Unable to allocate struct rd_request\n");
+		pr_err("Unable to allocate struct rd_request\n");
 		return NULL;
 	}
-	rd_req->rd_dev = cmd->se_dev->dev_ptr;
 
 	return &rd_req->rd_task;
 }
@@ -357,7 +338,7 @@
 			return sg_table;
 	}
 
-	printk(KERN_ERR "Unable to locate struct rd_dev_sg_table for page: %u\n",
+	pr_err("Unable to locate struct rd_dev_sg_table for page: %u\n",
 			page);
 
 	return NULL;
@@ -370,7 +351,7 @@
 static int rd_MEMCPY_read(struct rd_request *req)
 {
 	struct se_task *task = &req->rd_task;
-	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
 	struct rd_dev_sg_table *table;
 	struct scatterlist *sg_d, *sg_s;
 	void *dst, *src;
@@ -379,32 +360,32 @@
 	u32 rd_offset = req->rd_offset;
 
 	table = rd_get_sg_table(dev, req->rd_page);
-	if (!(table))
+	if (!table)
 		return -EINVAL;
 
 	table_sg_end = (table->page_end_offset - req->rd_page);
 	sg_d = task->task_sg;
 	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_MCP
-	printk(KERN_INFO "RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
+
+	pr_debug("RD[%u]: Read LBA: %llu, Size: %u Page: %u, Offset:"
 		" %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
 		req->rd_page, req->rd_offset);
-#endif
+
 	src_offset = rd_offset;
 
 	while (req->rd_size) {
 		if ((sg_d[i].length - dst_offset) <
 		    (sg_s[j].length - src_offset)) {
 			length = (sg_d[i].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
-			printk(KERN_INFO "Step 1 - sg_d[%d]: %p length: %d"
+
+			pr_debug("Step 1 - sg_d[%d]: %p length: %d"
 				" offset: %u sg_s[%d].length: %u\n", i,
 				&sg_d[i], sg_d[i].length, sg_d[i].offset, j,
 				sg_s[j].length);
-			printk(KERN_INFO "Step 1 - length: %u dst_offset: %u"
+			pr_debug("Step 1 - length: %u dst_offset: %u"
 				" src_offset: %u\n", length, dst_offset,
 				src_offset);
-#endif
+
 			if (length > req->rd_size)
 				length = req->rd_size;
 
@@ -421,15 +402,15 @@
 			page_end = 0;
 		} else {
 			length = (sg_s[j].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
-			printk(KERN_INFO "Step 2 - sg_d[%d]: %p length: %d"
+
+			pr_debug("Step 2 - sg_d[%d]: %p length: %d"
 				" offset: %u sg_s[%d].length: %u\n", i,
 				&sg_d[i], sg_d[i].length, sg_d[i].offset,
 				j, sg_s[j].length);
-			printk(KERN_INFO "Step 2 - length: %u dst_offset: %u"
+			pr_debug("Step 2 - length: %u dst_offset: %u"
 				" src_offset: %u\n", length, dst_offset,
 				src_offset);
-#endif
+
 			if (length > req->rd_size)
 				length = req->rd_size;
 
@@ -453,31 +434,28 @@
 
 		memcpy(dst, src, length);
 
-#ifdef DEBUG_RAMDISK_MCP
-		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+		pr_debug("page: %u, remaining size: %u, length: %u,"
 			" i: %u, j: %u\n", req->rd_page,
 			(req->rd_size - length), length, i, j);
-#endif
+
 		req->rd_size -= length;
-		if (!(req->rd_size))
+		if (!req->rd_size)
 			return 0;
 
 		if (!page_end)
 			continue;
 
 		if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
-			printk(KERN_INFO "page: %u in same page table\n",
+			pr_debug("page: %u in same page table\n",
 				req->rd_page);
-#endif
 			continue;
 		}
-#ifdef DEBUG_RAMDISK_MCP
-		printk(KERN_INFO "getting new page table for page: %u\n",
+
+		pr_debug("getting new page table for page: %u\n",
 				req->rd_page);
-#endif
+
 		table = rd_get_sg_table(dev, req->rd_page);
-		if (!(table))
+		if (!table)
 			return -EINVAL;
 
 		sg_s = &table->sg_table[j = 0];
@@ -493,7 +471,7 @@
 static int rd_MEMCPY_write(struct rd_request *req)
 {
 	struct se_task *task = &req->rd_task;
-	struct rd_dev *dev = req->rd_dev;
+	struct rd_dev *dev = req->rd_task.se_dev->dev_ptr;
 	struct rd_dev_sg_table *table;
 	struct scatterlist *sg_d, *sg_s;
 	void *dst, *src;
@@ -502,32 +480,32 @@
 	u32 rd_offset = req->rd_offset;
 
 	table = rd_get_sg_table(dev, req->rd_page);
-	if (!(table))
+	if (!table)
 		return -EINVAL;
 
 	table_sg_end = (table->page_end_offset - req->rd_page);
 	sg_d = &table->sg_table[req->rd_page - table->page_start_offset];
 	sg_s = task->task_sg;
-#ifdef DEBUG_RAMDISK_MCP
-	printk(KERN_INFO "RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
+
+	pr_debug("RD[%d] Write LBA: %llu, Size: %u, Page: %u,"
 		" Offset: %u\n", dev->rd_dev_id, task->task_lba, req->rd_size,
 		req->rd_page, req->rd_offset);
-#endif
+
 	dst_offset = rd_offset;
 
 	while (req->rd_size) {
 		if ((sg_s[i].length - src_offset) <
 		    (sg_d[j].length - dst_offset)) {
 			length = (sg_s[i].length - src_offset);
-#ifdef DEBUG_RAMDISK_MCP
-			printk(KERN_INFO "Step 1 - sg_s[%d]: %p length: %d"
+
+			pr_debug("Step 1 - sg_s[%d]: %p length: %d"
 				" offset: %d sg_d[%d].length: %u\n", i,
 				&sg_s[i], sg_s[i].length, sg_s[i].offset,
 				j, sg_d[j].length);
-			printk(KERN_INFO "Step 1 - length: %u src_offset: %u"
+			pr_debug("Step 1 - length: %u src_offset: %u"
 				" dst_offset: %u\n", length, src_offset,
 				dst_offset);
-#endif
+
 			if (length > req->rd_size)
 				length = req->rd_size;
 
@@ -544,15 +522,15 @@
 			page_end = 0;
 		} else {
 			length = (sg_d[j].length - dst_offset);
-#ifdef DEBUG_RAMDISK_MCP
-			printk(KERN_INFO "Step 2 - sg_s[%d]: %p length: %d"
+
+			pr_debug("Step 2 - sg_s[%d]: %p length: %d"
 				" offset: %d sg_d[%d].length: %u\n", i,
 				&sg_s[i], sg_s[i].length, sg_s[i].offset,
 				j, sg_d[j].length);
-			printk(KERN_INFO "Step 2 - length: %u src_offset: %u"
+			pr_debug("Step 2 - length: %u src_offset: %u"
 				" dst_offset: %u\n", length, src_offset,
 				dst_offset);
-#endif
+
 			if (length > req->rd_size)
 				length = req->rd_size;
 
@@ -576,31 +554,28 @@
 
 		memcpy(dst, src, length);
 
-#ifdef DEBUG_RAMDISK_MCP
-		printk(KERN_INFO "page: %u, remaining size: %u, length: %u,"
+		pr_debug("page: %u, remaining size: %u, length: %u,"
 			" i: %u, j: %u\n", req->rd_page,
 			(req->rd_size - length), length, i, j);
-#endif
+
 		req->rd_size -= length;
-		if (!(req->rd_size))
+		if (!req->rd_size)
 			return 0;
 
 		if (!page_end)
 			continue;
 
 		if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_MCP
-			printk(KERN_INFO "page: %u in same page table\n",
+			pr_debug("page: %u in same page table\n",
 				req->rd_page);
-#endif
 			continue;
 		}
-#ifdef DEBUG_RAMDISK_MCP
-		printk(KERN_INFO "getting new page table for page: %u\n",
+
+		pr_debug("getting new page table for page: %u\n",
 				req->rd_page);
-#endif
+
 		table = rd_get_sg_table(dev, req->rd_page);
-		if (!(table))
+		if (!table)
 			return -EINVAL;
 
 		sg_d = &table->sg_table[j = 0];
@@ -641,273 +616,6 @@
 	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
 }
 
-/*	rd_DIRECT_with_offset():
- *
- *
- */
-static int rd_DIRECT_with_offset(
-	struct se_task *task,
-	struct list_head *se_mem_list,
-	u32 *se_mem_cnt,
-	u32 *task_offset)
-{
-	struct rd_request *req = RD_REQ(task);
-	struct rd_dev *dev = req->rd_dev;
-	struct rd_dev_sg_table *table;
-	struct se_mem *se_mem;
-	struct scatterlist *sg_s;
-	u32 j = 0, set_offset = 1;
-	u32 get_next_table = 0, offset_length, table_sg_end;
-
-	table = rd_get_sg_table(dev, req->rd_page);
-	if (!(table))
-		return -EINVAL;
-
-	table_sg_end = (table->page_end_offset - req->rd_page);
-	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
-	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u Page: %u, Offset: %u\n",
-		(task->task_data_direction == DMA_TO_DEVICE) ?
-			"Write" : "Read",
-		task->task_lba, req->rd_size, req->rd_page, req->rd_offset);
-#endif
-	while (req->rd_size) {
-		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
-		if (!(se_mem)) {
-			printk(KERN_ERR "Unable to allocate struct se_mem\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&se_mem->se_list);
-
-		if (set_offset) {
-			offset_length = sg_s[j].length - req->rd_offset;
-			if (offset_length > req->rd_size)
-				offset_length = req->rd_size;
-
-			se_mem->se_page = sg_page(&sg_s[j++]);
-			se_mem->se_off = req->rd_offset;
-			se_mem->se_len = offset_length;
-
-			set_offset = 0;
-			get_next_table = (j > table_sg_end);
-			goto check_eot;
-		}
-
-		offset_length = (req->rd_size < req->rd_offset) ?
-			req->rd_size : req->rd_offset;
-
-		se_mem->se_page = sg_page(&sg_s[j]);
-		se_mem->se_len = offset_length;
-
-		set_offset = 1;
-
-check_eot:
-#ifdef DEBUG_RAMDISK_DR
-		printk(KERN_INFO "page: %u, size: %u, offset_length: %u, j: %u"
-			" se_mem: %p, se_page: %p se_off: %u se_len: %u\n",
-			req->rd_page, req->rd_size, offset_length, j, se_mem,
-			se_mem->se_page, se_mem->se_off, se_mem->se_len);
-#endif
-		list_add_tail(&se_mem->se_list, se_mem_list);
-		(*se_mem_cnt)++;
-
-		req->rd_size -= offset_length;
-		if (!(req->rd_size))
-			goto out;
-
-		if (!set_offset && !get_next_table)
-			continue;
-
-		if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
-			printk(KERN_INFO "page: %u in same page table\n",
-					req->rd_page);
-#endif
-			continue;
-		}
-#ifdef DEBUG_RAMDISK_DR
-		printk(KERN_INFO "getting new page table for page: %u\n",
-				req->rd_page);
-#endif
-		table = rd_get_sg_table(dev, req->rd_page);
-		if (!(table))
-			return -EINVAL;
-
-		sg_s = &table->sg_table[j = 0];
-	}
-
-out:
-	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
-	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
-			*se_mem_cnt);
-#endif
-	return 0;
-}
-
-/*	rd_DIRECT_without_offset():
- *
- *
- */
-static int rd_DIRECT_without_offset(
-	struct se_task *task,
-	struct list_head *se_mem_list,
-	u32 *se_mem_cnt,
-	u32 *task_offset)
-{
-	struct rd_request *req = RD_REQ(task);
-	struct rd_dev *dev = req->rd_dev;
-	struct rd_dev_sg_table *table;
-	struct se_mem *se_mem;
-	struct scatterlist *sg_s;
-	u32 length, j = 0;
-
-	table = rd_get_sg_table(dev, req->rd_page);
-	if (!(table))
-		return -EINVAL;
-
-	sg_s = &table->sg_table[req->rd_page - table->page_start_offset];
-#ifdef DEBUG_RAMDISK_DR
-	printk(KERN_INFO "%s DIRECT LBA: %llu, Size: %u, Page: %u\n",
-		(task->task_data_direction == DMA_TO_DEVICE) ?
-			"Write" : "Read",
-		task->task_lba, req->rd_size, req->rd_page);
-#endif
-	while (req->rd_size) {
-		se_mem = kmem_cache_zalloc(se_mem_cache, GFP_KERNEL);
-		if (!(se_mem)) {
-			printk(KERN_ERR "Unable to allocate struct se_mem\n");
-			return -ENOMEM;
-		}
-		INIT_LIST_HEAD(&se_mem->se_list);
-
-		length = (req->rd_size < sg_s[j].length) ?
-			req->rd_size : sg_s[j].length;
-
-		se_mem->se_page = sg_page(&sg_s[j++]);
-		se_mem->se_len = length;
-
-#ifdef DEBUG_RAMDISK_DR
-		printk(KERN_INFO "page: %u, size: %u, j: %u se_mem: %p,"
-			" se_page: %p se_off: %u se_len: %u\n", req->rd_page,
-			req->rd_size, j, se_mem, se_mem->se_page,
-			se_mem->se_off, se_mem->se_len);
-#endif
-		list_add_tail(&se_mem->se_list, se_mem_list);
-		(*se_mem_cnt)++;
-
-		req->rd_size -= length;
-		if (!(req->rd_size))
-			goto out;
-
-		if (++req->rd_page <= table->page_end_offset) {
-#ifdef DEBUG_RAMDISK_DR
-			printk("page: %u in same page table\n",
-				req->rd_page);
-#endif
-			continue;
-		}
-#ifdef DEBUG_RAMDISK_DR
-		printk(KERN_INFO "getting new page table for page: %u\n",
-				req->rd_page);
-#endif
-		table = rd_get_sg_table(dev, req->rd_page);
-		if (!(table))
-			return -EINVAL;
-
-		sg_s = &table->sg_table[j = 0];
-	}
-
-out:
-	task->task_se_cmd->t_tasks_se_num += *se_mem_cnt;
-#ifdef DEBUG_RAMDISK_DR
-	printk(KERN_INFO "RD_DR - Allocated %u struct se_mem segments for task\n",
-			*se_mem_cnt);
-#endif
-	return 0;
-}
-
-/*	rd_DIRECT_do_se_mem_map():
- *
- *
- */
-static int rd_DIRECT_do_se_mem_map(
-	struct se_task *task,
-	struct list_head *se_mem_list,
-	void *in_mem,
-	struct se_mem *in_se_mem,
-	struct se_mem **out_se_mem,
-	u32 *se_mem_cnt,
-	u32 *task_offset_in)
-{
-	struct se_cmd *cmd = task->task_se_cmd;
-	struct rd_request *req = RD_REQ(task);
-	u32 task_offset = *task_offset_in;
-	unsigned long long lba;
-	int ret;
-	int block_size = task->se_dev->se_sub_dev->se_dev_attrib.block_size;
-
-	lba = task->task_lba;
-	req->rd_page = ((task->task_lba * block_size) /	PAGE_SIZE);
-	req->rd_offset = (do_div(lba, (PAGE_SIZE / block_size))) * block_size;
-	req->rd_size = task->task_size;
-
-	if (req->rd_offset)
-		ret = rd_DIRECT_with_offset(task, se_mem_list, se_mem_cnt,
-				task_offset_in);
-	else
-		ret = rd_DIRECT_without_offset(task, se_mem_list, se_mem_cnt,
-				task_offset_in);
-
-	if (ret < 0)
-		return ret;
-
-	if (cmd->se_tfo->task_sg_chaining == 0)
-		return 0;
-	/*
-	 * Currently prevent writers from multiple HW fabrics doing
-	 * pci_map_sg() to RD_DR's internal scatterlist memory.
-	 */
-	if (cmd->data_direction == DMA_TO_DEVICE) {
-		printk(KERN_ERR "DMA_TO_DEVICE not supported for"
-				" RAMDISK_DR with task_sg_chaining=1\n");
-		return -ENOSYS;
-	}
-	/*
-	 * Special case for if task_sg_chaining is enabled, then
-	 * we setup struct se_task->task_sg[], as it will be used by
-	 * transport_do_task_sg_chain() for creating chainged SGLs
-	 * across multiple struct se_task->task_sg[].
-	 */
-	ret = transport_init_task_sg(task,
-			list_first_entry(&cmd->t_mem_list,
-				   struct se_mem, se_list),
-			task_offset);
-	if (ret <= 0)
-		return ret;
-
-	return transport_map_mem_to_sg(task, se_mem_list, task->task_sg,
-			list_first_entry(&cmd->t_mem_list,
-				   struct se_mem, se_list),
-			out_se_mem, se_mem_cnt, task_offset_in);
-}
-
-/*	rd_DIRECT_do_task(): (Part of se_subsystem_api_t template)
- *
- *
- */
-static int rd_DIRECT_do_task(struct se_task *task)
-{
-	/*
-	 * At this point the locally allocated RD tables have been mapped
-	 * to struct se_mem elements in rd_DIRECT_do_se_mem_map().
-	 */
-	task->task_scsi_status = GOOD;
-	transport_complete_task(task, 1);
-
-	return PYX_TRANSPORT_SENT_TO_TRANSPORT;
-}
-
 /*	rd_free_task(): (Part of se_subsystem_api_t template)
  *
  *
@@ -952,7 +660,7 @@
 		case Opt_rd_pages:
 			match_int(args, &arg);
 			rd_dev->rd_page_count = arg;
-			printk(KERN_INFO "RAMDISK: Referencing Page"
+			pr_debug("RAMDISK: Referencing Page"
 				" Count: %u\n", rd_dev->rd_page_count);
 			rd_dev->rd_flags |= RDF_HAS_PAGE_COUNT;
 			break;
@@ -970,7 +678,7 @@
 	struct rd_dev *rd_dev = se_dev->se_dev_su_ptr;
 
 	if (!(rd_dev->rd_flags & RDF_HAS_PAGE_COUNT)) {
-		printk(KERN_INFO "Missing rd_pages= parameter\n");
+		pr_debug("Missing rd_pages= parameter\n");
 		return -EINVAL;
 	}
 
@@ -1022,27 +730,6 @@
 	return blocks_long;
 }
 
-static struct se_subsystem_api rd_dr_template = {
-	.name			= "rd_dr",
-	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
-	.attach_hba		= rd_attach_hba,
-	.detach_hba		= rd_detach_hba,
-	.allocate_virtdevice	= rd_DIRECT_allocate_virtdevice,
-	.create_virtdevice	= rd_DIRECT_create_virtdevice,
-	.free_device		= rd_free_device,
-	.alloc_task		= rd_alloc_task,
-	.do_task		= rd_DIRECT_do_task,
-	.free_task		= rd_free_task,
-	.check_configfs_dev_params = rd_check_configfs_dev_params,
-	.set_configfs_dev_params = rd_set_configfs_dev_params,
-	.show_configfs_dev_params = rd_show_configfs_dev_params,
-	.get_cdb		= rd_get_cdb,
-	.get_device_rev		= rd_get_device_rev,
-	.get_device_type	= rd_get_device_type,
-	.get_blocks		= rd_get_blocks,
-	.do_se_mem_map		= rd_DIRECT_do_se_mem_map,
-};
-
 static struct se_subsystem_api rd_mcp_template = {
 	.name			= "rd_mcp",
 	.transport_type		= TRANSPORT_PLUGIN_VHBA_VDEV,
@@ -1067,13 +754,8 @@
 {
 	int ret;
 
-	ret = transport_subsystem_register(&rd_dr_template);
-	if (ret < 0)
-		return ret;
-
 	ret = transport_subsystem_register(&rd_mcp_template);
-	if (ret < 0) {
-		transport_subsystem_release(&rd_dr_template);
+	if (ret < 0)
 		return ret;
-	}
 
@@ -1082,6 +764,5 @@
 
 void rd_module_exit(void)
 {
-	transport_subsystem_release(&rd_dr_template);
 	transport_subsystem_release(&rd_mcp_template);
 }
diff --git a/drivers/target/target_core_rd.h b/drivers/target/target_core_rd.h
index bab9302..0d02773 100644
--- a/drivers/target/target_core_rd.h
+++ b/drivers/target/target_core_rd.h
@@ -32,8 +32,6 @@
 	u32		rd_page_count;
 	/* Scatterlist count */
 	u32		rd_size;
-	/* Ramdisk device */
-	struct rd_dev	*rd_dev;
 } ____cacheline_aligned;
 
 struct rd_dev_sg_table {
diff --git a/drivers/target/target_core_tmr.c b/drivers/target/target_core_tmr.c
index f1feea3..27d4925 100644
--- a/drivers/target/target_core_tmr.c
+++ b/drivers/target/target_core_tmr.c
@@ -41,13 +41,6 @@
 #include "target_core_alua.h"
 #include "target_core_pr.h"
 
-#define DEBUG_LUN_RESET
-#ifdef DEBUG_LUN_RESET
-#define DEBUG_LR(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_LR(x...)
-#endif
-
 struct se_tmr_req *core_tmr_alloc_req(
 	struct se_cmd *se_cmd,
 	void *fabric_tmr_ptr,
@@ -57,8 +50,8 @@
 
 	tmr = kmem_cache_zalloc(se_tmr_req_cache, (in_interrupt()) ?
 					GFP_ATOMIC : GFP_KERNEL);
-	if (!(tmr)) {
-		printk(KERN_ERR "Unable to allocate struct se_tmr_req\n");
+	if (!tmr) {
+		pr_err("Unable to allocate struct se_tmr_req\n");
 		return ERR_PTR(-ENOMEM);
 	}
 	tmr->task_cmd = se_cmd;
@@ -93,14 +86,14 @@
 	int tas,
 	int fe_count)
 {
-	if (!(fe_count)) {
+	if (!fe_count) {
 		transport_cmd_finish_abort(cmd, 1);
 		return;
 	}
 	/*
 	 * TASK ABORTED status (TAS) bit support
 	*/
-	if (((tmr_nacl != NULL) &&
+	if ((tmr_nacl &&
 	     (tmr_nacl == cmd->se_sess->se_node_acl)) || tas)
 		transport_send_task_abort(cmd);
 
@@ -141,13 +134,13 @@
 		tmr_nacl = tmr->task_cmd->se_sess->se_node_acl;
 		tmr_tpg = tmr->task_cmd->se_sess->se_tpg;
 		if (tmr_nacl && tmr_tpg) {
-			DEBUG_LR("LUN_RESET: TMR caller fabric: %s"
+			pr_debug("LUN_RESET: TMR caller fabric: %s"
 				" initiator port %s\n",
 				tmr_tpg->se_tpg_tfo->get_fabric_name(),
 				tmr_nacl->initiatorname);
 		}
 	}
-	DEBUG_LR("LUN_RESET: %s starting for [%s], tas: %d\n",
+	pr_debug("LUN_RESET: %s starting for [%s], tas: %d\n",
 		(preempt_and_abort_list) ? "Preempt" : "TMR",
 		dev->transport->name, tas);
 	/*
@@ -163,8 +156,8 @@
 			continue;
 
 		cmd = tmr_p->task_cmd;
-		if (!(cmd)) {
-			printk(KERN_ERR "Unable to locate struct se_cmd for TMR\n");
+		if (!cmd) {
+			pr_err("Unable to locate struct se_cmd for TMR\n");
 			continue;
 		}
 		/*
@@ -172,14 +165,14 @@
 		 * parameter (eg: for PROUT PREEMPT_AND_ABORT service action
 		 * skip non regisration key matching TMRs.
 		 */
-		if ((preempt_and_abort_list != NULL) &&
+		if (preempt_and_abort_list &&
 		    (core_scsi3_check_cdb_abort_and_preempt(
 					preempt_and_abort_list, cmd) != 0))
 			continue;
 		spin_unlock_irq(&dev->se_tmr_lock);
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		if (!(atomic_read(&cmd->t_transport_active))) {
+		if (!atomic_read(&cmd->t_transport_active)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 			spin_lock_irq(&dev->se_tmr_lock);
 			continue;
@@ -189,7 +182,7 @@
 			spin_lock_irq(&dev->se_tmr_lock);
 			continue;
 		}
-		DEBUG_LR("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
+		pr_debug("LUN_RESET: %s releasing TMR %p Function: 0x%02x,"
 			" Response: 0x%02x, t_state: %d\n",
 			(preempt_and_abort_list) ? "Preempt" : "", tmr_p,
 			tmr_p->function, tmr_p->response, cmd->t_state);
@@ -224,7 +217,7 @@
 	list_for_each_entry_safe(task, task_tmp, &dev->state_task_list,
 				t_state_list) {
 		if (!task->task_se_cmd) {
-			printk(KERN_ERR "task->task_se_cmd is NULL!\n");
+			pr_err("task->task_se_cmd is NULL!\n");
 			continue;
 		}
 		cmd = task->task_se_cmd;
@@ -233,7 +226,7 @@
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
 		 */
-		if ((preempt_and_abort_list != NULL) &&
+		if (preempt_and_abort_list &&
 		    (core_scsi3_check_cdb_abort_and_preempt(
 					preempt_and_abort_list, cmd) != 0))
 			continue;
@@ -248,14 +241,14 @@
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
-		DEBUG_LR("LUN_RESET: %s cmd: %p task: %p"
+		pr_debug("LUN_RESET: %s cmd: %p task: %p"
 			" ITT/CmdSN: 0x%08x/0x%08x, i_state: %d, t_state/"
 			"def_t_state: %d/%d cdb: 0x%02x\n",
 			(preempt_and_abort_list) ? "Preempt" : "", cmd, task,
 			cmd->se_tfo->get_task_tag(cmd), 0,
 			cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
 			cmd->deferred_t_state, cmd->t_task_cdb[0]);
-		DEBUG_LR("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
+		pr_debug("LUN_RESET: ITT[0x%08x] - pr_res_key: 0x%016Lx"
 			" t_task_cdbs: %d t_task_cdbs_left: %d"
 			" t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
@@ -272,10 +265,10 @@
 			spin_unlock_irqrestore(
 				&cmd->t_state_lock, flags);
 
-			DEBUG_LR("LUN_RESET: Waiting for task: %p to shutdown"
+			pr_debug("LUN_RESET: Waiting for task: %p to shutdown"
 				" for dev: %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
-			DEBUG_LR("LUN_RESET Completed task: %p shutdown for"
+			pr_debug("LUN_RESET Completed task: %p shutdown for"
 				" dev: %p\n", task, dev);
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			atomic_dec(&cmd->t_task_cdbs_left);
@@ -288,10 +281,10 @@
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
+		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
 			spin_unlock_irqrestore(
 					&cmd->t_state_lock, flags);
-			DEBUG_LR("LUN_RESET: Skipping task: %p, dev: %p for"
+			pr_debug("LUN_RESET: Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
 				atomic_read(&cmd->t_task_cdbs_ex_left));
 
@@ -301,7 +294,7 @@
 		fe_count = atomic_read(&cmd->t_fe_count);
 
 		if (atomic_read(&cmd->t_transport_active)) {
-			DEBUG_LR("LUN_RESET: got t_transport_active = 1 for"
+			pr_debug("LUN_RESET: got t_transport_active = 1 for"
 				" task: %p, t_fe_count: %d dev: %p\n", task,
 				fe_count, dev);
 			atomic_set(&cmd->t_transport_aborted, 1);
@@ -312,7 +305,7 @@
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		DEBUG_LR("LUN_RESET: Got t_transport_active = 0 for task: %p,"
+		pr_debug("LUN_RESET: Got t_transport_active = 0 for task: %p,"
 			" t_fe_count: %d dev: %p\n", task, fe_count, dev);
 		atomic_set(&cmd->t_transport_aborted, 1);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
@@ -335,7 +328,7 @@
 		 * For PREEMPT_AND_ABORT usage, only process commands
 		 * with a matching reservation key.
 		 */
-		if ((preempt_and_abort_list != NULL) &&
+		if (preempt_and_abort_list &&
 		    (core_scsi3_check_cdb_abort_and_preempt(
 					preempt_and_abort_list, cmd) != 0))
 			continue;
@@ -350,7 +343,7 @@
 		list_del(&cmd->se_queue_node);
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
-		DEBUG_LR("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
+		pr_debug("LUN_RESET: %s from Device Queue: cmd: %p t_state:"
 			" %d t_fe_count: %d\n", (preempt_and_abort_list) ?
 			"Preempt" : "", cmd, cmd->t_state,
 			atomic_read(&cmd->t_fe_count));
@@ -368,20 +361,20 @@
 	 * Clear any legacy SPC-2 reservation when called during
 	 * LOGICAL UNIT RESET
 	 */
-	if (!(preempt_and_abort_list) &&
+	if (!preempt_and_abort_list &&
 	     (dev->dev_flags & DF_SPC2_RESERVATIONS)) {
 		spin_lock(&dev->dev_reservation_lock);
 		dev->dev_reserved_node_acl = NULL;
 		dev->dev_flags &= ~DF_SPC2_RESERVATIONS;
 		spin_unlock(&dev->dev_reservation_lock);
-		printk(KERN_INFO "LUN_RESET: SCSI-2 Released reservation\n");
+		pr_debug("LUN_RESET: SCSI-2 Released reservation\n");
 	}
 
 	spin_lock_irq(&dev->stats_lock);
 	dev->num_resets++;
 	spin_unlock_irq(&dev->stats_lock);
 
-	DEBUG_LR("LUN_RESET: %s for [%s] Complete\n",
+	pr_debug("LUN_RESET: %s for [%s] Complete\n",
 			(preempt_and_abort_list) ? "Preempt" : "TMR",
 			dev->transport->name);
 	return 0;
diff --git a/drivers/target/target_core_tpg.c b/drivers/target/target_core_tpg.c
index 448129f..4f1ba4c 100644
--- a/drivers/target/target_core_tpg.c
+++ b/drivers/target/target_core_tpg.c
@@ -72,7 +72,7 @@
 			continue;
 
 		if (!deve->se_lun) {
-			printk(KERN_ERR "%s device entries device pointer is"
+			pr_err("%s device entries device pointer is"
 				" NULL, but Initiator has access.\n",
 				tpg->se_tpg_tfo->get_fabric_name());
 			continue;
@@ -86,14 +86,13 @@
 		spin_lock(&lun->lun_acl_lock);
 		list_for_each_entry_safe(acl, acl_tmp,
 					&lun->lun_acl_list, lacl_list) {
-			if (!(strcmp(acl->initiatorname,
-					nacl->initiatorname)) &&
-			     (acl->mapped_lun == deve->mapped_lun))
+			if (!strcmp(acl->initiatorname, nacl->initiatorname) &&
+			    (acl->mapped_lun == deve->mapped_lun))
 				break;
 		}
 
 		if (!acl) {
-			printk(KERN_ERR "Unable to locate struct se_lun_acl for %s,"
+			pr_err("Unable to locate struct se_lun_acl for %s,"
 				" mapped_lun: %u\n", nacl->initiatorname,
 				deve->mapped_lun);
 			spin_unlock(&lun->lun_acl_lock);
@@ -121,7 +120,7 @@
 	struct se_node_acl *acl;
 
 	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-		if (!(strcmp(acl->initiatorname, initiatorname)))
+		if (!strcmp(acl->initiatorname, initiatorname))
 			return acl;
 	}
 
@@ -140,8 +139,8 @@
 
 	spin_lock_bh(&tpg->acl_node_lock);
 	list_for_each_entry(acl, &tpg->acl_node_list, acl_list) {
-		if (!(strcmp(acl->initiatorname, initiatorname)) &&
-		   (!(acl->dynamic_node_acl))) {
+		if (!strcmp(acl->initiatorname, initiatorname) &&
+		    !acl->dynamic_node_acl) {
 			spin_unlock_bh(&tpg->acl_node_lock);
 			return acl;
 		}
@@ -177,7 +176,7 @@
 		 * By default in LIO-Target $FABRIC_MOD,
 		 * demo_mode_write_protect is ON, or READ_ONLY;
 		 */
-		if (!(tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg))) {
+		if (!tpg->se_tpg_tfo->tpg_check_demo_mode_write_protect(tpg)) {
 			if (dev->dev_flags & DF_READ_ONLY)
 				lun_access = TRANSPORT_LUNFLAGS_READ_ONLY;
 			else
@@ -193,7 +192,7 @@
 				lun_access = TRANSPORT_LUNFLAGS_READ_WRITE;
 		}
 
-		printk(KERN_INFO "TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
+		pr_debug("TARGET_CORE[%s]->TPG[%u]_LUN[%u] - Adding %s"
 			" access for LUN in Demo Mode\n",
 			tpg->se_tpg_tfo->get_fabric_name(),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg), lun->unpacked_lun,
@@ -216,7 +215,7 @@
 	struct se_node_acl *acl)
 {
 	if (!acl->queue_depth) {
-		printk(KERN_ERR "Queue depth for %s Initiator Node: %s is 0,"
+		pr_err("Queue depth for %s Initiator Node: %s is 0,"
 			"defaulting to 1.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			acl->initiatorname);
 		acl->queue_depth = 1;
@@ -236,8 +235,8 @@
 
 	nacl->device_list = kzalloc(sizeof(struct se_dev_entry) *
 				TRANSPORT_MAX_LUNS_PER_TPG, GFP_KERNEL);
-	if (!(nacl->device_list)) {
-		printk(KERN_ERR "Unable to allocate memory for"
+	if (!nacl->device_list) {
+		pr_err("Unable to allocate memory for"
 			" struct se_node_acl->device_list\n");
 		return -ENOMEM;
 	}
@@ -265,14 +264,14 @@
 	struct se_node_acl *acl;
 
 	acl = core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if ((acl))
+	if (acl)
 		return acl;
 
-	if (!(tpg->se_tpg_tfo->tpg_check_demo_mode(tpg)))
+	if (!tpg->se_tpg_tfo->tpg_check_demo_mode(tpg))
 		return NULL;
 
 	acl =  tpg->se_tpg_tfo->tpg_alloc_fabric_acl(tpg);
-	if (!(acl))
+	if (!acl)
 		return NULL;
 
 	INIT_LIST_HEAD(&acl->acl_list);
@@ -307,7 +306,7 @@
 	tpg->num_node_acls++;
 	spin_unlock_bh(&tpg->acl_node_lock);
 
-	printk("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
+	pr_debug("%s_TPG[%u] - Added DYNAMIC ACL with TCQ Depth: %d for %s"
 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
 		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
@@ -357,10 +356,10 @@
 
 	spin_lock_bh(&tpg->acl_node_lock);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if ((acl)) {
+	if (acl) {
 		if (acl->dynamic_node_acl) {
 			acl->dynamic_node_acl = 0;
-			printk(KERN_INFO "%s_TPG[%u] - Replacing dynamic ACL"
+			pr_debug("%s_TPG[%u] - Replacing dynamic ACL"
 				" for %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 				tpg->se_tpg_tfo->tpg_get_tag(tpg), initiatorname);
 			spin_unlock_bh(&tpg->acl_node_lock);
@@ -375,7 +374,7 @@
 			goto done;
 		}
 
-		printk(KERN_ERR "ACL entry for %s Initiator"
+		pr_err("ACL entry for %s Initiator"
 			" Node %s already exists for TPG %u, ignoring"
 			" request.\n",  tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -384,8 +383,8 @@
 	}
 	spin_unlock_bh(&tpg->acl_node_lock);
 
-	if (!(se_nacl)) {
-		printk("struct se_node_acl pointer is NULL\n");
+	if (!se_nacl) {
+		pr_err("struct se_node_acl pointer is NULL\n");
 		return ERR_PTR(-EINVAL);
 	}
 	/*
@@ -425,7 +424,7 @@
 	spin_unlock_bh(&tpg->acl_node_lock);
 
 done:
-	printk(KERN_INFO "%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
+	pr_debug("%s_TPG[%hu] - Added ACL with TCQ Depth: %d for %s"
 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
 		tpg->se_tpg_tfo->get_fabric_name(), initiatorname);
@@ -463,7 +462,7 @@
 		/*
 		 * Determine if the session needs to be closed by our context.
 		 */
-		if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
+		if (!tpg->se_tpg_tfo->shutdown_session(sess))
 			continue;
 
 		spin_unlock_bh(&tpg->session_lock);
@@ -481,7 +480,7 @@
 	core_clear_initiator_node_from_tpg(acl, tpg);
 	core_free_device_list_for_node(acl, tpg);
 
-	printk(KERN_INFO "%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
+	pr_debug("%s_TPG[%hu] - Deleted ACL with TCQ Depth: %d for %s"
 		" Initiator Node: %s\n", tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg), acl->queue_depth,
 		tpg->se_tpg_tfo->get_fabric_name(), acl->initiatorname);
@@ -506,8 +505,8 @@
 
 	spin_lock_bh(&tpg->acl_node_lock);
 	acl = __core_tpg_get_initiator_node_acl(tpg, initiatorname);
-	if (!(acl)) {
-		printk(KERN_ERR "Access Control List entry for %s Initiator"
+	if (!acl) {
+		pr_err("Access Control List entry for %s Initiator"
 			" Node %s does not exists for TPG %hu, ignoring"
 			" request.\n", tpg->se_tpg_tfo->get_fabric_name(),
 			initiatorname, tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -526,7 +525,7 @@
 			continue;
 
 		if (!force) {
-			printk(KERN_ERR "Unable to change queue depth for %s"
+			pr_err("Unable to change queue depth for %s"
 				" Initiator Node: %s while session is"
 				" operational.  To forcefully change the queue"
 				" depth and force session reinstatement"
@@ -543,7 +542,7 @@
 		/*
 		 * Determine if the session needs to be closed by our context.
 		 */
-		if (!(tpg->se_tpg_tfo->shutdown_session(sess)))
+		if (!tpg->se_tpg_tfo->shutdown_session(sess))
 			continue;
 
 		init_sess = sess;
@@ -586,7 +585,7 @@
 	if (init_sess)
 		tpg->se_tpg_tfo->close_session(init_sess);
 
-	printk(KERN_INFO "Successfuly changed queue depth to: %d for Initiator"
+	pr_debug("Successfully changed queue depth to: %d for Initiator
 		" Node: %s on %s Target Portal Group: %u\n", queue_depth,
 		initiatorname, tpg->se_tpg_tfo->get_fabric_name(),
 		tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -644,8 +643,8 @@
 
 	se_tpg->tpg_lun_list = kzalloc((sizeof(struct se_lun) *
 				TRANSPORT_MAX_LUNS_PER_TPG), GFP_KERNEL);
-	if (!(se_tpg->tpg_lun_list)) {
-		printk(KERN_ERR "Unable to allocate struct se_portal_group->"
+	if (!se_tpg->tpg_lun_list) {
+		pr_err("Unable to allocate struct se_portal_group->"
 				"tpg_lun_list\n");
 		return -ENOMEM;
 	}
@@ -686,7 +685,7 @@
 	list_add_tail(&se_tpg->se_tpg_node, &tpg_list);
 	spin_unlock_bh(&tpg_lock);
 
-	printk(KERN_INFO "TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
+	pr_debug("TARGET_CORE[%s]: Allocated %s struct se_portal_group for"
 		" endpoint: %s, Portal Tag: %u\n", tfo->get_fabric_name(),
 		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
 		"Normal" : "Discovery", (tfo->tpg_get_wwn(se_tpg) == NULL) ?
@@ -700,7 +699,7 @@
 {
 	struct se_node_acl *nacl, *nacl_tmp;
 
-	printk(KERN_INFO "TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
+	pr_debug("TARGET_CORE[%s]: Deallocating %s struct se_portal_group"
 		" for endpoint: %s Portal Tag %u\n",
 		(se_tpg->se_tpg_type == TRANSPORT_TPG_TYPE_NORMAL) ?
 		"Normal" : "Discovery", se_tpg->se_tpg_tfo->get_fabric_name(),
@@ -749,7 +748,7 @@
 	struct se_lun *lun;
 
 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
 			"-1: %u for Target Portal Group: %u\n",
 			tpg->se_tpg_tfo->get_fabric_name(),
 			unpacked_lun, TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -760,7 +759,7 @@
 	spin_lock(&tpg->tpg_lun_lock);
 	lun = &tpg->tpg_lun_list[unpacked_lun];
 	if (lun->lun_status == TRANSPORT_LUN_STATUS_ACTIVE) {
-		printk(KERN_ERR "TPG Logical Unit Number: %u is already active"
+		pr_err("TPG Logical Unit Number: %u is already active"
 			" on %s Target Portal Group: %u, ignoring request.\n",
 			unpacked_lun, tpg->se_tpg_tfo->get_fabric_name(),
 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
@@ -808,7 +807,7 @@
 	struct se_lun *lun;
 
 	if (unpacked_lun > (TRANSPORT_MAX_LUNS_PER_TPG-1)) {
-		printk(KERN_ERR "%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
+		pr_err("%s LUN: %u exceeds TRANSPORT_MAX_LUNS_PER_TPG"
 			"-1: %u for Target Portal Group: %u\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			TRANSPORT_MAX_LUNS_PER_TPG-1,
@@ -819,7 +818,7 @@
 	spin_lock(&tpg->tpg_lun_lock);
 	lun = &tpg->tpg_lun_list[unpacked_lun];
 	if (lun->lun_status != TRANSPORT_LUN_STATUS_ACTIVE) {
-		printk(KERN_ERR "%s Logical Unit Number: %u is not active on"
+		pr_err("%s Logical Unit Number: %u is not active on"
 			" Target Portal Group: %u, ignoring request.\n",
 			tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 			tpg->se_tpg_tfo->tpg_get_tag(tpg));
diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c
index c743d94..55b6588 100644
--- a/drivers/target/target_core_transport.c
+++ b/drivers/target/target_core_transport.c
@@ -58,132 +58,6 @@
 #include "target_core_scdb.h"
 #include "target_core_ua.h"
 
-/* #define DEBUG_CDB_HANDLER */
-#ifdef DEBUG_CDB_HANDLER
-#define DEBUG_CDB_H(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CDB_H(x...)
-#endif
-
-/* #define DEBUG_CMD_MAP */
-#ifdef DEBUG_CMD_MAP
-#define DEBUG_CMD_M(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CMD_M(x...)
-#endif
-
-/* #define DEBUG_MEM_ALLOC */
-#ifdef DEBUG_MEM_ALLOC
-#define DEBUG_MEM(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM(x...)
-#endif
-
-/* #define DEBUG_MEM2_ALLOC */
-#ifdef DEBUG_MEM2_ALLOC
-#define DEBUG_MEM2(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_MEM2(x...)
-#endif
-
-/* #define DEBUG_SG_CALC */
-#ifdef DEBUG_SG_CALC
-#define DEBUG_SC(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SC(x...)
-#endif
-
-/* #define DEBUG_SE_OBJ */
-#ifdef DEBUG_SE_OBJ
-#define DEBUG_SO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_SO(x...)
-#endif
-
-/* #define DEBUG_CMD_VOL */
-#ifdef DEBUG_CMD_VOL
-#define DEBUG_VOL(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_VOL(x...)
-#endif
-
-/* #define DEBUG_CMD_STOP */
-#ifdef DEBUG_CMD_STOP
-#define DEBUG_CS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CS(x...)
-#endif
-
-/* #define DEBUG_PASSTHROUGH */
-#ifdef DEBUG_PASSTHROUGH
-#define DEBUG_PT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_PT(x...)
-#endif
-
-/* #define DEBUG_TASK_STOP */
-#ifdef DEBUG_TASK_STOP
-#define DEBUG_TS(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TS(x...)
-#endif
-
-/* #define DEBUG_TRANSPORT_STOP */
-#ifdef DEBUG_TRANSPORT_STOP
-#define DEBUG_TRANSPORT_S(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TRANSPORT_S(x...)
-#endif
-
-/* #define DEBUG_TASK_FAILURE */
-#ifdef DEBUG_TASK_FAILURE
-#define DEBUG_TF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TF(x...)
-#endif
-
-/* #define DEBUG_DEV_OFFLINE */
-#ifdef DEBUG_DEV_OFFLINE
-#define DEBUG_DO(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_DO(x...)
-#endif
-
-/* #define DEBUG_TASK_STATE */
-#ifdef DEBUG_TASK_STATE
-#define DEBUG_TSTATE(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TSTATE(x...)
-#endif
-
-/* #define DEBUG_STATUS_THR */
-#ifdef DEBUG_STATUS_THR
-#define DEBUG_ST(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_ST(x...)
-#endif
-
-/* #define DEBUG_TASK_TIMEOUT */
-#ifdef DEBUG_TASK_TIMEOUT
-#define DEBUG_TT(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_TT(x...)
-#endif
-
-/* #define DEBUG_GENERIC_REQUEST_FAILURE */
-#ifdef DEBUG_GENERIC_REQUEST_FAILURE
-#define DEBUG_GRF(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_GRF(x...)
-#endif
-
-/* #define DEBUG_SAM_TASK_ATTRS */
-#ifdef DEBUG_SAM_TASK_ATTRS
-#define DEBUG_STA(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_STA(x...)
-#endif
-
 static int sub_api_initialized;
 
 static struct kmem_cache *se_cmd_cache;
@@ -225,62 +99,62 @@
 {
 	se_cmd_cache = kmem_cache_create("se_cmd_cache",
 			sizeof(struct se_cmd), __alignof__(struct se_cmd), 0, NULL);
-	if (!(se_cmd_cache)) {
-		printk(KERN_ERR "kmem_cache_create for struct se_cmd failed\n");
+	if (!se_cmd_cache) {
+		pr_err("kmem_cache_create for struct se_cmd failed\n");
 		goto out;
 	}
 	se_tmr_req_cache = kmem_cache_create("se_tmr_cache",
 			sizeof(struct se_tmr_req), __alignof__(struct se_tmr_req),
 			0, NULL);
-	if (!(se_tmr_req_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_tmr_req"
+	if (!se_tmr_req_cache) {
+		pr_err("kmem_cache_create() for struct se_tmr_req"
 				" failed\n");
 		goto out;
 	}
 	se_sess_cache = kmem_cache_create("se_sess_cache",
 			sizeof(struct se_session), __alignof__(struct se_session),
 			0, NULL);
-	if (!(se_sess_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_session"
+	if (!se_sess_cache) {
+		pr_err("kmem_cache_create() for struct se_session"
 				" failed\n");
 		goto out;
 	}
 	se_ua_cache = kmem_cache_create("se_ua_cache",
 			sizeof(struct se_ua), __alignof__(struct se_ua),
 			0, NULL);
-	if (!(se_ua_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct se_ua failed\n");
+	if (!se_ua_cache) {
+		pr_err("kmem_cache_create() for struct se_ua failed\n");
 		goto out;
 	}
 	t10_pr_reg_cache = kmem_cache_create("t10_pr_reg_cache",
 			sizeof(struct t10_pr_registration),
 			__alignof__(struct t10_pr_registration), 0, NULL);
-	if (!(t10_pr_reg_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for struct t10_pr_registration"
+	if (!t10_pr_reg_cache) {
+		pr_err("kmem_cache_create() for struct t10_pr_registration"
 				" failed\n");
 		goto out;
 	}
 	t10_alua_lu_gp_cache = kmem_cache_create("t10_alua_lu_gp_cache",
 			sizeof(struct t10_alua_lu_gp), __alignof__(struct t10_alua_lu_gp),
 			0, NULL);
-	if (!(t10_alua_lu_gp_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_cache"
+	if (!t10_alua_lu_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_cache"
 				" failed\n");
 		goto out;
 	}
 	t10_alua_lu_gp_mem_cache = kmem_cache_create("t10_alua_lu_gp_mem_cache",
 			sizeof(struct t10_alua_lu_gp_member),
 			__alignof__(struct t10_alua_lu_gp_member), 0, NULL);
-	if (!(t10_alua_lu_gp_mem_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_lu_gp_mem_"
+	if (!t10_alua_lu_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_lu_gp_mem_"
 				"cache failed\n");
 		goto out;
 	}
 	t10_alua_tg_pt_gp_cache = kmem_cache_create("t10_alua_tg_pt_gp_cache",
 			sizeof(struct t10_alua_tg_pt_gp),
 			__alignof__(struct t10_alua_tg_pt_gp), 0, NULL);
-	if (!(t10_alua_tg_pt_gp_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+	if (!t10_alua_tg_pt_gp_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
 				"cache failed\n");
 		goto out;
 	}
@@ -289,8 +163,8 @@
 			sizeof(struct t10_alua_tg_pt_gp_member),
 			__alignof__(struct t10_alua_tg_pt_gp_member),
 			0, NULL);
-	if (!(t10_alua_tg_pt_gp_mem_cache)) {
-		printk(KERN_ERR "kmem_cache_create() for t10_alua_tg_pt_gp_"
+	if (!t10_alua_tg_pt_gp_mem_cache) {
+		pr_err("kmem_cache_create() for t10_alua_tg_pt_gp_"
 				"mem_t failed\n");
 		goto out;
 	}
@@ -366,19 +240,19 @@
 
 	ret = request_module("target_core_iblock");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_iblock\n");
+		pr_err("Unable to load target_core_iblock\n");
 
 	ret = request_module("target_core_file");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_file\n");
+		pr_err("Unable to load target_core_file\n");
 
 	ret = request_module("target_core_pscsi");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_pscsi\n");
+		pr_err("Unable to load target_core_pscsi\n");
 
 	ret = request_module("target_core_stgt");
 	if (ret != 0)
-		printk(KERN_ERR "Unable to load target_core_stgt\n");
+		pr_err("Unable to load target_core_stgt\n");
 
 	return 0;
 }
@@ -405,8 +279,8 @@
 	struct se_session *se_sess;
 
 	se_sess = kmem_cache_zalloc(se_sess_cache, GFP_KERNEL);
-	if (!(se_sess)) {
-		printk(KERN_ERR "Unable to allocate struct se_session from"
+	if (!se_sess) {
+		pr_err("Unable to allocate struct se_session from"
 				" se_sess_cache\n");
 		return ERR_PTR(-ENOMEM);
 	}
@@ -460,7 +334,7 @@
 	}
 	list_add_tail(&se_sess->sess_list, &se_tpg->tpg_sess_list);
 
-	printk(KERN_INFO "TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
+	pr_debug("TARGET_CORE[%s]: Registered fabric_sess_ptr: %p\n",
 		se_tpg->se_tpg_tfo->get_fabric_name(), se_sess->fabric_sess_ptr);
 }
 EXPORT_SYMBOL(__transport_register_session);
@@ -485,7 +359,7 @@
 	 * Used by struct se_node_acl's under ConfigFS to locate active struct se_session
 	 */
 	se_nacl = se_sess->se_node_acl;
-	if ((se_nacl)) {
+	if (se_nacl) {
 		spin_lock_irqsave(&se_nacl->nacl_sess_lock, flags);
 		list_del(&se_sess->sess_acl_list);
 		/*
@@ -516,7 +390,7 @@
 	struct se_portal_group *se_tpg = se_sess->se_tpg;
 	struct se_node_acl *se_nacl;
 
-	if (!(se_tpg)) {
+	if (!se_tpg) {
 		transport_free_session(se_sess);
 		return;
 	}
@@ -532,11 +406,11 @@
 	 * struct se_node_acl if it had been previously dynamically generated.
 	 */
 	se_nacl = se_sess->se_node_acl;
-	if ((se_nacl)) {
+	if (se_nacl) {
 		spin_lock_bh(&se_tpg->acl_node_lock);
 		if (se_nacl->dynamic_node_acl) {
-			if (!(se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
-					se_tpg))) {
+			if (!se_tpg->se_tpg_tfo->tpg_check_demo_mode_cache(
+					se_tpg)) {
 				list_del(&se_nacl->acl_list);
 				se_tpg->num_node_acls--;
 				spin_unlock_bh(&se_tpg->acl_node_lock);
@@ -553,7 +427,7 @@
 
 	transport_free_session(se_sess);
 
-	printk(KERN_INFO "TARGET_CORE[%s]: Deregistered fabric_sess\n",
+	pr_debug("TARGET_CORE[%s]: Deregistered fabric_sess\n",
 		se_tpg->se_tpg_tfo->get_fabric_name());
 }
 EXPORT_SYMBOL(transport_deregister_session);
@@ -569,19 +443,19 @@
 
 	list_for_each_entry(task, &cmd->t_task_list, t_list) {
 		dev = task->se_dev;
-		if (!(dev))
+		if (!dev)
 			continue;
 
 		if (atomic_read(&task->task_active))
 			continue;
 
-		if (!(atomic_read(&task->task_state_active)))
+		if (!atomic_read(&task->task_state_active))
 			continue;
 
 		spin_lock_irqsave(&dev->execute_task_lock, flags);
 		list_del(&task->t_state_list);
-		DEBUG_TSTATE("Removed ITT: 0x%08x dev: %p task[%p]\n",
-			cmd->se_tfo->tfo_get_task_tag(cmd), dev, task);
+		pr_debug("Removed ITT: 0x%08x dev: %p task[%p]\n",
+			cmd->se_tfo->get_task_tag(cmd), dev, task);
 		spin_unlock_irqrestore(&dev->execute_task_lock, flags);
 
 		atomic_set(&task->task_state_active, 0);
@@ -610,7 +484,7 @@
 	 * command for LUN shutdown purposes.
 	 */
 	if (atomic_read(&cmd->transport_lun_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->transport_lun_stop)"
+		pr_debug("%s:%d atomic_read(&cmd->transport_lun_stop)"
 			" == TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -629,7 +503,7 @@
 	 * this command for frontend exceptions.
 	 */
 	if (atomic_read(&cmd->t_transport_stop)) {
-		DEBUG_CS("%s:%d atomic_read(&cmd->t_transport_stop) =="
+		pr_debug("%s:%d atomic_read(&cmd->t_transport_stop) =="
 			" TRUE for ITT: 0x%08x\n", __func__, __LINE__,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -695,7 +569,7 @@
 		return;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(atomic_read(&cmd->transport_dev_active))) {
+	if (!atomic_read(&cmd->transport_dev_active)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto check_lun;
 	}
@@ -710,7 +584,7 @@
 		list_del(&cmd->se_lun_node);
 		atomic_set(&cmd->transport_lun_active, 0);
 #if 0
-		printk(KERN_INFO "Removed ITT: 0x%08x from LUN LIST[%d]\n"
+		pr_debug("Removed ITT: 0x%08x from LUN LIST[%d]\n",
 			cmd->se_tfo->get_task_tag(cmd), lun->unpacked_lun);
 #endif
 	}
@@ -797,7 +671,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&qobj->cmd_queue_lock, flags);
-	if (!(atomic_read(&cmd->t_transport_queue_active))) {
+	if (!atomic_read(&cmd->t_transport_queue_active)) {
 		spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 		return;
 	}
@@ -812,7 +686,7 @@
 	spin_unlock_irqrestore(&qobj->cmd_queue_lock, flags);
 
 	if (atomic_read(&cmd->t_transport_queue_active)) {
-		printk(KERN_ERR "ITT: 0x%08x t_transport_queue_active: %d\n",
+		pr_err("ITT: 0x%08x t_transport_queue_active: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
 			atomic_read(&cmd->t_transport_queue_active));
 	}
@@ -853,7 +727,7 @@
 	int t_state;
 	unsigned long flags;
 #if 0
-	printk(KERN_INFO "task: %p CDB: 0x%02x obj_ptr: %p\n", task,
+	pr_debug("task: %p CDB: 0x%02x obj_ptr: %p\n", task,
 			cmd->t_task_cdb[0], dev);
 #endif
 	if (dev)
@@ -899,8 +773,8 @@
 	 * the processing thread.
 	 */
 	if (atomic_read(&task->task_timeout)) {
-		if (!(atomic_dec_and_test(
-				&cmd->t_task_cdbs_timeout_left))) {
+		if (!atomic_dec_and_test(
+				&cmd->t_task_cdbs_timeout_left)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 				flags);
 			return;
@@ -918,7 +792,7 @@
 	 * struct se_task from struct se_cmd will complete itself into the
 	 * device queue depending upon int success.
 	 */
-	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
+	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
 		if (!success)
 			cmd->t_tasks_failed = 1;
 
@@ -976,9 +850,9 @@
 				&task_prev->t_execute_list :
 				&dev->execute_task_list);
 
-		DEBUG_STA("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
+		pr_debug("Set HEAD_OF_QUEUE for task CDB: 0x%02x"
 				" in execution queue\n",
-				T_TASK(task->task_se_cmd)->t_task_cdb[0]);
+				task->task_se_cmd->t_task_cdb[0]);
 		return 1;
 	}
 	/*
@@ -1020,7 +894,7 @@
 
 	atomic_set(&task->task_state_active, 1);
 
-	DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
+	pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
 		task->task_se_cmd->se_tfo->get_task_tag(task->task_se_cmd),
 		task, dev);
 }
@@ -1042,8 +916,8 @@
 		list_add_tail(&task->t_state_list, &dev->state_task_list);
 		atomic_set(&task->task_state_active, 1);
 
-		DEBUG_TSTATE("Added ITT: 0x%08x task[%p] to dev: %p\n",
-			task->se_cmd->se_tfo->get_task_tag(
+		pr_debug("Added ITT: 0x%08x task[%p] to dev: %p\n",
+			task->task_se_cmd->se_tfo->get_task_tag(
 			task->task_se_cmd), task, dev);
 
 		spin_unlock(&dev->execute_task_lock);
@@ -1112,7 +986,7 @@
 		smp_mb__after_atomic_dec();
 		spin_unlock_irq(&dev->qf_cmd_lock);
 
-		printk(KERN_INFO "Processing %s cmd: %p QUEUE_FULL in work queue"
+		pr_debug("Processing %s cmd: %p QUEUE_FULL in work queue"
 			" context: %s\n", cmd->se_tfo->get_fabric_name(), cmd,
 			(cmd->t_state == TRANSPORT_COMPLETE_OK) ? "COMPLETE_OK" :
 			(cmd->t_state == TRANSPORT_COMPLETE_QF_WP) ? "WRITE_PENDING"
@@ -1197,7 +1071,7 @@
 		spin_unlock_irqrestore(&dev->dev_queue_obj.cmd_queue_lock,
 				flags);
 
-		printk(KERN_ERR "Releasing ITT: 0x%08x, i_state: %u,"
+		pr_err("Releasing ITT: 0x%08x, i_state: %u,"
 			" t_state: %u directly\n",
 			cmd->se_tfo->get_task_tag(cmd),
 			cmd->se_tfo->get_cmd_state(cmd), t_state);
@@ -1264,7 +1138,7 @@
 	if (p_buf)
 		strncpy(p_buf, buf, p_buf_len);
 	else
-		printk(KERN_INFO "%s", buf);
+		pr_debug("%s", buf);
 }
 
 void
@@ -1314,7 +1188,7 @@
 	if (p_buf)
 		strncpy(p_buf, buf, p_buf_len);
 	else
-		printk("%s", buf);
+		pr_debug("%s", buf);
 
 	return ret;
 }
@@ -1374,7 +1248,7 @@
 			return -EINVAL;
 		strncpy(p_buf, buf, p_buf_len);
 	} else {
-		printk("%s", buf);
+		pr_debug("%s", buf);
 	}
 
 	return ret;
@@ -1425,7 +1299,7 @@
 	if (p_buf)
 		strncpy(p_buf, buf, p_buf_len);
 	else
-		printk("%s", buf);
+		pr_debug("%s", buf);
 
 	return ret;
 }
@@ -1482,7 +1356,7 @@
 	}
 
 	dev->dev_task_attr_type = SAM_TASK_ATTR_EMULATED;
-	DEBUG_STA("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
+	pr_debug("%s: Using SAM_TASK_ATTR_EMULATED for SPC: 0x%02x"
 		" device\n", dev->transport->name,
 		dev->transport->get_device_rev(dev));
 }
@@ -1494,32 +1368,32 @@
 	/*
 	 * Print Linux/SCSI style INQUIRY formatting to the kernel ring buffer
 	 */
-	printk("  Vendor: ");
+	pr_debug("  Vendor: ");
 	for (i = 0; i < 8; i++)
 		if (wwn->vendor[i] >= 0x20)
-			printk("%c", wwn->vendor[i]);
+			pr_debug("%c", wwn->vendor[i]);
 		else
-			printk(" ");
+			pr_debug(" ");
 
-	printk("  Model: ");
+	pr_debug("  Model: ");
 	for (i = 0; i < 16; i++)
 		if (wwn->model[i] >= 0x20)
-			printk("%c", wwn->model[i]);
+			pr_debug("%c", wwn->model[i]);
 		else
-			printk(" ");
+			pr_debug(" ");
 
-	printk("  Revision: ");
+	pr_debug("  Revision: ");
 	for (i = 0; i < 4; i++)
 		if (wwn->revision[i] >= 0x20)
-			printk("%c", wwn->revision[i]);
+			pr_debug("%c", wwn->revision[i]);
 		else
-			printk(" ");
+			pr_debug(" ");
 
-	printk("\n");
+	pr_debug("\n");
 
 	device_type = dev->transport->get_device_type(dev);
-	printk("  Type:   %s ", scsi_device_type(device_type));
-	printk("                 ANSI SCSI revision: %02x\n",
+	pr_debug("  Type:   %s ", scsi_device_type(device_type));
+	pr_debug("                 ANSI SCSI revision: %02x\n",
 				dev->transport->get_device_rev(dev));
 }
 
@@ -1537,8 +1411,8 @@
 	struct se_device  *dev;
 
 	dev = kzalloc(sizeof(struct se_device), GFP_KERNEL);
-	if (!(dev)) {
-		printk(KERN_ERR "Unable to allocate memory for se_dev_t\n");
+	if (!dev) {
+		pr_err("Unable to allocate memory for se_dev_t\n");
 		return NULL;
 	}
 
@@ -1608,7 +1482,7 @@
 	dev->process_thread = kthread_run(transport_processing_thread, dev,
 					  "LIO_%s", dev->transport->name);
 	if (IS_ERR(dev->process_thread)) {
-		printk(KERN_ERR "Unable to create kthread: LIO_%s\n",
+		pr_err("Unable to create kthread: LIO_%s\n",
 			dev->transport->name);
 		goto out;
 	}
@@ -1626,7 +1500,7 @@
 	 */
 	if (dev->transport->transport_type != TRANSPORT_PLUGIN_PHBA_PDEV) {
 		if (!inquiry_prod || !inquiry_rev) {
-			printk(KERN_ERR "All non TCM/pSCSI plugins require"
+			pr_err("All non TCM/pSCSI plugins require"
 				" INQUIRY consts\n");
 			goto out;
 		}
@@ -1688,9 +1562,9 @@
 	struct se_task *task;
 	struct se_device *dev = cmd->se_dev;
 
-	task = dev->transport->alloc_task(cmd);
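+	/* alloc_task() now takes only the CDB, not the whole se_cmd */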
+	task = dev->transport->alloc_task(cmd->t_task_cdb);
 	if (!task) {
-		printk(KERN_ERR "Unable to allocate struct se_task\n");
+		pr_err("Unable to allocate struct se_task\n");
 		return NULL;
 	}
 
@@ -1751,7 +1625,7 @@
 		return 0;
 
 	if (cmd->sam_task_attr == MSG_ACA_TAG) {
-		DEBUG_STA("SAM Task Attribute ACA"
+		pr_debug("SAM Task Attribute ACA"
 			" emulation is not supported\n");
 		return -EINVAL;
 	}
@@ -1761,9 +1635,9 @@
 	 */
 	cmd->se_ordered_id = atomic_inc_return(&cmd->se_dev->dev_ordered_id);
 	smp_mb__after_atomic_inc();
-	DEBUG_STA("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
+	pr_debug("Allocated se_ordered_id: %u for Task Attr: 0x%02x on %s\n",
 			cmd->se_ordered_id, cmd->sam_task_attr,
-			TRANSPORT(cmd->se_dev)->name);
+			cmd->se_dev->transport->name);
 	return 0;
 }
 
@@ -1804,7 +1678,7 @@
 	 * for VARIABLE_LENGTH_CMD
 	 */
 	if (scsi_command_size(cdb) > SCSI_MAX_VARLEN_CDB_SIZE) {
-		printk(KERN_ERR "Received SCSI CDB with command_size: %d that"
+		pr_err("Received SCSI CDB with command_size: %d that"
 			" exceeds SCSI_MAX_VARLEN_CDB_SIZE: %d\n",
 			scsi_command_size(cdb), SCSI_MAX_VARLEN_CDB_SIZE);
 		return -EINVAL;
@@ -1817,8 +1691,8 @@
 	if (scsi_command_size(cdb) > sizeof(cmd->__t_task_cdb)) {
 		cmd->t_task_cdb = kzalloc(scsi_command_size(cdb),
 						GFP_KERNEL);
-		if (!(cmd->t_task_cdb)) {
-			printk(KERN_ERR "Unable to allocate cmd->t_task_cdb"
+		if (!cmd->t_task_cdb) {
+			pr_err("Unable to allocate cmd->t_task_cdb"
 				" %u > sizeof(cmd->__t_task_cdb): %lu ops\n",
 				scsi_command_size(cdb),
 				(unsigned long)sizeof(cmd->__t_task_cdb));
@@ -1864,7 +1738,7 @@
 {
 	if (!cmd->se_lun) {
 		dump_stack();
-		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		pr_err("cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
 
@@ -1882,12 +1756,12 @@
 {
 	if (!cmd->se_lun) {
 		dump_stack();
-		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		pr_err("cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
 	if (in_interrupt()) {
 		dump_stack();
-		printk(KERN_ERR "transport_generic_handle_cdb cannot be called"
+		pr_err("transport_generic_handle_cdb cannot be called"
 				" from interrupt context\n");
 		return -EINVAL;
 	}
@@ -1906,7 +1780,7 @@
 {
 	if (!cmd->se_lun) {
 		dump_stack();
-		printk(KERN_ERR "cmd->se_lun is NULL\n");
+		pr_err("cmd->se_lun is NULL\n");
 		return -EINVAL;
 	}
 
@@ -1975,7 +1849,7 @@
 	unsigned long flags;
 	int ret = 0;
 
-	DEBUG_TS("ITT[0x%08x] - Stopping tasks\n",
+	pr_debug("ITT[0x%08x] - Stopping tasks\n",
 		cmd->se_tfo->get_task_tag(cmd));
 
 	/*
@@ -1984,7 +1858,7 @@
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_for_each_entry_safe(task, task_tmp,
 				&cmd->t_task_list, t_list) {
-		DEBUG_TS("task_no[%d] - Processing task %p\n",
+		pr_debug("task_no[%d] - Processing task %p\n",
 				task->task_no, task);
 		/*
 		 * If the struct se_task has not been sent and is not active,
@@ -1997,7 +1871,7 @@
 			transport_remove_task_from_execute_queue(task,
 					task->se_dev);
 
-			DEBUG_TS("task_no[%d] - Removed from execute queue\n",
+			pr_debug("task_no[%d] - Removed from execute queue\n",
 				task->task_no);
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
 			continue;
@@ -2012,10 +1886,10 @@
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 
-			DEBUG_TS("task_no[%d] - Waiting to complete\n",
+			pr_debug("task_no[%d] - Waiting to complete\n",
 				task->task_no);
 			wait_for_completion(&task->task_stop_comp);
-			DEBUG_TS("task_no[%d] - Stopped successfully\n",
+			pr_debug("task_no[%d] - Stopped successfully\n",
 				task->task_no);
 
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -2024,7 +1898,7 @@
 			atomic_set(&task->task_active, 0);
 			atomic_set(&task->task_stop, 0);
 		} else {
-			DEBUG_TS("task_no[%d] - Did nothing\n", task->task_no);
+			pr_debug("task_no[%d] - Did nothing\n", task->task_no);
 			ret++;
 		}
 
@@ -2046,18 +1920,18 @@
 {
 	int ret = 0;
 
-	DEBUG_GRF("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
+	pr_debug("-----[ Storage Engine Exception for cmd: %p ITT: 0x%08x"
 		" CDB: 0x%02x\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->t_task_cdb[0]);
-	DEBUG_GRF("-----[ i_state: %d t_state/def_t_state:"
+	pr_debug("-----[ i_state: %d t_state/def_t_state:"
 		" %d/%d transport_error_status: %d\n",
 		cmd->se_tfo->get_cmd_state(cmd),
 		cmd->t_state, cmd->deferred_t_state,
 		cmd->transport_error_status);
-	DEBUG_GRF("-----[ t_task_cdbs: %d t_task_cdbs_left: %d"
+	pr_debug("-----[ t_tasks: %d t_task_cdbs_left: %d"
 		" t_task_cdbs_sent: %d t_task_cdbs_ex_left: %d --"
 		" t_transport_active: %d t_transport_stop: %d"
-		" t_transport_sent: %d\n", cmd->t_task_cdbs,
+		" t_transport_sent: %d\n", cmd->t_task_list_num,
 		atomic_read(&cmd->t_task_cdbs_left),
 		atomic_read(&cmd->t_task_cdbs_sent),
 		atomic_read(&cmd->t_task_cdbs_ex_left),
@@ -2146,7 +2020,7 @@
 		 */
 		break;
 	default:
-		printk(KERN_ERR "Unknown transport error for CDB 0x%02x: %d\n",
+		pr_err("Unknown transport error for CDB 0x%02x: %d\n",
 			cmd->t_task_cdb[0],
 			cmd->transport_error_status);
 		cmd->scsi_sense_reason = TCM_UNSUPPORTED_SCSI_OPCODE;
@@ -2164,7 +2038,7 @@
 
 check_stop:
 	transport_lun_remove_cmd(cmd);
-	if (!(transport_cmd_check_stop_to_fabric(cmd)))
+	if (!transport_cmd_check_stop_to_fabric(cmd))
 		;
 	return;
 
@@ -2178,7 +2052,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(atomic_read(&cmd->t_transport_timeout))) {
+	if (!atomic_read(&cmd->t_transport_timeout)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
@@ -2262,7 +2136,7 @@
 	struct se_cmd *cmd = task->task_se_cmd;
 	unsigned long flags;
 
-	DEBUG_TT("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
+	pr_debug("transport task timeout fired! task: %p cmd: %p\n", task, cmd);
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (task->task_flags & TF_STOP) {
@@ -2274,8 +2148,8 @@
 	/*
 	 * Determine if transport_complete_task() has already been called.
 	 */
-	if (!(atomic_read(&task->task_active))) {
-		DEBUG_TT("transport task: %p cmd: %p timeout task_active"
+	if (!atomic_read(&task->task_active)) {
+		pr_debug("transport task: %p cmd: %p timeout task_active"
 				" == 0\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
@@ -2290,20 +2164,20 @@
 	task->task_scsi_status = 1;
 
 	if (atomic_read(&task->task_stop)) {
-		DEBUG_TT("transport task: %p cmd: %p timeout task_stop"
+		pr_debug("transport task: %p cmd: %p timeout task_stop"
 				" == 1\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		complete(&task->task_stop_comp);
 		return;
 	}
 
-	if (!(atomic_dec_and_test(&cmd->t_task_cdbs_left))) {
-		DEBUG_TT("transport task: %p cmd: %p timeout non zero"
+	if (!atomic_dec_and_test(&cmd->t_task_cdbs_left)) {
+		pr_debug("transport task: %p cmd: %p timeout non zero"
 				" t_task_cdbs_left\n", task, cmd);
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		return;
 	}
-	DEBUG_TT("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
+	pr_debug("transport task: %p cmd: %p timeout ZERO t_task_cdbs_left\n",
 			task, cmd);
 
 	cmd->t_state = TRANSPORT_COMPLETE_FAILURE;
@@ -2326,7 +2200,7 @@
 	 * If the task_timeout is disabled, exit now.
 	 */
 	timeout = dev->se_sub_dev->se_dev_attrib.task_timeout;
-	if (!(timeout))
+	if (!timeout)
 		return;
 
 	init_timer(&task->task_timer);
@@ -2337,7 +2211,7 @@
 	task->task_flags |= TF_RUNNING;
 	add_timer(&task->task_timer);
 #if 0
-	printk(KERN_INFO "Starting task timer for cmd: %p task: %p seconds:"
+	pr_debug("Starting task timer for cmd: %p task: %p seconds:"
 		" %d\n", task->task_se_cmd, task, timeout);
 #endif
 }
@@ -2349,7 +2223,7 @@
 {
 	struct se_cmd *cmd = task->task_se_cmd;
 
-	if (!(task->task_flags & TF_RUNNING))
+	if (!(task->task_flags & TF_RUNNING))
 		return;
 
 	task->task_flags |= TF_STOP;
@@ -2404,9 +2278,9 @@
 	 if (cmd->sam_task_attr == MSG_HEAD_TAG) {
 		atomic_inc(&cmd->se_dev->dev_hoq_count);
 		smp_mb__after_atomic_inc();
-		DEBUG_STA("Added HEAD_OF_QUEUE for CDB:"
+		pr_debug("Added HEAD_OF_QUEUE for CDB:"
 			" 0x%02x, se_ordered_id: %u\n",
-			cmd->_task_cdb[0],
+			cmd->t_task_cdb[0],
 			cmd->se_ordered_id);
 		return 1;
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -2418,7 +2292,7 @@
 		atomic_inc(&cmd->se_dev->dev_ordered_sync);
 		smp_mb__after_atomic_inc();
 
-		DEBUG_STA("Added ORDERED for CDB: 0x%02x to ordered"
+		pr_debug("Added ORDERED for CDB: 0x%02x to ordered"
 				" list, se_ordered_id: %u\n",
 				cmd->t_task_cdb[0],
 				cmd->se_ordered_id);
@@ -2427,7 +2301,7 @@
 		 * no other older commands exist that need to be
 		 * completed first.
 		 */
-		if (!(atomic_read(&cmd->se_dev->simple_cmds)))
+		if (!atomic_read(&cmd->se_dev->simple_cmds))
 			return 1;
 	} else {
 		/*
@@ -2452,7 +2326,7 @@
 				&cmd->se_dev->delayed_cmd_list);
 		spin_unlock(&cmd->se_dev->delayed_cmd_lock);
 
-		DEBUG_STA("Added CDB: 0x%02x Task Attr: 0x%02x to"
+		pr_debug("Added CDB: 0x%02x Task Attr: 0x%02x to"
 			" delayed CMD list, se_ordered_id: %u\n",
 			cmd->t_task_cdb[0], cmd->sam_task_attr,
 			cmd->se_ordered_id);
@@ -2486,7 +2360,7 @@
 	 * Call transport_cmd_check_stop() to see if a fabric exception
 	 * has occurred that prevents execution.
 	 */
-	if (!(transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING))) {
+	if (!transport_cmd_check_stop(cmd, 0, TRANSPORT_PROCESSING)) {
 		/*
 		 * Check for SAM Task Attribute emulation and HEAD_OF_QUEUE
 		 * attribute for the tasks of the received struct se_cmd CDB
@@ -2777,7 +2651,7 @@
 			return sectors;
 	}
 #if 0
-	printk(KERN_INFO "Returning block_size: %u, sectors: %u == %u for"
+	pr_debug("Returning block_size: %u, sectors: %u == %u for"
 			" %s object\n", dev->se_sub_dev->se_dev_attrib.block_size, sectors,
 			dev->se_sub_dev->se_dev_attrib.block_size * sectors,
 			dev->transport->name);
@@ -2832,8 +2706,8 @@
 	 * 5) transfer the resulting XOR data to the data-in buffer.
 	 */
 	buf = kmalloc(cmd->data_length, GFP_KERNEL);
-	if (!(buf)) {
-		printk(KERN_ERR "Unable to allocate xor_callback buf\n");
+	if (!buf) {
+		pr_err("Unable to allocate xor_callback buf\n");
 		return;
 	}
 	/*
@@ -2893,18 +2767,18 @@
 			continue;
 
 		dev = task->se_dev;
-		if (!(dev))
+		if (!dev)
 			continue;
 
 		if (!dev->transport->get_sense_buffer) {
-			printk(KERN_ERR "dev->transport->get_sense_buffer"
+			pr_err("dev->transport->get_sense_buffer"
 					" is NULL\n");
 			continue;
 		}
 
 		sense_buffer = dev->transport->get_sense_buffer(task);
-		if (!(sense_buffer)) {
-			printk(KERN_ERR "ITT[0x%08x]_TASK[%d]: Unable to locate"
+		if (!sense_buffer) {
+			pr_err("ITT[0x%08x]_TASK[%d]: Unable to locate"
 				" sense buffer for task with sense\n",
 				cmd->se_tfo->get_task_tag(cmd), task->task_no);
 			continue;
@@ -2921,7 +2795,7 @@
 		cmd->scsi_sense_length =
 				(TRANSPORT_SENSE_BUFFER + offset);
 
-		printk(KERN_INFO "HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
+		pr_debug("HBA_[%u]_PLUG[%s]: Set SAM STATUS: 0x%02x"
 				" and sense\n",
 			dev->se_hba->hba_id, dev->transport->name,
 				cmd->scsi_status);
@@ -2969,13 +2843,12 @@
 
 	sectors = (cmd->data_length / dev->se_sub_dev->se_dev_attrib.block_size);
 
-	if ((cmd->t_task_lba + sectors) >
-	     transport_dev_end_lba(dev)) {
-		printk(KERN_ERR "LBA: %llu Sectors: %u exceeds"
+	if ((cmd->t_task_lba + sectors) > transport_dev_end_lba(dev)) {
+		pr_err("LBA: %llu Sectors: %u exceeds"
 			" transport_dev_end_lba(): %llu\n",
 			cmd->t_task_lba, sectors,
 			transport_dev_end_lba(dev));
-		printk(KERN_ERR "  We should return CHECK_CONDITION"
+		pr_err("  We should return CHECK_CONDITION"
 		       " but we don't yet\n");
 		return 0;
 	}
@@ -3026,7 +2899,7 @@
 		 */
 		if (ret > 0) {
 #if 0
-			printk(KERN_INFO "[%s]: ALUA TG Port not available,"
+			pr_debug("[%s]: ALUA TG Port not available,"
 				" SenseKey: NOT_READY, ASC/ASCQ: 0x04/0x%02x\n",
 				cmd->se_tfo->get_fabric_name(), alua_ascq);
 #endif
@@ -3192,10 +3065,13 @@
 			if (sector_ret)
 				goto out_unsupported_cdb;
 
-			if (sectors != 0)
+			if (sectors)
 				size = transport_get_size(sectors, cdb, cmd);
-			else
-				size = dev->se_sub_dev->se_dev_attrib.block_size;
+			else {
+				pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not"
+				       " supported\n");
+				goto out_invalid_cdb_field;
+			}
 
 			cmd->t_task_lba = get_unaligned_be64(&cdb[12]);
 			cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
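
Note: the rejection above matches WSNZ.  In SBC-3 the WSNZ (write same
non-zero) bit of the Block Limits VPD page (0xB0) tells initiators that a
NUMBER OF LOGICAL BLOCKS value of zero is not supported, so failing the CDB
is the advertised behaviour.  A minimal sketch of the matching VPD side,
with the buffer name and byte placement assumed from the usual Block Limits
emulation rather than shown in this hunk:

	/* Sketch: advertise WSNZ=1 in byte 4 of VPD page 0xB0 so initiators
	 * know WRITE_SAME with a zero block count will be rejected. */
	buf[4] = 0x01;	/* WSNZ */
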
@@ -3207,7 +3083,7 @@
 				break;
 
 			if ((cdb[10] & 0x04) || (cdb[10] & 0x02)) {
-				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+				pr_err("WRITE_SAME PBDATA and LBDATA"
 					" bits not supported for Block Discard"
 					" Emulation\n");
 				goto out_invalid_cdb_field;
@@ -3217,13 +3093,13 @@
 			 * tpws with the UNMAP=1 bit set.
 			 */
 			if (!(cdb[10] & 0x08)) {
-				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not"
+				pr_err("WRITE_SAME w/o UNMAP bit not"
 					" supported for Block Discard Emulation\n");
 				goto out_invalid_cdb_field;
 			}
 			break;
 		default:
-			printk(KERN_ERR "VARIABLE_LENGTH_CMD service action"
+			pr_err("VARIABLE_LENGTH_CMD service action"
 				" 0x%04x not supported\n", service_action);
 			goto out_unsupported_cdb;
 		}
@@ -3469,10 +3345,12 @@
 		if (sector_ret)
 			goto out_unsupported_cdb;
 
-		if (sectors != 0)
+		if (sectors)
 			size = transport_get_size(sectors, cdb, cmd);
-		else
-			size = dev->se_sub_dev->se_dev_attrib.block_size;
+		else {
+			pr_err("WSNZ=1, WRITE_SAME w/sectors=0 not supported\n");
+			goto out_invalid_cdb_field;
+		}
 
 		cmd->t_task_lba = get_unaligned_be16(&cdb[2]);
 		passthrough = (dev->transport->transport_type ==
@@ -3484,9 +3362,9 @@
 		 * emulation for -> Linux/BLOCK discard with TCM/IBLOCK and
 		 * TCM/FILEIO subsystem plugin backstores.
 		 */
-		if (!(passthrough)) {
+		if (!passthrough) {
 			if ((cdb[1] & 0x04) || (cdb[1] & 0x02)) {
-				printk(KERN_ERR "WRITE_SAME PBDATA and LBDATA"
+				pr_err("WRITE_SAME PBDATA and LBDATA"
 					" bits not supported for Block Discard"
 					" Emulation\n");
 				goto out_invalid_cdb_field;
@@ -3496,7 +3374,7 @@
 			 * tpws with the UNMAP=1 bit set.
 			 */
 			if (!(cdb[1] & 0x08)) {
-				printk(KERN_ERR "WRITE_SAME w/o UNMAP bit not "
+				pr_err("WRITE_SAME w/o UNMAP bit not"
 					" supported for Block Discard Emulation\n");
 				goto out_invalid_cdb_field;
 			}
@@ -3532,7 +3410,7 @@
 		cmd->se_cmd_flags |= SCF_SCSI_CONTROL_SG_IO_CDB;
 		break;
 	default:
-		printk(KERN_WARNING "TARGET_CORE[%s]: Unsupported SCSI Opcode"
+		pr_warn("TARGET_CORE[%s]: Unsupported SCSI Opcode"
 			" 0x%02x, sending CHECK_CONDITION.\n",
 			cmd->se_tfo->get_fabric_name(), cdb[0]);
 		cmd->transport_wait_for_tasks = &transport_nop_wait_for_tasks;
@@ -3540,7 +3418,7 @@
 	}
 
 	if (size != cmd->data_length) {
-		printk(KERN_WARNING "TARGET_CORE[%s]: Expected Transfer Length:"
+		pr_warn("TARGET_CORE[%s]: Expected Transfer Length:"
 			" %u does not match SCSI CDB Length: %u for SAM Opcode:"
 			" 0x%02x\n", cmd->se_tfo->get_fabric_name(),
 				cmd->data_length, size, cdb[0]);
@@ -3548,7 +3426,7 @@
 		cmd->cmd_spdtl = size;
 
 		if (cmd->data_direction == DMA_TO_DEVICE) {
-			printk(KERN_ERR "Rejecting underflow/overflow"
+			pr_err("Rejecting underflow/overflow"
 					" WRITE data\n");
 			goto out_invalid_cdb_field;
 		}
@@ -3556,8 +3434,8 @@
 		 * Reject READ_* or WRITE_* with overflow/underflow for
 		 * type SCF_SCSI_DATA_SG_IO_CDB.
 		 */
-		if (!(ret) && (dev->se_sub_dev->se_dev_attrib.block_size != 512))  {
-			printk(KERN_ERR "Failing OVERFLOW/UNDERFLOW for LBA op"
+		if (!ret && (dev->se_sub_dev->se_dev_attrib.block_size != 512)) {
+			pr_err("Failing OVERFLOW/UNDERFLOW for LBA op"
 				" CDB on non 512-byte sector setup subsystem"
 				" plugin: %s\n", dev->transport->name);
 			/* Returns CHECK_CONDITION + INVALID_CDB_FIELD */
@@ -3607,14 +3485,14 @@
 		atomic_dec(&dev->simple_cmds);
 		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
-		DEBUG_STA("Incremented dev->dev_cur_ordered_id: %u for"
+		pr_debug("Incremented dev->dev_cur_ordered_id: %u for"
 			" SIMPLE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_HEAD_TAG) {
 		atomic_dec(&dev->dev_hoq_count);
 		smp_mb__after_atomic_dec();
 		dev->dev_cur_ordered_id++;
-		DEBUG_STA("Incremented dev_cur_ordered_id: %u for"
+		pr_debug("Incremented dev_cur_ordered_id: %u for"
 			" HEAD_OF_QUEUE: %u\n", dev->dev_cur_ordered_id,
 			cmd->se_ordered_id);
 	} else if (cmd->sam_task_attr == MSG_ORDERED_TAG) {
@@ -3625,7 +3503,7 @@
 		spin_unlock(&dev->ordered_cmd_lock);
 
 		dev->dev_cur_ordered_id++;
-		DEBUG_STA("Incremented dev_cur_ordered_id: %u for ORDERED:"
+		pr_debug("Incremented dev_cur_ordered_id: %u for ORDERED:"
 			" %u\n", dev->dev_cur_ordered_id, cmd->se_ordered_id);
 	}
 	/*
@@ -3640,10 +3518,10 @@
 		list_del(&cmd_p->se_delayed_node);
 		spin_unlock(&dev->delayed_cmd_lock);
 
-		DEBUG_STA("Calling add_tasks() for"
+		pr_debug("Calling add_tasks() for"
 			" cmd_p: 0x%02x Task Attr: 0x%02x"
 			" Dormant -> Active, se_ordered_id: %u\n",
-			T_TASK(cmd_p)->t_task_cdb[0],
+			cmd_p->t_task_cdb[0],
 			cmd_p->sam_task_attr, cmd_p->se_ordered_id);
 
 		transport_add_tasks_from_cmd(cmd_p);
@@ -3812,7 +3690,7 @@
 	return;
 
 queue_full:
-	printk(KERN_INFO "Handling complete_ok QUEUE_FULL: se_cmd: %p,"
+	pr_debug("Handling complete_ok QUEUE_FULL: se_cmd: %p,"
 		" data_direction: %d\n", cmd, cmd->data_direction);
 	transport_handle_queue_full(cmd, cmd->se_dev, transport_complete_qf);
 }
@@ -3837,49 +3715,34 @@
 		if (task->se_dev)
 			task->se_dev->transport->free_task(task);
 		else
-			printk(KERN_ERR "task[%u] - task->se_dev is NULL\n",
+			pr_err("task[%u] - task->se_dev is NULL\n",
 				task->task_no);
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 	}
 	spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 }
 
-static inline void transport_free_pages(struct se_cmd *cmd)
+static inline void transport_free_sgl(struct scatterlist *sgl, int nents)
 {
 	struct scatterlist *sg;
-	int free_page = 1;
 	int count;
 
+	for_each_sg(sgl, sg, nents, count)
+		__free_page(sg_page(sg));
+
+	kfree(sgl);
+}
+
+static inline void transport_free_pages(struct se_cmd *cmd)
+{
 	if (cmd->se_cmd_flags & SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC)
-		free_page = 0;
-	if (cmd->se_dev->transport->do_se_mem_map)
-		free_page = 0;
+		return;
 
-	for_each_sg(cmd->t_data_sg, sg, cmd->t_data_nents, count) {
-		/*
-		 * Only called if
-		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
-		 */
-		if (free_page)
-			__free_page(sg_page(sg));
-
-	}
-	if (free_page)
-		kfree(cmd->t_data_sg);
+	transport_free_sgl(cmd->t_data_sg, cmd->t_data_nents);
 	cmd->t_data_sg = NULL;
 	cmd->t_data_nents = 0;
 
-	for_each_sg(cmd->t_bidi_data_sg, sg, cmd->t_bidi_data_nents, count) {
-		/*
-		 * Only called if
-		 * SCF_PASSTHROUGH_SG_TO_MEM_NOALLOC is NOT in use,
-		 */
-		if (free_page)
-			__free_page(sg_page(sg));
-
-	}
-	if (free_page)
-		kfree(cmd->t_bidi_data_sg);
+	transport_free_sgl(cmd->t_bidi_data_sg, cmd->t_bidi_data_nents);
 	cmd->t_bidi_data_sg = NULL;
 	cmd->t_bidi_data_nents = 0;
 }
@@ -3895,7 +3758,7 @@
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (atomic_read(&cmd->t_fe_count)) {
-		if (!(atomic_dec_and_test(&cmd->t_fe_count))) {
+		if (!atomic_dec_and_test(&cmd->t_fe_count)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			return 1;
@@ -3903,7 +3766,7 @@
 	}
 
 	if (atomic_read(&cmd->t_se_count)) {
-		if (!(atomic_dec_and_test(&cmd->t_se_count))) {
+		if (!atomic_dec_and_test(&cmd->t_se_count)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock,
 					flags);
 			return 1;
@@ -3922,7 +3785,7 @@
 		return;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(atomic_read(&cmd->transport_dev_active))) {
+	if (!atomic_read(&cmd->transport_dev_active)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto free_pages;
 	}
@@ -3953,7 +3816,7 @@
 	}
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
-	if (!(atomic_read(&cmd->transport_dev_active))) {
+	if (!atomic_read(&cmd->transport_dev_active)) {
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		goto free_pages;
 	}
@@ -4027,7 +3890,7 @@
 					      DMA_FROM_DEVICE,
 					      cmd->t_bidi_data_sg,
 					      cmd->t_bidi_data_nents);
-		if (!rc) {
+		if (rc <= 0) {
 			cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 			cmd->scsi_sense_reason =
 				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -4046,7 +3909,7 @@
 					     cmd->data_direction,
 					     cmd->t_data_sg,
 					     cmd->t_data_nents);
-	if (!task_cdbs) {
+	if (task_cdbs <= 0) {
 		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
 		cmd->scsi_sense_reason =
 			TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
@@ -4094,12 +3957,6 @@
 	struct page *page;
 	int i = 0;
 
-	/*
-	 * If the device uses memory mapping this is enough.
-	 */
-	if (cmd->se_dev->transport->do_se_mem_map)
-		return 0;
-
 	nents = DIV_ROUND_UP(length, PAGE_SIZE);
 	cmd->t_data_sg = kmalloc(sizeof(struct scatterlist) * nents, GFP_KERNEL);
 	if (!cmd->t_data_sg)
@@ -4176,14 +4033,14 @@
 
 		if (!sg_first) {
 			sg_first = task->task_sg;
-			chained_nents = task->task_sg_num;
+			chained_nents = task->task_sg_nents;
 		} else {
 			sg_chain(sg_prev, sg_prev_nents, task->task_sg);
-			chained_nents += task->task_sg_num;
+			chained_nents += task->task_sg_nents;
 		}
 
 		sg_prev = task->task_sg;
-		sg_prev_nents = task->task_sg_num;
+		sg_prev_nents = task->task_sg_nents;
 	}
 	/*
 	 * Setup the starting pointer and total t_tasks_sg_linked_no including
@@ -4192,19 +4049,19 @@
 	cmd->t_tasks_sg_chained = sg_first;
 	cmd->t_tasks_sg_chained_no = chained_nents;
 
-	DEBUG_CMD_M("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
+	pr_debug("Setup cmd: %p cmd->t_tasks_sg_chained: %p and"
 		" t_tasks_sg_chained_no: %u\n", cmd, cmd->t_tasks_sg_chained,
 		cmd->t_tasks_sg_chained_no);
 
 	for_each_sg(cmd->t_tasks_sg_chained, sg,
 			cmd->t_tasks_sg_chained_no, i) {
 
-		DEBUG_CMD_M("SG[%d]: %p page: %p length: %d offset: %d\n",
+		pr_debug("SG[%d]: %p page: %p length: %d offset: %d\n",
 			i, sg, sg_page(sg), sg->length, sg->offset);
 		if (sg_is_chain(sg))
-			DEBUG_CMD_M("SG: %p sg_is_chain=1\n", sg);
+			pr_debug("SG: %p sg_is_chain=1\n", sg);
 		if (sg_is_last(sg))
-			DEBUG_CMD_M("SG: %p sg_is_last=1\n", sg);
+			pr_debug("SG: %p sg_is_last=1\n", sg);
 	}
 }
 EXPORT_SYMBOL(transport_do_task_sg_chain);
@@ -4266,25 +4123,25 @@
 		 * It's so much easier and only a waste when task_count > 1.
 		 * That is extremely rare.
 		 */
-		task->task_sg_num = sgl_nents;
+		task->task_sg_nents = sgl_nents;
 		if (cmd->se_tfo->task_sg_chaining) {
-			task->task_sg_num++;
+			task->task_sg_nents++;
 			task->task_padded_sg = 1;
 		}
 
 		task->task_sg = kmalloc(sizeof(struct scatterlist) * \
-					task->task_sg_num, GFP_KERNEL);
+					task->task_sg_nents, GFP_KERNEL);
 		if (!task->task_sg) {
 			cmd->se_dev->transport->free_task(task);
 			return -ENOMEM;
 		}
 
-		sg_init_table(task->task_sg, task->task_sg_num);
+		sg_init_table(task->task_sg, task->task_sg_nents);
 
 		task_size = task->task_size;
 
 		/* Build new sgl, only up to task_size */
-		for_each_sg(task->task_sg, sg, task->task_sg_num, count) {
+		for_each_sg(task->task_sg, sg, task->task_sg_nents, count) {
 			if (cmd_sg->length > task_size)
 				break;
 
@@ -4311,6 +4168,7 @@
 	unsigned char *cdb;
 	struct se_task *task;
 	unsigned long flags;
+	int ret = 0;
 
 	task = transport_generic_get_task(cmd, cmd->data_direction);
 	if (!task)
@@ -4331,7 +4189,7 @@
 	memcpy(task->task_sg, cmd->t_data_sg,
 	       sizeof(struct scatterlist) * cmd->t_data_nents);
 	task->task_size = cmd->data_length;
-	task->task_sg_num = cmd->t_data_nents;
+	task->task_sg_nents = cmd->t_data_nents;
 
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	list_add_tail(&task->t_list, &cmd->t_task_list);
@@ -4339,16 +4197,19 @@
 
 	if (cmd->se_cmd_flags & SCF_SCSI_CONTROL_SG_IO_CDB) {
 		if (dev->transport->map_task_SG)
-			return dev->transport->map_task_SG(task);
-		return 0;
+			ret = dev->transport->map_task_SG(task);
 	} else if (cmd->se_cmd_flags & SCF_SCSI_NON_DATA_CDB) {
 		if (dev->transport->cdb_none)
-			return dev->transport->cdb_none(task);
-		return 0;
+			ret = dev->transport->cdb_none(task);
 	} else {
+		pr_err("target: Unknown control cmd type!\n");
 		BUG();
-		return -ENOMEM;
 	}
+
+	/* Success! Return number of tasks allocated */
+	if (ret == 0)
+		return 1;
+	return ret;
 }
 
 static u32 transport_allocate_tasks(
@@ -4358,18 +4219,12 @@
 	struct scatterlist *sgl,
 	unsigned int sgl_nents)
 {
-	int ret;
-
-	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB) {
+	if (cmd->se_cmd_flags & SCF_SCSI_DATA_SG_IO_CDB)
 		return transport_allocate_data_tasks(cmd, lba, data_direction,
 						     sgl, sgl_nents);
-	} else {
-		ret = transport_allocate_control_task(cmd);
-		if (ret < 0)
-			return ret;
-		else
-			return 1;
-	}
+	else
+		return transport_allocate_control_task(cmd);
+
 }
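
Note: with both branches returning directly, transport_allocate_tasks() now
has one contract: a positive count of allocated tasks on success, zero or a
negative errno on failure, so callers can use a single "ret <= 0" test (see
the rc and task_cdbs checks in the transport_generic_new_cmd() hunks above).
A condensed sketch of that caller pattern; the error handling shown is
assumed from the surrounding code, not copied from this patch:

	ret = transport_allocate_tasks(cmd, cmd->t_task_lba,
				       cmd->data_direction,
				       cmd->t_data_sg, cmd->t_data_nents);
	if (ret <= 0) {
		cmd->se_cmd_flags |= SCF_SCSI_CDB_EXCEPTION;
		cmd->scsi_sense_reason =
				TCM_LOGICAL_UNIT_COMMUNICATION_FAILURE;
		/* ... fail the command back to the fabric ... */
	}
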
 
 
@@ -4441,64 +4296,6 @@
  */
 void transport_generic_process_write(struct se_cmd *cmd)
 {
-#if 0
-	/*
-	 * Copy SCSI Presented DTL sector(s) from received buffers allocated to
-	 * original EDTL
-	 */
-	if (cmd->se_cmd_flags & SCF_UNDERFLOW_BIT) {
-		if (!cmd->t_tasks_se_num) {
-			unsigned char *dst, *buf =
-				(unsigned char *)cmd->t_task_buf;
-
-			dst = kzalloc(cmd->cmd_spdtl), GFP_KERNEL);
-			if (!(dst)) {
-				printk(KERN_ERR "Unable to allocate memory for"
-						" WRITE underflow\n");
-				transport_generic_request_failure(cmd, NULL,
-					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
-				return;
-			}
-			memcpy(dst, buf, cmd->cmd_spdtl);
-
-			kfree(cmd->t_task_buf);
-			cmd->t_task_buf = dst;
-		} else {
-			struct scatterlist *sg =
-				(struct scatterlist *sg)cmd->t_task_buf;
-			struct scatterlist *orig_sg;
-
-			orig_sg = kzalloc(sizeof(struct scatterlist) *
-					cmd->t_tasks_se_num,
-					GFP_KERNEL))) {
-			if (!(orig_sg)) {
-				printk(KERN_ERR "Unable to allocate memory"
-						" for WRITE underflow\n");
-				transport_generic_request_failure(cmd, NULL,
-					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
-				return;
-			}
-
-			memcpy(orig_sg, cmd->t_task_buf,
-					sizeof(struct scatterlist) *
-					cmd->t_tasks_se_num);
-
-			cmd->data_length = cmd->cmd_spdtl;
-			/*
-			 * FIXME, clear out original struct se_task and state
-			 * information.
-			 */
-			if (transport_generic_new_cmd(cmd) < 0) {
-				transport_generic_request_failure(cmd, NULL,
-					PYX_TRANSPORT_REQ_TOO_MANY_SECTORS, 1);
-				kfree(orig_sg);
-				return;
-			}
-
-			transport_memcpy_write_sg(cmd, orig_sg);
-		}
-	}
-#endif
 	transport_execute_tasks(cmd);
 }
 EXPORT_SYMBOL(transport_generic_process_write);
@@ -4554,7 +4351,7 @@
 	return PYX_TRANSPORT_WRITE_PENDING;
 
 queue_full:
-	printk(KERN_INFO "Handling write_pending QUEUE__FULL: se_cmd: %p\n", cmd);
+	pr_debug("Handling write_pending QUEUE_FULL: se_cmd: %p\n", cmd);
 	cmd->t_state = TRANSPORT_COMPLETE_QF_WP;
 	transport_handle_queue_full(cmd, cmd->se_dev,
 			transport_write_pending_qf);
@@ -4586,7 +4383,7 @@
 
 		if (cmd->se_lun) {
 #if 0
-			printk(KERN_INFO "cmd: %p ITT: 0x%08x contains"
+			pr_debug("cmd: %p ITT: 0x%08x contains"
 				" cmd->se_lun\n", cmd,
 				cmd->se_tfo->get_task_tag(cmd));
 #endif
@@ -4627,7 +4424,7 @@
 	spin_lock_irqsave(&cmd->t_state_lock, flags);
 	if (atomic_read(&cmd->t_transport_stop)) {
 		atomic_set(&cmd->transport_lun_stop, 0);
-		DEBUG_TRANSPORT_S("ConfigFS ITT[0x%08x] - t_transport_stop =="
+		pr_debug("ConfigFS ITT[0x%08x] - t_transport_stop =="
 			" TRUE, skipping\n", cmd->se_tfo->get_task_tag(cmd));
 		spin_unlock_irqrestore(&cmd->t_state_lock, flags);
 		transport_cmd_check_stop(cmd, 1, 0);
@@ -4640,13 +4437,13 @@
 
 	ret = transport_stop_tasks_for_cmd(cmd);
 
-	DEBUG_TRANSPORT_S("ConfigFS: cmd: %p t_task_cdbs: %d stop tasks ret:"
-			" %d\n", cmd, cmd->t_task_cdbs, ret);
+	pr_debug("ConfigFS: cmd: %p t_tasks: %d stop tasks ret:"
+			" %d\n", cmd, cmd->t_task_list_num, ret);
 	if (!ret) {
-		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
+		pr_debug("ConfigFS: ITT[0x%08x] - stopping cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
 		wait_for_completion(&cmd->transport_lun_stop_comp);
-		DEBUG_TRANSPORT_S("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
+		pr_debug("ConfigFS: ITT[0x%08x] - stopped cmd....\n",
 				cmd->se_tfo->get_task_tag(cmd));
 	}
 	transport_remove_cmd_from_queue(cmd, &cmd->se_dev->dev_queue_obj);
@@ -4654,13 +4451,6 @@
 	return 0;
 }
 
-/* #define DEBUG_CLEAR_LUN */
-#ifdef DEBUG_CLEAR_LUN
-#define DEBUG_CLEAR_L(x...) printk(KERN_INFO x)
-#else
-#define DEBUG_CLEAR_L(x...)
-#endif
-
 static void __transport_clear_lun_from_sessions(struct se_lun *lun)
 {
 	struct se_cmd *cmd = NULL;
@@ -4682,7 +4472,7 @@
 		 * progress for the iscsi_cmd_t.
 		 */
 		spin_lock(&cmd->t_state_lock);
-		DEBUG_CLEAR_L("SE_LUN[%d] - Setting cmd->transport"
+		pr_debug("SE_LUN[%d] - Setting cmd->transport"
 			"_lun_stop for  ITT: 0x%08x\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
@@ -4691,8 +4481,8 @@
 
 		spin_unlock_irqrestore(&lun->lun_cmd_lock, lun_flags);
 
-		if (!(cmd->se_lun)) {
-			printk(KERN_ERR "ITT: 0x%08x, [i,t]_state: %u/%u\n",
+		if (!cmd->se_lun) {
+			pr_err("ITT: 0x%08x, [i,t]_state: %u/%u\n",
 				cmd->se_tfo->get_task_tag(cmd),
 				cmd->se_tfo->get_cmd_state(cmd), cmd->t_state);
 			BUG();
@@ -4701,7 +4491,7 @@
 		 * If the Storage engine still owns the iscsi_cmd_t, determine
 		 * and/or stop its context.
 		 */
-		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x before transport"
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x before transport"
 			"_lun_wait_for_tasks()\n", cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
 
@@ -4710,13 +4500,13 @@
 			continue;
 		}
 
-		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x after transport_lun"
 			"_wait_for_tasks(): SUCCESS\n",
 			cmd->se_lun->unpacked_lun,
 			cmd->se_tfo->get_task_tag(cmd));
 
 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
-		if (!(atomic_read(&cmd->transport_dev_active))) {
+		if (!atomic_read(&cmd->transport_dev_active)) {
 			spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
 			goto check_cond;
 		}
@@ -4741,7 +4531,7 @@
 		 */
 		spin_lock_irqsave(&cmd->t_state_lock, cmd_flags);
 		if (atomic_read(&cmd->transport_lun_fe_stop)) {
-			DEBUG_CLEAR_L("SE_LUN[%d] - Detected FE stop for"
+			pr_debug("SE_LUN[%d] - Detected FE stop for"
 				" struct se_cmd: %p ITT: 0x%08x\n",
 				lun->unpacked_lun,
 				cmd, cmd->se_tfo->get_task_tag(cmd));
@@ -4753,7 +4543,7 @@
 			spin_lock_irqsave(&lun->lun_cmd_lock, lun_flags);
 			continue;
 		}
-		DEBUG_CLEAR_L("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
+		pr_debug("SE_LUN[%d] - ITT: 0x%08x finished processing\n",
 			lun->unpacked_lun, cmd->se_tfo->get_task_tag(cmd));
 
 		spin_unlock_irqrestore(&cmd->t_state_lock, cmd_flags);
@@ -4779,7 +4569,7 @@
 	kt = kthread_run(transport_clear_lun_thread, lun,
 			"tcm_cl_%u", lun->unpacked_lun);
 	if (IS_ERR(kt)) {
-		printk(KERN_ERR "Unable to start clear_lun thread\n");
+		pr_err("Unable to start clear_lun thread\n");
 		return PTR_ERR(kt);
 	}
 	wait_for_completion(&lun->lun_shutdown_comp);
@@ -4812,7 +4602,7 @@
 	 */
 	if (atomic_read(&cmd->transport_lun_stop)) {
 
-		DEBUG_TRANSPORT_S("wait_for_tasks: Stopping"
+		pr_debug("wait_for_tasks: Stopping"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe"
 			"_stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));
@@ -4834,7 +4624,7 @@
 		 * struct se_cmd, now owns the structure and can be released through
 		 * normal means below.
 		 */
-		DEBUG_TRANSPORT_S("wait_for_tasks: Stopped"
+		pr_debug("wait_for_tasks: Stopped"
 			" wait_for_completion(&cmd->t_tasktransport_lun_fe_"
 			"stop_comp); for ITT: 0x%08x\n",
 			cmd->se_tfo->get_task_tag(cmd));
@@ -4847,7 +4637,7 @@
 
 	atomic_set(&cmd->t_transport_stop, 1);
 
-	DEBUG_TRANSPORT_S("wait_for_tasks: Stopping %p ITT: 0x%08x"
+	pr_debug("wait_for_tasks: Stopping %p ITT: 0x%08x"
 		" i_state: %d, t_state/def_t_state: %d/%d, t_transport_stop"
 		" = TRUE\n", cmd, cmd->se_tfo->get_task_tag(cmd),
 		cmd->se_tfo->get_cmd_state(cmd), cmd->t_state,
@@ -4863,7 +4653,7 @@
 	atomic_set(&cmd->t_transport_active, 0);
 	atomic_set(&cmd->t_transport_stop, 0);
 
-	DEBUG_TRANSPORT_S("wait_for_tasks: Stopped wait_for_compltion("
+	pr_debug("wait_for_tasks: Stopped wait_for_completion("
 		"&cmd->t_transport_stop_comp) for ITT: 0x%08x\n",
 		cmd->se_tfo->get_task_tag(cmd));
 remove:
@@ -5071,11 +4861,11 @@
 	int ret = 0;
 
 	if (atomic_read(&cmd->t_transport_aborted) != 0) {
-		if (!(send_status) ||
+		if (!send_status ||
 		     (cmd->se_cmd_flags & SCF_SENT_DELAYED_TAS))
 			return 1;
 #if 0
-		printk(KERN_INFO "Sending delayed SAM_STAT_TASK_ABORTED"
+		pr_debug("Sending delayed SAM_STAT_TASK_ABORTED"
 			" status for CDB: 0x%02x ITT: 0x%08x\n",
 			cmd->t_task_cdb[0],
 			cmd->se_tfo->get_task_tag(cmd));
@@ -5107,7 +4897,7 @@
 	}
 	cmd->scsi_status = SAM_STAT_TASK_ABORTED;
 #if 0
-	printk(KERN_INFO "Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
+	pr_debug("Setting SAM_STAT_TASK_ABORTED status for CDB: 0x%02x,"
 		" ITT: 0x%08x\n", cmd->t_task_cdb[0],
 		cmd->se_tfo->get_task_tag(cmd));
 #endif
@@ -5145,7 +4935,7 @@
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
 	default:
-		printk(KERN_ERR "Uknown TMR function: 0x%02x.\n",
+		pr_err("Unknown TMR function: 0x%02x.\n",
 				tmr->function);
 		tmr->response = TMR_FUNCTION_REJECTED;
 		break;
@@ -5190,7 +4980,7 @@
 	spin_lock_irqsave(&dev->execute_task_lock, flags);
 	while ((task = transport_get_task_from_state_list(dev))) {
 		if (!task->task_se_cmd) {
-			printk(KERN_ERR "task->task_se_cmd is NULL!\n");
+			pr_err("task->task_se_cmd is NULL!\n");
 			continue;
 		}
 		cmd = task->task_se_cmd;
@@ -5199,18 +4989,18 @@
 
 		spin_lock_irqsave(&cmd->t_state_lock, flags);
 
-		DEBUG_DO("PT: cmd: %p task: %p ITT/CmdSN: 0x%08x/0x%08x,"
-			" i_state/def_i_state: %d/%d, t_state/def_t_state:"
+		pr_debug("PT: cmd: %p task: %p ITT: 0x%08x,"
+			" i_state: %d, t_state/def_t_state:"
 			" %d/%d cdb: 0x%02x\n", cmd, task,
-			cmd->se_tfo->get_task_tag(cmd), cmd->cmd_sn,
-			cmd->se_tfo->get_cmd_state(cmd), cmd->deferred_i_state,
+			cmd->se_tfo->get_task_tag(cmd),
+			cmd->se_tfo->get_cmd_state(cmd),
 			cmd->t_state, cmd->deferred_t_state,
 			cmd->t_task_cdb[0]);
-		DEBUG_DO("PT: ITT[0x%08x] - t_task_cdbs: %d t_task_cdbs_left:"
+		pr_debug("PT: ITT[0x%08x] - t_tasks: %d t_task_cdbs_left:"
 			" %d t_task_cdbs_sent: %d -- t_transport_active: %d"
 			" t_transport_stop: %d t_transport_sent: %d\n",
 			cmd->se_tfo->get_task_tag(cmd),
-			cmd->t_task_cdbs,
+			cmd->t_task_list_num,
 			atomic_read(&cmd->t_task_cdbs_left),
 			atomic_read(&cmd->t_task_cdbs_sent),
 			atomic_read(&cmd->t_transport_active),
@@ -5222,10 +5012,10 @@
 			spin_unlock_irqrestore(
 				&cmd->t_state_lock, flags);
 
-			DEBUG_DO("Waiting for task: %p to shutdown for dev:"
+			pr_debug("Waiting for task: %p to shutdown for dev:"
 				" %p\n", task, dev);
 			wait_for_completion(&task->task_stop_comp);
-			DEBUG_DO("Completed task: %p shutdown for dev: %p\n",
+			pr_debug("Completed task: %p shutdown for dev: %p\n",
 				task, dev);
 
 			spin_lock_irqsave(&cmd->t_state_lock, flags);
@@ -5239,11 +5029,11 @@
 		}
 		__transport_stop_task_timer(task, &flags);
 
-		if (!(atomic_dec_and_test(&cmd->t_task_cdbs_ex_left))) {
+		if (!atomic_dec_and_test(&cmd->t_task_cdbs_ex_left)) {
 			spin_unlock_irqrestore(
 					&cmd->t_state_lock, flags);
 
-			DEBUG_DO("Skipping task: %p, dev: %p for"
+			pr_debug("Skipping task: %p, dev: %p for"
 				" t_task_cdbs_ex_left: %d\n", task, dev,
 				atomic_read(&cmd->t_task_cdbs_ex_left));
 
@@ -5252,7 +5042,7 @@
 		}
 
 		if (atomic_read(&cmd->t_transport_active)) {
-			DEBUG_DO("got t_transport_active = 1 for task: %p, dev:"
+			pr_debug("got t_transport_active = 1 for task: %p, dev:"
 					" %p\n", task, dev);
 
 			if (atomic_read(&cmd->t_fe_count)) {
@@ -5282,7 +5072,7 @@
 			spin_lock_irqsave(&dev->execute_task_lock, flags);
 			continue;
 		}
-		DEBUG_DO("Got t_transport_active = 0 for task: %p, dev: %p\n",
+		pr_debug("Got t_transport_active = 0 for task: %p, dev: %p\n",
 				task, dev);
 
 		if (atomic_read(&cmd->t_fe_count)) {
@@ -5315,7 +5105,7 @@
 	 */
 	while ((cmd = transport_get_cmd_from_queue(&dev->dev_queue_obj))) {
 
-		DEBUG_DO("From Device Queue: cmd: %p t_state: %d\n",
+		pr_debug("From Device Queue: cmd: %p t_state: %d\n",
 				cmd, cmd->t_state);
 
 		if (atomic_read(&cmd->t_fe_count)) {
@@ -5368,8 +5158,8 @@
 
 		switch (cmd->t_state) {
 		case TRANSPORT_NEW_CMD_MAP:
-			if (!(cmd->se_tfo->new_cmd_map)) {
-				printk(KERN_ERR "cmd->se_tfo->new_cmd_map is"
+			if (!cmd->se_tfo->new_cmd_map) {
+				pr_err("cmd->se_tfo->new_cmd_map is"
 					" NULL for TRANSPORT_NEW_CMD_MAP\n");
 				BUG();
 			}
@@ -5420,7 +5210,7 @@
 			transport_generic_write_pending(cmd);
 			break;
 		default:
-			printk(KERN_ERR "Unknown t_state: %d deferred_t_state:"
+			pr_err("Unknown t_state: %d deferred_t_state:"
 				" %d for ITT: 0x%08x i_state: %d on SE LUN:"
 				" %u\n", cmd->t_state, cmd->deferred_t_state,
 				cmd->se_tfo->get_task_tag(cmd),
diff --git a/drivers/target/target_core_ua.c b/drivers/target/target_core_ua.c
index d28e9c4..31e3c65 100644
--- a/drivers/target/target_core_ua.c
+++ b/drivers/target/target_core_ua.c
@@ -49,15 +49,15 @@
 	struct se_session *sess = cmd->se_sess;
 	struct se_node_acl *nacl;
 
-	if (!(sess))
+	if (!sess)
 		return 0;
 
 	nacl = sess->se_node_acl;
-	if (!(nacl))
+	if (!nacl)
 		return 0;
 
 	deve = &nacl->device_list[cmd->orig_fe_lun];
-	if (!(atomic_read(&deve->ua_count)))
+	if (!atomic_read(&deve->ua_count))
 		return 0;
 	/*
 	 * From sam4r14, section 5.14 Unit attention condition:
@@ -97,12 +97,12 @@
 	/*
 	 * PASSTHROUGH OPS
 	 */
-	if (!(nacl))
+	if (!nacl)
 		return -EINVAL;
 
 	ua = kmem_cache_zalloc(se_ua_cache, GFP_ATOMIC);
-	if (!(ua)) {
-		printk(KERN_ERR "Unable to allocate struct se_ua\n");
+	if (!ua) {
+		pr_err("Unable to allocate struct se_ua\n");
 		return -ENOMEM;
 	}
 	INIT_LIST_HEAD(&ua->ua_dev_list);
@@ -177,7 +177,7 @@
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	printk(KERN_INFO "[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
+	pr_debug("[%s]: Allocated UNIT ATTENTION, mapped LUN: %u, ASC:"
 		" 0x%02x, ASCQ: 0x%02x\n",
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(), unpacked_lun,
 		asc, ascq);
@@ -215,16 +215,16 @@
 	struct se_ua *ua = NULL, *ua_p;
 	int head = 1;
 
-	if (!(sess))
+	if (!sess)
 		return;
 
 	nacl = sess->se_node_acl;
-	if (!(nacl))
+	if (!nacl)
 		return;
 
 	spin_lock_irq(&nacl->device_list_lock);
 	deve = &nacl->device_list[cmd->orig_fe_lun];
-	if (!(atomic_read(&deve->ua_count))) {
+	if (!atomic_read(&deve->ua_count)) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return;
 	}
@@ -264,7 +264,7 @@
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	printk(KERN_INFO "[%s]: %s UNIT ATTENTION condition with"
+	pr_debug("[%s]: %s UNIT ATTENTION condition with"
 		" INTLCK_CTRL: %d, mapped LUN: %u, got CDB: 0x%02x"
 		" reported ASC: 0x%02x, ASCQ: 0x%02x\n",
 		nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
@@ -284,16 +284,16 @@
 	struct se_ua *ua = NULL, *ua_p;
 	int head = 1;
 
-	if (!(sess))
+	if (!sess)
 		return -EINVAL;
 
 	nacl = sess->se_node_acl;
-	if (!(nacl))
+	if (!nacl)
 		return -EINVAL;
 
 	spin_lock_irq(&nacl->device_list_lock);
 	deve = &nacl->device_list[cmd->orig_fe_lun];
-	if (!(atomic_read(&deve->ua_count))) {
+	if (!atomic_read(&deve->ua_count)) {
 		spin_unlock_irq(&nacl->device_list_lock);
 		return -EPERM;
 	}
@@ -323,7 +323,7 @@
 	spin_unlock(&deve->ua_lock);
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	printk(KERN_INFO "[%s]: Released UNIT ATTENTION condition, mapped"
+	pr_debug("[%s]: Released UNIT ATTENTION condition, mapped"
 		" LUN: %u, got REQUEST_SENSE reported ASC: 0x%02x,"
 		" ASCQ: 0x%02x\n", nacl->se_tpg->se_tpg_tfo->get_fabric_name(),
 		cmd->orig_fe_lun, *asc, *ascq);
diff --git a/drivers/target/tcm_fc/tcm_fc.h b/drivers/target/tcm_fc/tcm_fc.h
index 8d26779..f7fff7e 100644
--- a/drivers/target/tcm_fc/tcm_fc.h
+++ b/drivers/target/tcm_fc/tcm_fc.h
@@ -23,30 +23,6 @@
 #define FT_TPG_NAMELEN 32	/* max length of TPG name */
 #define FT_LUN_NAMELEN 32	/* max length of LUN name */
 
-/*
- * Debug options.
- */
-#define FT_DEBUG_CONF	0x01	/* configuration messages */
-#define	FT_DEBUG_SESS	0x02	/* session messages */
-#define	FT_DEBUG_TM	0x04	/* TM operations */
-#define	FT_DEBUG_IO	0x08	/* I/O commands */
-#define	FT_DEBUG_DATA	0x10	/* Data transfer */
-
-extern unsigned int ft_debug_logging;	/* debug options */
-
-#define FT_DEBUG(mask, fmt, args...)					\
-	do {								\
-		if (ft_debug_logging & (mask))				\
-			printk(KERN_INFO "tcm_fc: %s: " fmt,		\
-				__func__, ##args);			\
-	} while (0)
-
-#define	FT_CONF_DBG(fmt, args...)	FT_DEBUG(FT_DEBUG_CONF, fmt, ##args)
-#define	FT_SESS_DBG(fmt, args...)	FT_DEBUG(FT_DEBUG_SESS, fmt, ##args)
-#define	FT_TM_DBG(fmt, args...)		FT_DEBUG(FT_DEBUG_TM, fmt, ##args)
-#define	FT_IO_DBG(fmt, args...)		FT_DEBUG(FT_DEBUG_IO, fmt, ##args)
-#define	FT_DATA_DBG(fmt, args...)	FT_DEBUG(FT_DEBUG_DATA, fmt, ##args)
-
 struct ft_transport_id {
 	__u8	format;
 	__u8	__resvd1[7];
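
Note: the removed FT_DEBUG() wrapper gated output at run time on the
ft_debug_logging mask; plain pr_debug() gets equivalent run-time control per
call site from CONFIG_DYNAMIC_DEBUG, and a file-local pr_fmt() can keep the
old "tcm_fc: " prefix.  A minimal sketch; the pr_fmt() define is an
assumption for illustration and is not part of this patch:

	/* Assumed: prefix all pr_*() output from this file.  Must be defined
	 * before the first include that pulls in <linux/printk.h>. */
	#define pr_fmt(fmt) "tcm_fc: " fmt

	/* old, mask-gated:   FT_SESS_DBG("port_id %x\n", port_id);
	 * new, per-callsite: */
	pr_debug("%s: port_id %x\n", __func__, port_id);
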
diff --git a/drivers/target/tcm_fc/tfc_cmd.c b/drivers/target/tcm_fc/tfc_cmd.c
index 9365e539..a9e9a31 100644
--- a/drivers/target/tcm_fc/tfc_cmd.c
+++ b/drivers/target/tcm_fc/tfc_cmd.c
@@ -62,22 +62,19 @@
 	struct scatterlist *sg;
 	int count;
 
-	if (!(ft_debug_logging & FT_DEBUG_IO))
-		return;
-
 	se_cmd = &cmd->se_cmd;
-	printk(KERN_INFO "%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
+	pr_debug("%s: cmd %p state %d sess %p seq %p se_cmd %p\n",
 		caller, cmd, cmd->state, cmd->sess, cmd->seq, se_cmd);
-	printk(KERN_INFO "%s: cmd %p cdb %p\n",
+	pr_debug("%s: cmd %p cdb %p\n",
 		caller, cmd, cmd->cdb);
-	printk(KERN_INFO "%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
+	pr_debug("%s: cmd %p lun %d\n", caller, cmd, cmd->lun);
 
-	printk(KERN_INFO "%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
+	pr_debug("%s: cmd %p data_nents %u len %u se_cmd_flags <0x%x>\n",
 		caller, cmd, se_cmd->t_data_nents,
 	       se_cmd->data_length, se_cmd->se_cmd_flags);
 
 	for_each_sg(se_cmd->t_data_sg, sg, se_cmd->t_data_nents, count)
-		printk(KERN_INFO "%s: cmd %p sg %p page %p "
+		pr_debug("%s: cmd %p sg %p page %p "
 			"len 0x%x off 0x%x\n",
 			caller, cmd, sg,
 			sg_page(sg), sg->length, sg->offset);
@@ -85,7 +82,7 @@
 	sp = cmd->seq;
 	if (sp) {
 		ep = fc_seq_exch(sp);
-		printk(KERN_INFO "%s: cmd %p sid %x did %x "
+		pr_debug("%s: cmd %p sid %x did %x "
 			"ox_id %x rx_id %x seq_id %x e_stat %x\n",
 			caller, cmd, ep->sid, ep->did, ep->oxid, ep->rxid,
 			sp->id, ep->esb_stat);
@@ -321,7 +318,7 @@
 	case FC_RCTL_DD_SOL_CTL:	/* transfer ready */
 	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
 	default:
-		printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+		pr_debug("%s: unhandled frame r_ctl %x\n",
 		       __func__, fh->fh_r_ctl);
 		fc_frame_free(fp);
 		transport_generic_free_cmd(&cmd->se_cmd, 0, 0);
@@ -346,7 +343,7 @@
 	struct fcp_resp_rsp_info *info;
 
 	fh = fc_frame_header_get(rx_fp);
-	FT_IO_DBG("FCP error response: did %x oxid %x status %x code %x\n",
+	pr_debug("FCP error response: did %x oxid %x status %x code %x\n",
 		  ntoh24(fh->fh_s_id), ntohs(fh->fh_ox_id), status, code);
 	len = sizeof(*fcp);
 	if (status == SAM_STAT_GOOD)
@@ -416,15 +413,15 @@
 		 * FCP4r01 indicates having a combination of
 		 * tm_flags set is invalid.
 		 */
-		FT_TM_DBG("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
+		pr_debug("invalid FCP tm_flags %x\n", fcp->fc_tm_flags);
 		ft_send_resp_code(cmd, FCP_CMND_FIELDS_INVALID);
 		return;
 	}
 
-	FT_TM_DBG("alloc tm cmd fn %d\n", tm_func);
+	pr_debug("alloc tm cmd fn %d\n", tm_func);
 	tmr = core_tmr_alloc_req(&cmd->se_cmd, cmd, tm_func);
 	if (!tmr) {
-		FT_TM_DBG("alloc failed\n");
+		pr_debug("alloc failed\n");
 		ft_send_resp_code(cmd, FCP_TMF_FAILED);
 		return;
 	}
@@ -439,7 +436,7 @@
 			 * since "unable to  handle TMR request because failed
 			 * to get to LUN"
 			 */
-			FT_TM_DBG("Failed to get LUN for TMR func %d, "
+			pr_debug("Failed to get LUN for TMR func %d, "
 				  "se_cmd %p, unpacked_lun %d\n",
 				  tm_func, &cmd->se_cmd, cmd->lun);
 			ft_dump_cmd(cmd, __func__);
@@ -490,7 +487,7 @@
 		code = FCP_TMF_FAILED;
 		break;
 	}
-	FT_TM_DBG("tmr fn %d resp %d fcp code %d\n",
+	pr_debug("tmr fn %d resp %d fcp code %d\n",
 		  tmr->function, tmr->response, code);
 	ft_send_resp_code(cmd, code);
 	return 0;
@@ -518,7 +515,7 @@
 	return;
 
 busy:
-	FT_IO_DBG("cmd or seq allocation failure - sending BUSY\n");
+	pr_debug("cmd or seq allocation failure - sending BUSY\n");
 	ft_send_resp_status(lport, fp, SAM_STAT_BUSY, 0);
 	fc_frame_free(fp);
 	ft_sess_put(sess);		/* undo get from lookup */
@@ -543,7 +540,7 @@
 	case FC_RCTL_DD_DATA_DESC:	/* transfer ready */
 	case FC_RCTL_ELS4_REQ:		/* SRR, perhaps */
 	default:
-		printk(KERN_INFO "%s: unhandled frame r_ctl %x\n",
+		pr_debug("%s: unhandled frame r_ctl %x\n",
 		       __func__, fh->fh_r_ctl);
 		fc_frame_free(fp);
 		ft_sess_put(sess);	/* undo get from lookup */
@@ -642,7 +639,7 @@
 
 	ret = transport_generic_allocate_tasks(se_cmd, cmd->cdb);
 
-	FT_IO_DBG("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
+	pr_debug("r_ctl %x alloc task ret %d\n", fh->fh_r_ctl, ret);
 	ft_dump_cmd(cmd, __func__);
 
 	if (ret == -ENOMEM) {
@@ -672,7 +669,7 @@
  */
 static void ft_exec_req(struct ft_cmd *cmd)
 {
-	FT_IO_DBG("cmd state %x\n", cmd->state);
+	pr_debug("cmd state %x\n", cmd->state);
 	switch (cmd->state) {
 	case FC_CMD_ST_NEW:
 		ft_send_cmd(cmd);
diff --git a/drivers/target/tcm_fc/tfc_conf.c b/drivers/target/tcm_fc/tfc_conf.c
index ec9e40d..d63e3dd 100644
--- a/drivers/target/tcm_fc/tfc_conf.c
+++ b/drivers/target/tcm_fc/tfc_conf.c
@@ -106,7 +106,7 @@
 	}
 	err = 4;
 fail:
-	FT_CONF_DBG("err %u len %zu pos %u byte %u\n",
+	pr_debug("err %u len %zu pos %u byte %u\n",
 		    err, cp - name, pos, byte);
 	return -1;
 }
@@ -216,7 +216,7 @@
 	u64 wwpn;
 	u32 q_depth;
 
-	FT_CONF_DBG("add acl %s\n", name);
+	pr_debug("add acl %s\n", name);
 	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
 
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
@@ -239,11 +239,11 @@
 	struct ft_node_acl *acl = container_of(se_acl,
 				struct ft_node_acl, se_node_acl);
 
-	FT_CONF_DBG("del acl %s\n",
+	pr_debug("del acl %s\n",
 		config_item_name(&se_acl->acl_group.cg_item));
 
 	tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
-	FT_CONF_DBG("del acl %p se_acl %p tpg %p se_tpg %p\n",
+	pr_debug("del acl %p se_acl %p tpg %p se_tpg %p\n",
 		    acl, se_acl, tpg, &tpg->se_tpg);
 
 	core_tpg_del_initiator_node_acl(&tpg->se_tpg, se_acl, 1);
@@ -260,11 +260,11 @@
 	spin_lock_bh(&se_tpg->acl_node_lock);
 	list_for_each_entry(se_acl, &se_tpg->acl_node_list, acl_list) {
 		acl = container_of(se_acl, struct ft_node_acl, se_node_acl);
-		FT_CONF_DBG("acl %p port_name %llx\n",
+		pr_debug("acl %p port_name %llx\n",
 			acl, (unsigned long long)acl->node_auth.port_name);
 		if (acl->node_auth.port_name == rdata->ids.port_name ||
 		    acl->node_auth.node_name == rdata->ids.node_name) {
-			FT_CONF_DBG("acl %p port_name %llx matched\n", acl,
+			pr_debug("acl %p port_name %llx matched\n", acl,
 				    (unsigned long long)rdata->ids.port_name);
 			found = acl;
 			/* XXX need to hold onto ACL */
@@ -281,10 +281,10 @@
 
 	acl = kzalloc(sizeof(*acl), GFP_KERNEL);
 	if (!acl) {
-		printk(KERN_ERR "Unable to allocate struct ft_node_acl\n");
+		pr_err("Unable to allocate struct ft_node_acl\n");
 		return NULL;
 	}
-	FT_CONF_DBG("acl %p\n", acl);
+	pr_debug("acl %p\n", acl);
 	return &acl->se_node_acl;
 }
 
@@ -294,7 +294,7 @@
 	struct ft_node_acl *acl = container_of(se_acl,
 				struct ft_node_acl, se_node_acl);
 
-	FT_CONF_DBG(KERN_INFO "acl %p\n", acl);
+	pr_debug("acl %p\n", acl);
 	kfree(acl);
 }
 
@@ -311,7 +311,7 @@
 	unsigned long index;
 	int ret;
 
-	FT_CONF_DBG("tcm_fc: add tpg %s\n", name);
+	pr_debug("tcm_fc: add tpg %s\n", name);
 
 	/*
 	 * Name must be "tpgt_" followed by the index.
@@ -354,7 +354,7 @@
 {
 	struct ft_tpg *tpg = container_of(se_tpg, struct ft_tpg, se_tpg);
 
-	FT_CONF_DBG("del tpg %s\n",
+	pr_debug("del tpg %s\n",
 		    config_item_name(&tpg->se_tpg.tpg_group.cg_item));
 
 	kthread_stop(tpg->thread);
@@ -412,7 +412,7 @@
 	struct ft_lport_acl *old_lacl;
 	u64 wwpn;
 
-	FT_CONF_DBG("add lport %s\n", name);
+	pr_debug("add lport %s\n", name);
 	if (ft_parse_wwn(name, &wwpn, 1) < 0)
 		return NULL;
 	lacl = kzalloc(sizeof(*lacl), GFP_KERNEL);
@@ -441,7 +441,7 @@
 	struct ft_lport_acl *lacl = container_of(wwn,
 				struct ft_lport_acl, fc_lport_wwn);
 
-	FT_CONF_DBG("del lport %s\n",
+	pr_debug("del lport %s\n",
 			config_item_name(&wwn->wwn_group.cg_item));
 	mutex_lock(&ft_lport_lock);
 	list_del(&lacl->list);
@@ -581,7 +581,7 @@
 	 */
 	fabric = target_fabric_configfs_init(THIS_MODULE, "fc");
 	if (IS_ERR(fabric)) {
-		printk(KERN_INFO "%s: target_fabric_configfs_init() failed!\n",
+		pr_err("%s: target_fabric_configfs_init() failed!\n",
 		       __func__);
 		return PTR_ERR(fabric);
 	}
@@ -608,11 +608,8 @@
 	 */
 	ret = target_fabric_configfs_register(fabric);
 	if (ret < 0) {
-		FT_CONF_DBG("target_fabric_configfs_register() for"
+		pr_debug("target_fabric_configfs_register() for"
 			    " FC Target failed!\n");
-		printk(KERN_INFO
-		       "%s: target_fabric_configfs_register() failed!\n",
-		       __func__);
 		target_fabric_configfs_free(fabric);
 		return -1;
 	}
diff --git a/drivers/target/tcm_fc/tfc_io.c b/drivers/target/tcm_fc/tfc_io.c
index 3563a90..11e6483 100644
--- a/drivers/target/tcm_fc/tfc_io.c
+++ b/drivers/target/tcm_fc/tfc_io.c
@@ -39,6 +39,7 @@
 #include <linux/configfs.h>
 #include <linux/ctype.h>
 #include <linux/hash.h>
+#include <linux/ratelimit.h>
 #include <asm/unaligned.h>
 #include <scsi/scsi.h>
 #include <scsi/scsi_host.h>
@@ -176,8 +177,7 @@
 		error = lport->tt.seq_send(lport, cmd->seq, fp);
 		if (error) {
 			/* XXX For now, initiator will retry */
-			if (printk_ratelimit())
-				printk(KERN_ERR "%s: Failed to send frame %p, "
+			pr_err_ratelimited("%s: Failed to send frame %p, "
 						"xid <0x%x>, remaining %zu, "
 						"lso_max <0x%x>\n",
 						__func__, fp, ep->xid,
@@ -222,7 +222,7 @@
 	 */
 	buf = fc_frame_payload_get(fp, 1);
 	if (cmd->was_ddp_setup && buf) {
-		printk(KERN_INFO "%s: When DDP was setup, not expected to"
+		pr_debug("%s: When DDP was setup, not expected to "
 				 "receive frame with payload, Payload shall be "
 				 "copied directly to buffer instead of coming "
 				 "via. legacy receive queues\n", __func__);
@@ -260,7 +260,7 @@
 			 * this point, but just in case if required in future
 			 * for debugging or any other purpose
 			 */
-			printk(KERN_ERR "%s: Received frame with TSI bit not"
+			pr_err("%s: Received frame with TSI bit not"
 					" being SET, dropping the frame, "
 					"cmd->sg <%p>, cmd->sg_cnt <0x%x>\n",
 					__func__, cmd->sg, cmd->sg_cnt);
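
Note: pr_err_ratelimited() folds the old printk_ratelimit() test into the
message itself, and the new <linux/ratelimit.h> include supplies the
per-callsite state it expands to.  Approximate sketch of the generic printk
helper (for context only, not code from this patch):

	#define printk_ratelimited(fmt, ...)				\
	({								\
		static DEFINE_RATELIMIT_STATE(_rs,			\
					      DEFAULT_RATELIMIT_INTERVAL, \
					      DEFAULT_RATELIMIT_BURST);	\
									\
		if (__ratelimit(&_rs))					\
			printk(fmt, ##__VA_ARGS__);			\
	})

Unlike printk_ratelimit(), which shares one global ratelimit state, this
keeps a DEFINE_RATELIMIT_STATE per call site, so one noisy message cannot
starve the others.
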
diff --git a/drivers/target/tcm_fc/tfc_sess.c b/drivers/target/tcm_fc/tfc_sess.c
index 7491e21..fbcbb3d 100644
--- a/drivers/target/tcm_fc/tfc_sess.c
+++ b/drivers/target/tcm_fc/tfc_sess.c
@@ -198,13 +198,13 @@
 		if (sess->port_id == port_id) {
 			kref_get(&sess->kref);
 			rcu_read_unlock();
-			FT_SESS_DBG("port_id %x found %p\n", port_id, sess);
+			pr_debug("port_id %x found %p\n", port_id, sess);
 			return sess;
 		}
 	}
 out:
 	rcu_read_unlock();
-	FT_SESS_DBG("port_id %x not found\n", port_id);
+	pr_debug("port_id %x not found\n", port_id);
 	return NULL;
 }
 
@@ -240,7 +240,7 @@
 	hlist_add_head_rcu(&sess->hash, head);
 	tport->sess_count++;
 
-	FT_SESS_DBG("port_id %x sess %p\n", port_id, sess);
+	pr_debug("port_id %x sess %p\n", port_id, sess);
 
 	transport_register_session(&tport->tpg->se_tpg, &acl->se_node_acl,
 				   sess->se_sess, sess);
@@ -314,7 +314,7 @@
 {
 	struct ft_sess *sess = se_sess->fabric_sess_ptr;
 
-	FT_SESS_DBG("port_id %x\n", sess->port_id);
+	pr_debug("port_id %x\n", sess->port_id);
 	return 1;
 }
 
@@ -335,7 +335,7 @@
 		mutex_unlock(&ft_lport_lock);
 		return;
 	}
-	FT_SESS_DBG("port_id %x\n", port_id);
+	pr_debug("port_id %x\n", port_id);
 	ft_sess_unhash(sess);
 	mutex_unlock(&ft_lport_lock);
 	transport_deregister_session_configfs(se_sess);
@@ -348,7 +348,7 @@
 {
 	struct ft_sess *sess = se_sess->fabric_sess_ptr;
 
-	FT_SESS_DBG("port_id %x\n", sess->port_id);
+	pr_debug("port_id %x\n", sess->port_id);
 }
 
 int ft_sess_logged_in(struct se_session *se_sess)
@@ -458,7 +458,7 @@
 	mutex_lock(&ft_lport_lock);
 	ret = ft_prli_locked(rdata, spp_len, rspp, spp);
 	mutex_unlock(&ft_lport_lock);
-	FT_SESS_DBG("port_id %x flags %x ret %x\n",
+	pr_debug("port_id %x flags %x ret %x\n",
 	       rdata->ids.port_id, rspp ? rspp->spp_flags : 0, ret);
 	return ret;
 }
@@ -518,11 +518,11 @@
 	struct ft_sess *sess;
 	u32 sid = fc_frame_sid(fp);
 
-	FT_SESS_DBG("sid %x\n", sid);
+	pr_debug("sid %x\n", sid);
 
 	sess = ft_sess_get(lport, sid);
 	if (!sess) {
-		FT_SESS_DBG("sid %x sess lookup failed\n", sid);
+		pr_debug("sid %x sess lookup failed\n", sid);
 		/* TBD XXX - if FCP_CMND, send PRLO */
 		fc_frame_free(fp);
 		return;
diff --git a/include/target/target_core_base.h b/include/target/target_core_base.h
index 465f266..8204c76 100644
--- a/include/target/target_core_base.h
+++ b/include/target/target_core_base.h
@@ -403,7 +403,7 @@
 struct se_task {
 	unsigned char	task_sense;
 	struct scatterlist *task_sg;
-	u32		task_sg_num;
+	u32		task_sg_nents;
 	struct scatterlist *task_sg_bidi;
 	u8		task_scsi_status;
 	u8		task_flags;
diff --git a/include/target/target_core_transport.h b/include/target/target_core_transport.h
index 123df92..7409576 100644
--- a/include/target/target_core_transport.h
+++ b/include/target/target_core_transport.h
@@ -292,7 +292,7 @@
 	 * drivers.  Provided out of convenience.
 	 */
 	int (*transport_complete)(struct se_task *task);
-	struct se_task *(*alloc_task)(struct se_cmd *);
+	struct se_task *(*alloc_task)(unsigned char *cdb);
 	/*
 	 * do_task():
 	 */
@@ -342,11 +342,6 @@
 	 */
 	sector_t (*get_blocks)(struct se_device *);
 	/*
-	 * do_se_mem_map():
-	 */
-	int (*do_se_mem_map)(struct se_task *, struct list_head *, void *,
-				struct se_mem *, struct se_mem **, u32 *, u32 *);
-	/*
 	 * get_sense_buffer():
 	 */
 	unsigned char *(*get_sense_buffer)(struct se_task *);
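
Note: with do_se_mem_map() gone and alloc_task() taking the CDB directly, a
backend's allocation hook no longer needs the struct se_cmd at all.  A
hypothetical implementation under the new prototype (names are illustrative
only, loosely modeled on the ramdisk/fileio backends; not taken from this
patch):

	static struct se_task *foo_alloc_task(unsigned char *cdb)
	{
		/* assumed: struct foo_request embeds a struct se_task;
		 * cdb is available if the backend needs to size the request */
		struct foo_request *req;

		req = kzalloc(sizeof(*req), GFP_KERNEL);
		if (!req) {
			pr_err("Unable to allocate struct foo_request\n");
			return NULL;
		}

		return &req->foo_task;
	}

The hook would then be wired up as .alloc_task = foo_alloc_task in the
backend's struct se_subsystem_api, next to the existing do_task() and
free_task() callbacks.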