target: Use array_zalloc for device_list

Turns an order-8 allocation into slab-sized ones, thereby preventing
allocation failures when memory is fragmented.

This likely saves memory as well, as the slab allocator can pack objects
more tightly than the buddy allocator.
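The array_zalloc()/array_free() helpers themselves are not visible in the
hunks below; a minimal sketch of how such helpers could be implemented
(names and placement assumed here, not taken from this diff) is:

	/*
	 * Sketch only, depends on <linux/slab.h>.  Allocate an array of n
	 * pointers, each pointing at its own kzalloc'd object of 'size'
	 * bytes, so no single allocation is larger than one slab object.
	 */
	static void *array_zalloc(int n, size_t size, gfp_t flags)
	{
		void **a;
		int i;

		a = kzalloc(n * sizeof(void *), flags);
		if (!a)
			return NULL;
		for (i = 0; i < n; i++) {
			a[i] = kzalloc(size, flags);
			if (!a[i]) {
				/* Unwind partial allocation on failure */
				while (i--)
					kfree(a[i]);
				kfree(a);
				return NULL;
			}
		}
		return a;
	}

	static void array_free(void *array, int n)
	{
		void **a = array;
		int i;

		for (i = 0; i < n; i++)
			kfree(a[i]);
		kfree(a);
	}

With device_list becoming an array of pointers rather than an array of
structs, every access site drops the address-of operator, which is what
the hunks below do.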

(nab: Fix lio-core patch fuzz)

Signed-off-by: Joern Engel <joern@logfs.org>
Signed-off-by: Nicholas Bellinger <nab@linux-iscsi.org>
diff --git a/drivers/target/target_core_device.c b/drivers/target/target_core_device.c
index fd7f17c..3be7279 100644
--- a/drivers/target/target_core_device.c
+++ b/drivers/target/target_core_device.c
@@ -72,7 +72,7 @@
 	}
 
 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
-	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
 	if (se_cmd->se_deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
 		struct se_dev_entry *deve = se_cmd->se_deve;
 
@@ -182,7 +182,7 @@
 	}
 
 	spin_lock_irqsave(&se_sess->se_node_acl->device_list_lock, flags);
-	se_cmd->se_deve = &se_sess->se_node_acl->device_list[unpacked_lun];
+	se_cmd->se_deve = se_sess->se_node_acl->device_list[unpacked_lun];
 	deve = se_cmd->se_deve;
 
 	if (deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS) {
@@ -240,7 +240,7 @@
 
 	spin_lock_irq(&nacl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &nacl->device_list[i];
+		deve = nacl->device_list[i];
 
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
@@ -286,7 +286,7 @@
 
 	spin_lock_irq(&nacl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &nacl->device_list[i];
+		deve = nacl->device_list[i];
 
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
@@ -306,7 +306,7 @@
 	}
 	spin_unlock_irq(&nacl->device_list_lock);
 
-	kfree(nacl->device_list);
+	array_free(nacl->device_list, TRANSPORT_MAX_LUNS_PER_TPG);
 	nacl->device_list = NULL;
 
 	return 0;
@@ -318,7 +318,7 @@
 	unsigned long flags;
 
 	spin_lock_irqsave(&se_nacl->device_list_lock, flags);
-	deve = &se_nacl->device_list[se_cmd->orig_fe_lun];
+	deve = se_nacl->device_list[se_cmd->orig_fe_lun];
 	deve->deve_cmds--;
 	spin_unlock_irqrestore(&se_nacl->device_list_lock, flags);
 }
@@ -331,7 +331,7 @@
 	struct se_dev_entry *deve;
 
 	spin_lock_irq(&nacl->device_list_lock);
-	deve = &nacl->device_list[mapped_lun];
+	deve = nacl->device_list[mapped_lun];
 	if (lun_access & TRANSPORT_LUNFLAGS_READ_WRITE) {
 		deve->lun_flags &= ~TRANSPORT_LUNFLAGS_READ_ONLY;
 		deve->lun_flags |= TRANSPORT_LUNFLAGS_READ_WRITE;
@@ -356,7 +356,7 @@
 	int enable)
 {
 	struct se_port *port = lun->lun_sep;
-	struct se_dev_entry *deve = &nacl->device_list[mapped_lun];
+	struct se_dev_entry *deve = nacl->device_list[mapped_lun];
 	int trans = 0;
 	/*
 	 * If the MappedLUN entry is being disabled, the entry in
@@ -470,7 +470,7 @@
 
 		spin_lock_irq(&nacl->device_list_lock);
 		for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-			deve = &nacl->device_list[i];
+			deve = nacl->device_list[i];
 			if (lun != deve->se_lun)
 				continue;
 			spin_unlock_irq(&nacl->device_list_lock);
@@ -669,7 +669,7 @@
 
 	spin_lock_irq(&se_sess->se_node_acl->device_list_lock);
 	for (i = 0; i < TRANSPORT_MAX_LUNS_PER_TPG; i++) {
-		deve = &se_sess->se_node_acl->device_list[i];
+		deve = se_sess->se_node_acl->device_list[i];
 		if (!(deve->lun_flags & TRANSPORT_LUNFLAGS_INITIATOR_ACCESS))
 			continue;
 		se_lun = deve->se_lun;