crypto: qat - change ae_num to ae_id

Change the logic for how acceleration engines are indexed to make it
easier to read. Also update some return code values to better reflect
what failed.
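
The indexing change fixes an off-by-one: ae_max_num is a count of
acceleration engines, so valid engine ids run from 0 to
ae_max_num - 1. A minimal sketch of the corrected loop pattern
(the loop body here is illustrative only):

	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
		if (!test_bit(ae,
			      (unsigned long *)&handle->hal_handle->ae_mask))
			continue;
		/* operate on engine "ae" */
	}

The return code change follows the usual kernel convention: a failed
kcalloc() now reports -ENOMEM (allocation failure) rather than
-EFAULT (bad address).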

Signed-off-by: Pingchao Yang <pingchao.yang@intel.com>
Signed-off-by: Tadeusz Struk <tadeusz.struk@intel.com>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
diff --git a/drivers/crypto/qat/qat_common/qat_uclo.c b/drivers/crypto/qat/qat_common/qat_uclo.c
index 557fa60..ebd5da0 100644
--- a/drivers/crypto/qat/qat_common/qat_uclo.c
+++ b/drivers/crypto/qat/qat_common/qat_uclo.c
@@ -214,11 +214,10 @@
 
 static int qat_uclo_parse_num(char *str, unsigned int *num)
 {
-	char buf[16];
+	char buf[16] = {0};
 	unsigned long ae = 0;
 	int i;
 
-	memset(buf, '\0', 16);
 	strncpy(buf, str, 15);
 	for (i = 0; i < 16; i++) {
 		if (!isdigit(buf[i])) {
@@ -418,13 +417,13 @@
 	fill_data = kcalloc(ICP_QAT_UCLO_MAX_USTORE, sizeof(uint64_t),
 			    GFP_KERNEL);
 	if (!fill_data)
-		return -EFAULT;
+		return -ENOMEM;
 	for (i = 0; i < ICP_QAT_UCLO_MAX_USTORE; i++)
 		memcpy(&fill_data[i], &uof_image->fill_pattern,
 		       sizeof(uint64_t));
 	page = image->page;
 
-	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
 		if (!test_bit(ae, (unsigned long *)&uof_image->ae_assigned))
 			continue;
 		ustore_size = obj_handle->ae_data[ae].eff_ustore_size;
@@ -442,11 +441,9 @@
 
 static int qat_uclo_init_memory(struct icp_qat_fw_loader_handle *handle)
 {
-	unsigned int i;
-	int status = 0;
+	int i, ae;
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 	struct icp_qat_uof_initmem *initmem = obj_handle->init_mem_tab.init_mem;
-	int ae;
 
 	for (i = 0; i < obj_handle->init_mem_tab.entry_num; i++) {
 		if (initmem->num_in_bytes) {
@@ -473,7 +470,7 @@
 						 &obj_handle->
 						 umem_init_tab[ae]);
 	}
-	return status;
+	return 0;
 }
 
 static void *qat_uclo_find_chunk(struct icp_qat_uof_objhdr *obj_hdr,
@@ -526,7 +523,7 @@
 {
 	struct icp_qat_uof_filechunkhdr *file_chunk;
 	struct icp_qat_uclo_objhdr *obj_hdr;
-	void *chunk;
+	char *chunk;
 	int i;
 
 	file_chunk = (struct icp_qat_uof_filechunkhdr *)
@@ -536,7 +533,7 @@
 			     ICP_QAT_UOF_OBJID_LEN)) {
 			chunk = buf + file_chunk->offset;
 			if (file_chunk->checksum != qat_uclo_calc_str_checksum(
-				(char *)chunk, file_chunk->size))
+				chunk, file_chunk->size))
 				break;
 			obj_hdr = kzalloc(sizeof(*obj_hdr), GFP_KERNEL);
 			if (!obj_hdr)
@@ -595,7 +592,7 @@
 	return 0;
 }
 
-static void qat_uclo_map_image_pages(struct icp_qat_uof_encap_obj
+static void qat_uclo_map_image_page(struct icp_qat_uof_encap_obj
 				     *encap_uof_obj,
 				     struct icp_qat_uof_image *img,
 				     struct icp_qat_uclo_encap_page *page)
@@ -631,7 +628,7 @@
 			       struct icp_qat_uclo_encapme *ae_uimage,
 			       int max_image)
 {
-	int a = 0, i;
+	int i, j;
 	struct icp_qat_uof_chunkhdr *chunk_hdr = NULL;
 	struct icp_qat_uof_image *image;
 	struct icp_qat_uof_objtable *ae_regtab;
@@ -640,7 +637,7 @@
 	struct icp_qat_uof_encap_obj *encap_uof_obj =
 					&obj_handle->encap_uof_obj;
 
-	for (a = 0; a < max_image; a++) {
+	for (j = 0; j < max_image; j++) {
 		chunk_hdr = qat_uclo_find_chunk(encap_uof_obj->obj_hdr,
 						ICP_QAT_UOF_IMAG, chunk_hdr);
 		if (!chunk_hdr)
@@ -650,37 +647,37 @@
 		ae_regtab = (struct icp_qat_uof_objtable *)
 			   (image->reg_tab_offset +
 			   obj_handle->obj_hdr->file_buff);
-		ae_uimage[a].ae_reg_num = ae_regtab->entry_num;
-		ae_uimage[a].ae_reg = (struct icp_qat_uof_ae_reg *)
+		ae_uimage[j].ae_reg_num = ae_regtab->entry_num;
+		ae_uimage[j].ae_reg = (struct icp_qat_uof_ae_reg *)
 			(((char *)ae_regtab) +
 			sizeof(struct icp_qat_uof_objtable));
 		init_reg_sym_tab = (struct icp_qat_uof_objtable *)
 				   (image->init_reg_sym_tab +
 				   obj_handle->obj_hdr->file_buff);
-		ae_uimage[a].init_regsym_num = init_reg_sym_tab->entry_num;
-		ae_uimage[a].init_regsym = (struct icp_qat_uof_init_regsym *)
+		ae_uimage[j].init_regsym_num = init_reg_sym_tab->entry_num;
+		ae_uimage[j].init_regsym = (struct icp_qat_uof_init_regsym *)
 			(((char *)init_reg_sym_tab) +
 			sizeof(struct icp_qat_uof_objtable));
 		sbreak_tab = (struct icp_qat_uof_objtable *)
 			(image->sbreak_tab + obj_handle->obj_hdr->file_buff);
-		ae_uimage[a].sbreak_num = sbreak_tab->entry_num;
-		ae_uimage[a].sbreak = (struct icp_qat_uof_sbreak *)
+		ae_uimage[j].sbreak_num = sbreak_tab->entry_num;
+		ae_uimage[j].sbreak = (struct icp_qat_uof_sbreak *)
 				      (((char *)sbreak_tab) +
 				      sizeof(struct icp_qat_uof_objtable));
-		ae_uimage[a].img_ptr = image;
+		ae_uimage[j].img_ptr = image;
 		if (qat_uclo_check_image_compat(encap_uof_obj, image))
 			goto out_err;
-		ae_uimage[a].page =
+		ae_uimage[j].page =
 			kzalloc(sizeof(struct icp_qat_uclo_encap_page),
 				GFP_KERNEL);
-		if (!ae_uimage[a].page)
+		if (!ae_uimage[j].page)
 			goto out_err;
-		qat_uclo_map_image_pages(encap_uof_obj, image,
-					 ae_uimage[a].page);
+		qat_uclo_map_image_page(encap_uof_obj, image,
+					ae_uimage[j].page);
 	}
-	return a;
+	return j;
 out_err:
-	for (i = 0; i < a; i++)
+	for (i = 0; i < j; i++)
 		kfree(ae_uimage[i].page);
 	return 0;
 }
@@ -875,7 +872,7 @@
 			return -EINVAL;
 		}
 	}
-	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
 		for (s = 0; s < obj_handle->ae_data[ae].slice_num; s++) {
 			if (!obj_handle->ae_data[ae].ae_slices[s].encap_image)
 				continue;
@@ -896,7 +893,7 @@
 	struct icp_qat_uclo_aedata *ae_data;
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 
-	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
 		if (!test_bit(ae,
 			      (unsigned long *)&handle->hal_handle->ae_mask))
 			continue;
@@ -1041,7 +1038,7 @@
 void qat_uclo_del_uof_obj(struct icp_qat_fw_loader_handle *handle)
 {
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
-	int a;
+	unsigned int a;
 
 	if (!obj_handle)
 		return;
@@ -1050,7 +1047,7 @@
 	for (a = 0; a < obj_handle->uimage_num; a++)
 		kfree(obj_handle->ae_uimage[a].page);
 
-	for (a = 0; a <= (int)handle->hal_handle->ae_max_num; a++)
+	for (a = 0; a < handle->hal_handle->ae_max_num; a++)
 		qat_uclo_free_ae_data(&obj_handle->ae_data[a]);
 
 	kfree(obj_handle->obj_hdr);
@@ -1127,8 +1124,8 @@
 	}
 }
 
-static void qat_uclo_wr_uimage_pages(struct icp_qat_fw_loader_handle *handle,
-				     struct icp_qat_uof_image *image)
+static void qat_uclo_wr_uimage_page(struct icp_qat_fw_loader_handle *handle,
+				    struct icp_qat_uof_image *image)
 {
 	struct icp_qat_uclo_objhandle *obj_handle = handle->obj_handle;
 	unsigned int ctx_mask, s;
@@ -1142,7 +1139,7 @@
 		ctx_mask = 0x55;
 	/* load the default page and set assigned CTX PC
 	 * to the entrypoint address */
-	for (ae = 0; ae <= handle->hal_handle->ae_max_num; ae++) {
+	for (ae = 0; ae < handle->hal_handle->ae_max_num; ae++) {
 		if (!test_bit(ae, (unsigned long *)&image->ae_assigned))
 			continue;
 		/* find the slice to which this image is assigned */
@@ -1181,8 +1178,8 @@
 			return -EINVAL;
 		if (qat_uclo_init_ustore(handle, &obj_handle->ae_uimage[i]))
 			return -EINVAL;
-		qat_uclo_wr_uimage_pages(handle,
-					 obj_handle->ae_uimage[i].img_ptr);
+		qat_uclo_wr_uimage_page(handle,
+					obj_handle->ae_uimage[i].img_ptr);
 	}
 	return 0;
 }