platform: msm_shared: Correct cache invalidation operations

When invalidating the cache, the address and size must be aligned
to CACHE_LINE. If they are not, the arch_invalidate_cache_range
API aligns them itself by widening the range, which can invalidate
neighbouring lines that still hold dirty data not yet cleaned to
memory, so valid data is lost. For example, with a 64-byte
CACHE_LINE, invalidating 32 bytes at 0x1010 actually covers
0x1000-0x1040 and discards any dirty data sharing those lines.

So correct the cache invalidation operations by explicitly using
CACHE_LINE-aligned addresses and sizes: use memalign,
STACKBUF_DMA_ALIGN or BUF_DMA_ALIGN to allocate the buffers that
will be invalidated, and round the allocation size up to a
multiple of CACHE_LINE.
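
For illustration only (not part of this patch), the intended
pattern looks roughly like this, assuming LK's CACHE_LINE,
ROUNDUP(), memalign() and arch_invalidate_cache_range();
read_device_response and payload_size are hypothetical names:

    #include <stdlib.h>    /* ROUNDUP, memalign, free */
    #include <arch/ops.h>  /* arch_invalidate_cache_range */

    static int read_device_response(uint32_t payload_size)
    {
        /* Size rounded up and address aligned to CACHE_LINE, so a
         * later invalidate cannot widen the range over
         * neighbouring, still-dirty data. */
        uint32_t len = ROUNDUP(payload_size, CACHE_LINE);
        void *buf = memalign(CACHE_LINE, len);

        if (!buf)
            return -1;

        /* ... the device DMAs its response into buf ... */

        /* Both arguments are already CACHE_LINE aligned, so only
         * this buffer's own lines are invalidated. */
        arch_invalidate_cache_range((addr_t)buf, len);

        /* ... consume the response ... */
        free(buf);
        return 0;
    }

For stack buffers, STACKBUF_DMA_ALIGN(var, size) declares storage
with the same alignment and padding guarantees.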

Change-Id: I30c3f101481fd259c631c48501988fd403a8627b
diff --git a/platform/msm_shared/rpmb/rpmb_listener.c b/platform/msm_shared/rpmb/rpmb_listener.c
index 480a235..07b71b8 100644
--- a/platform/msm_shared/rpmb/rpmb_listener.c
+++ b/platform/msm_shared/rpmb/rpmb_listener.c
@@ -1,4 +1,4 @@
-/* Copyright (c) 2013-2015, The Linux Foundation. All rights reserved.
+/* Copyright (c) 2013-2015, 2018, The Linux Foundation. All rights reserved.
  *
  * Redistribution and use in source and binary forms, with or without
  * modification, are permitted provided that the following conditions are
@@ -29,6 +29,7 @@
 #include <rpmb.h>
 #include <rpmb_listener.h>
 #include <qseecom_lk_api.h>
+#include <stdlib.h>
 
 #define RPMB_LSTNR_VERSION_2        0x2
 
@@ -107,7 +108,7 @@
 	struct tz_rpmb_rw_req *req_p = (struct tz_rpmb_rw_req *)buf;
 	struct tz_rpmb_rw_resp *resp_p = NULL;
 	uint32_t *req_buf = buf + req_p->req_buff_offset;
-	uint32_t *resp_buf = buf + sizeof(struct tz_rpmb_rw_resp);
+	uint32_t *resp_buf = buf + ROUNDUP(sizeof(struct tz_rpmb_rw_resp), CACHE_LINE);
 
 	resp_p = (struct tz_rpmb_rw_resp *) buf;
 
@@ -132,7 +133,7 @@
 			ASSERT(0);
 	};
 
-	resp_p->res_buff_offset = sizeof(struct tz_rpmb_rw_resp);
+	resp_p->res_buff_offset = ROUNDUP(sizeof(struct tz_rpmb_rw_resp), CACHE_LINE);
 	resp_p->cmd_id = req_p->cmd_id;
 }
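
For context, a stand-alone sketch (illustrative only, not part of
this patch) of the arithmetic behind the hunk above: rounding the
response-buffer offset up to CACHE_LINE puts the DMA'd payload on
its own cache line, disjoint from the header the CPU writes, so
invalidating the payload can no longer discard the header fields.
The header size below is a stand-in for
sizeof(struct tz_rpmb_rw_resp):

    #include <stdio.h>
    #include <stddef.h>

    #define CACHE_LINE 64  /* assumed line size */
    #define ROUNDUP(a, b) (((a) + ((b) - 1)) & ~((b) - 1))

    int main(void)
    {
        size_t hdr = 20; /* stand-in for the real struct's size */
        size_t old_off = hdr;                      /* shares a line */
        size_t new_off = ROUNDUP(hdr, CACHE_LINE); /* own line */

        printf("old offset %zu, new offset %zu\n", old_off, new_off);
        return 0;
    }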