/* Copyright (c) 2016-2017, The Linux Foundation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 */

#include <linux/module.h>
#include <linux/types.h>
#include <linux/mutex.h>
#include <linux/msm_ion.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>

#include "cam_req_mgr_util.h"
#include "cam_mem_mgr.h"
#include "cam_smmu_api.h"
#include "cam_debug_util.h"

static struct cam_mem_table tbl;

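/*
 * cam_mem_util_map_cpu_va() - map an ION buffer into kernel virtual
 * address space and report its size. On failure the partial mapping
 * is undone and -ENOSPC is returned.
 */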
static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
	uint64_t *vaddr,
	size_t *len)
{
	*vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
	if (IS_ERR_OR_NULL((void *)*vaddr)) {
		CAM_ERR(CAM_CRM, "kernel map fail");
		return -ENOSPC;
	}

	if (ion_handle_get_size(tbl.client, hdl, len)) {
		CAM_ERR(CAM_CRM, "kernel get len failed");
		ion_unmap_kernel(tbl.client, hdl);
		return -ENOSPC;
	}

	return 0;
}

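/*
 * cam_mem_util_get_dma_dir() - translate CAM_MEM_FLAG_* access flags
 * into a dma_data_direction value; returns -EINVAL when no access
 * flag is set. Protected-mode buffers map to DMA_BIDIRECTIONAL.
 */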
static int cam_mem_util_get_dma_dir(uint32_t flags)
{
	int rc = -EINVAL;

	if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
		rc = DMA_TO_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
		rc = DMA_FROM_DEVICE;
	else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
		rc = DMA_BIDIRECTIONAL;
	else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		rc = DMA_BIDIRECTIONAL;

	return rc;
}

static int cam_mem_util_client_create(void)
{
	int rc = 0;

	tbl.client = msm_ion_client_create("camera_global_pool");
	if (IS_ERR_OR_NULL(tbl.client)) {
		CAM_ERR(CAM_CRM, "fail to create client");
		rc = -EINVAL;
	}

	return rc;
}

static void cam_mem_util_client_destroy(void)
{
	ion_client_destroy(tbl.client);
	tbl.client = NULL;
}

int cam_mem_mgr_init(void)
{
	int rc;
	int i;
	int bitmap_size;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	rc = cam_mem_util_client_create();
	if (rc < 0) {
		CAM_ERR(CAM_CRM, "fail to create ion client");
		goto client_fail;
	}

	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap) {
		rc = -ENOMEM;
		goto bitmap_fail;
	}
	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}
	mutex_init(&tbl.m_lock);
	return rc;

bitmap_fail:
	cam_mem_util_client_destroy();
client_fail:
	return rc;
}

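/*
 * Slot management: free slots in tbl.bufq are tracked with a bitmap.
 * Slot 0 is reserved so that a buffer handle of 0 is never valid.
 * cam_mem_get_slot() claims the first free slot and marks it active;
 * cam_mem_put_slot() returns a slot to the pool.
 */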
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}

static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}

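/*
 * cam_mem_get_io_buf() - look up the IOVA and length of a buffer for
 * a given SMMU handle. Secure handles are resolved through the
 * stage-2 (secure) SMMU mapping, all others through the regular one.
 */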
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	uint64_t *iova_ptr, size_t *len_ptr)
{
	int rc = 0, idx;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	if (rc < 0)
		CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);

handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);

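/*
 * cam_mem_get_cpu_buf() - return the kernel virtual address and
 * length of a buffer. The buffer must have been allocated with
 * CAM_MEM_FLAG_KMD_ACCESS; the kernel mapping is created lazily on
 * first use and cached in the table.
 */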
int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
{
	int rc = 0;
	int idx;
	struct ion_handle *ion_hdl = NULL;
	uint64_t kvaddr = 0;
	size_t klen = 0;

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active)
		return -EPERM;

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto exit_func;
	}

	ion_hdl = tbl.bufq[idx].i_hdl;
	if (!ion_hdl) {
		CAM_ERR(CAM_CRM, "Invalid ION handle");
		rc = -EINVAL;
		goto exit_func;
	}

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		if (!tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_map_cpu_va(ion_hdl,
				&kvaddr, &klen);
			if (rc)
				goto exit_func;
			tbl.bufq[idx].kmdvaddr = kvaddr;
		}
	} else {
		rc = -EINVAL;
		goto exit_func;
	}

	*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
	*len = tbl.bufq[idx].len;

exit_func:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);

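/*
 * cam_mem_mgr_cache_ops() - perform a clean, invalidate, or
 * clean+invalidate cache operation on a buffer. The operation is only
 * issued when the underlying ION allocation is cached.
 */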
int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t ion_cache_ops;
	unsigned long ion_flag = 0;

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		rc = -EINVAL;
		goto fail;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto fail;
	}

	rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
		&ion_flag);
	if (rc) {
		CAM_ERR(CAM_CRM, "cache get flags failed %d", rc);
		goto fail;
	}

	if (ION_IS_CACHED(ion_flag)) {
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			ion_cache_ops = ION_IOC_CLEAN_CACHES;
			break;
		case CAM_MEM_INV_CACHE:
			ion_cache_ops = ION_IOC_INV_CACHES;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
			break;
		default:
			CAM_ERR(CAM_CRM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto fail;
		}

		rc = msm_ion_do_cache_op(tbl.client,
			tbl.bufq[idx].i_hdl,
			(void *)tbl.bufq[idx].vaddr,
			tbl.bufq[idx].len,
			ion_cache_ops);
		if (rc)
			CAM_ERR(CAM_CRM, "cache operation failed %d", rc);
	}
fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);

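/*
 * Allocation helpers: cam_mem_util_get_dma_buf() allocates from ION
 * and shares the allocation as a struct dma_buf for kernel users;
 * cam_mem_util_get_dma_buf_fd() shares it as a file descriptor for
 * userspace mappings. Both free the ION handle on failure.
 */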
static int cam_mem_util_get_dma_buf(size_t len,
	size_t align,
	unsigned int heap_id_mask,
	unsigned int flags,
	struct ion_handle **hdl,
	struct dma_buf **buf)
{
	int rc = 0;

	if (!hdl || !buf) {
		CAM_ERR(CAM_CRM, "Invalid params");
		return -EINVAL;
	}

	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(*hdl))
		return -ENOMEM;

	*buf = ion_share_dma_buf(tbl.client, *hdl);
	if (IS_ERR_OR_NULL(*buf)) {
		CAM_ERR(CAM_CRM, "get dma buf fail");
		rc = -EINVAL;
		goto get_buf_fail;
	}

	return rc;

get_buf_fail:
	ion_free(tbl.client, *hdl);
	return rc;
}

static int cam_mem_util_get_dma_buf_fd(size_t len,
	size_t align,
	unsigned int heap_id_mask,
	unsigned int flags,
	struct ion_handle **hdl,
	int *fd)
{
	int rc = 0;

	if (!hdl || !fd) {
		CAM_ERR(CAM_CRM, "Invalid params");
		return -EINVAL;
	}

	*hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
	if (IS_ERR_OR_NULL(*hdl))
		return -ENOMEM;

	*fd = ion_share_dma_buf_fd(tbl.client, *hdl);
	if (*fd < 0) {
		CAM_ERR(CAM_CRM, "get fd fail");
		rc = -EINVAL;
		goto get_fd_fail;
	}

	return rc;

get_fd_fail:
	ion_free(tbl.client, *hdl);
	return rc;
}

static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
	struct ion_handle **hdl,
	int *fd)
{
	uint32_t heap_id;
	uint32_t ion_flag = 0;
	int rc;

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
		ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
	} else {
		heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
	}

	if (cmd->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	rc = cam_mem_util_get_dma_buf_fd(cmd->len,
		cmd->align,
		heap_id,
		ion_flag,
		hdl,
		fd);

	return rc;
}

static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_CRM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	return 0;
}

static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
{
	if (!cmd->flags) {
		CAM_ERR(CAM_CRM, "Invalid flags");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
		CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
			CAM_MEM_MMU_MAX_HANDLE);
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
		cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
		CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
		return -EINVAL;
	}

	if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		CAM_ERR(CAM_CRM,
			"Shared memory buffers are not allowed to be mapped");
		return -EINVAL;
	}

	return 0;
}

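/*
 * cam_mem_util_map_hw_va() - map a buffer into the IOMMU for every
 * requested SMMU handle. Protected buffers go through the stage-2
 * (secure) mapping path; on a partial failure, the mappings created
 * so far are rolled back.
 */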
static int cam_mem_util_map_hw_va(uint32_t flags,
	int32_t *mmu_hdls,
	int32_t num_hdls,
	int fd,
	dma_addr_t *hw_vaddr,
	size_t *len,
	enum cam_smmu_region_id region)
{
	int i;
	int rc = -1;
	int dir = cam_mem_util_get_dma_dir(flags);

	if (dir < 0) {
		CAM_ERR(CAM_CRM, "fail to map DMA direction");
		return dir;
	}

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
				fd,
				dir,
				tbl.client,
				(ion_phys_addr_t *)hw_vaddr,
				len);

			if (rc < 0) {
				CAM_ERR(CAM_CRM,
					"Failed to securely map to smmu");
				goto multi_map_fail;
			}
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_map_user_iova(mmu_hdls[i],
				fd,
				dir,
				(dma_addr_t *)hw_vaddr,
				len,
				region);

			if (rc < 0) {
				CAM_ERR(CAM_CRM, "Failed to map to smmu");
				goto multi_map_fail;
			}
		}
	}

	return rc;
multi_map_fail:
	/* Roll back every mapping created so far, including index 0 */
	if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
		for (--i; i >= 0; i--)
			cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
	else
		for (--i; i >= 0; i--)
			cam_smmu_unmap_user_iova(mmu_hdls[i],
				fd,
				CAM_SMMU_REGION_IO);
	return rc;
}

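/*
 * cam_mem_mgr_alloc_and_map() - userspace-facing allocation path:
 * allocate from ION, reserve a table slot, optionally map the buffer
 * into the IOMMU (IO or shared region), and fill in the out handle/fd.
 */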
int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
{
	int rc;
	int32_t idx;
	struct ion_handle *ion_hdl;
	int ion_fd;
	dma_addr_t hw_vaddr = 0;
	size_t len;

	if (!cmd) {
		CAM_ERR(CAM_CRM, "Invalid argument");
		return -EINVAL;
	}
	len = cmd->len;

	rc = cam_mem_util_check_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
		return rc;
	}

	rc = cam_mem_util_ion_alloc(cmd,
		&ion_hdl,
		&ion_fd);
	if (rc) {
		CAM_ERR(CAM_CRM, "Ion allocation failed");
		return rc;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {

		/* Default to IO; the secure (stage-2) path ignores region */
		enum cam_smmu_region_id region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;

		if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
			region = CAM_SMMU_REGION_SHARED;

		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			ion_fd,
			&hw_vaddr,
			&len,
			region);
		if (rc)
			goto map_hw_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = ion_fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].i_hdl = ion_hdl;
	tbl.bufq[idx].len = cmd->len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.fd = tbl.bufq[idx].fd;
	cmd->out.vaddr = 0;

	CAM_DBG(CAM_CRM, "buf handle: %x, fd: %d, len: %zu",
		cmd->out.buf_handle, cmd->out.fd,
		tbl.bufq[idx].len);

	return rc;

map_hw_fail:
	cam_mem_put_slot(idx);
slot_fail:
	ion_free(tbl.client, ion_hdl);
	return rc;
}

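/*
 * cam_mem_mgr_map() - import a dma_buf fd allocated elsewhere, map it
 * into the IOMMU for the requested handles, and register it in the
 * table as an imported buffer.
 */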
int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
{
	int32_t idx;
	int rc;
	struct ion_handle *ion_hdl;
	dma_addr_t hw_vaddr = 0;
	size_t len = 0;

	if (!cmd || (cmd->fd < 0)) {
		CAM_ERR(CAM_CRM, "Invalid argument");
		return -EINVAL;
	}

	if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE)
		return -EINVAL;

	rc = cam_mem_util_check_map_flags(cmd);
	if (rc) {
		CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
		return rc;
	}

	ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
	if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
		CAM_ERR(CAM_CRM, "Failed to import ion fd");
		return -EINVAL;
	}

	if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
		rc = cam_mem_util_map_hw_va(cmd->flags,
			cmd->mmu_hdls,
			cmd->num_hdl,
			cmd->fd,
			&hw_vaddr,
			&len,
			CAM_SMMU_REGION_IO);
		if (rc)
			goto map_fail;
	}

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto map_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].fd = cmd->fd;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].flags = cmd->flags;
	tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
	if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
		CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
	tbl.bufq[idx].kmdvaddr = 0;

	if (cmd->num_hdl > 0)
		tbl.bufq[idx].vaddr = hw_vaddr;
	else
		tbl.bufq[idx].vaddr = 0;

	tbl.bufq[idx].i_hdl = ion_hdl;
	tbl.bufq[idx].len = len;
	tbl.bufq[idx].num_hdl = cmd->num_hdl;
	memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
		sizeof(int32_t) * cmd->num_hdl);
	tbl.bufq[idx].is_imported = true;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
	cmd->out.vaddr = 0;

	return rc;

map_fail:
	ion_free(tbl.client, ion_hdl);
	return rc;
}

static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	int rc = -EINVAL;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index");
		return rc;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
			if (rc < 0)
				goto unmap_end;
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_CRM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0)
				goto unmap_end;
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_CRM, "unmapping failed");
	return rc;
}

static void cam_mem_mgr_unmap_active_buf(int idx)
{
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
		region = CAM_SMMU_REGION_SHARED;
	else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
		region = CAM_SMMU_REGION_IO;

	cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
}

static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_CRM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_INFO(CAM_CRM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].i_hdl) {
			ion_free(tbl.client, tbl.bufq[i].i_hdl);
			tbl.bufq[i].i_hdl = NULL;
		}
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].i_hdl = NULL;
		tbl.bufq[i].active = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}

void cam_mem_mgr_deinit(void)
{
	cam_mem_mgr_cleanup_table();
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	cam_mem_util_client_destroy();
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}

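/*
 * cam_mem_util_unmap() - tear down a buffer slot: release the kernel
 * mapping (if any), unmap from the IOMMU for the proper region, free
 * the ION handle, and return the slot to the bitmap.
 */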
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_CRM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr == 0)) {
		CAM_WARN(CAM_CRM, "Buffer at idx=%d is already unmapped",
			idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
			ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
		rc = cam_mem_util_unmap_hw_va(idx, region, client);

	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	tbl.bufq[idx].vaddr = 0;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_CRM,
		"Ion handle at idx = %d freeing = %pK, fd = %d, imported %d dma_buf %pK",
		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].i_hdl) {
		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
		tbl.bufq[idx].i_hdl = NULL;
	}

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}

int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
{
	int idx;
	int rc;

	if (!cmd) {
		CAM_ERR(CAM_CRM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		CAM_ERR(CAM_CRM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
		CAM_ERR(CAM_CRM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);

	return rc;
}

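/*
 * cam_mem_mgr_request_mem() - kernel-internal allocation: allocate a
 * dma_buf from the system heap, map it into the kernel and into the
 * requesting SMMU handle (shared region takes precedence over IO),
 * and return the kva/iova pair in the output descriptor.
 */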
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mem_mgr_memory_desc *out)
{
	struct ion_handle *hdl;
	struct dma_buf *buf = NULL;
	int ion_fd = -1;
	int rc = 0;
	uint32_t heap_id;
	int32_t ion_flag = 0;
	uint64_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!inp || !out) {
		CAM_ERR(CAM_CRM, "Invalid params");
		return -EINVAL;
	}

	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_CRM, "Invalid flags for request mem");
		return -EINVAL;
	}

	if (inp->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);

	rc = cam_mem_util_get_dma_buf(inp->size,
		inp->align,
		heap_id,
		ion_flag,
		&hdl,
		&buf);

	if (rc) {
		CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
	}

	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_CRM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_CRM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);

	if (rc < 0) {
		CAM_ERR(CAM_CRM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;

	tbl.bufq[idx].vaddr = iova;

	tbl.bufq[idx].i_hdl = hdl;
	tbl.bufq[idx].len = inp->size;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	return rc;
slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	ion_unmap_kernel(tbl.client, hdl);
map_fail:
	ion_free(tbl.client, hdl);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_request_mem);

int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;

	if (!inp) {
		CAM_ERR(CAM_CRM, "Invalid argument");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_CRM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_CRM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_CRM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_release_mem);

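/*
 * cam_mem_mgr_reserve_memory_region() - back the SMMU secondary heap
 * region with a dma_buf allocation and register it in the table; only
 * CAM_SMMU_REGION_SECHEAP is supported. The paired free path below
 * validates the handle before tearing the reservation down.
 */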
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct ion_handle *hdl;
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;
	uint32_t heap_id;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;

	if (!inp || !out) {
		CAM_ERR(CAM_CRM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_CRM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if (region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_CRM, "Only secondary heap supported");
		return -EINVAL;
	}

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID);
	rc = cam_mem_util_get_dma_buf(inp->size,
		inp->align,
		heap_id,
		0,
		&hdl,
		&buf);

	if (rc) {
		CAM_ERR(CAM_CRM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
	}

	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
		buf,
		&iova,
		&request_len);

	if (rc) {
		CAM_ERR(CAM_CRM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = 0;

	tbl.bufq[idx].vaddr = iova;

	tbl.bufq[idx].i_hdl = hdl;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = 0;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;

slot_fail:
	cam_smmu_release_sec_heap(smmu_hdl);
smmu_fail:
	ion_free(tbl.client, hdl);
ion_fail:
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);

int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
{
	int32_t idx;
	int rc;
	int32_t smmu_hdl;

	if (!inp) {
		CAM_ERR(CAM_CRM, "Invalid argument");
		return -EINVAL;
	}

	if (inp->region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_CRM, "Only secondary heap supported");
		return -EINVAL;
	}

	idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
		return -EINVAL;
	}

	if (!tbl.bufq[idx].active) {
		if (tbl.bufq[idx].vaddr == 0) {
			CAM_ERR(CAM_CRM, "buffer is released already");
			return 0;
		}
		CAM_ERR(CAM_CRM, "Released buffer state should be active");
		return -EINVAL;
	}

	if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
		CAM_ERR(CAM_CRM,
			"Released buf handle not matching within table");
		return -EINVAL;
	}

	if (tbl.bufq[idx].num_hdl != 1) {
		CAM_ERR(CAM_CRM,
			"Sec heap region should have only one smmu hdl");
		return -ENODEV;
	}

	memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
		sizeof(int32_t));
	if (inp->smmu_hdl != smmu_hdl) {
		CAM_ERR(CAM_CRM,
			"Passed SMMU handle doesn't match with internal hdl");
		return -ENODEV;
	}

	rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
	if (rc) {
		CAM_ERR(CAM_CRM,
			"Sec heap region release failed");
		return -ENODEV;
	}

	CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
	rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
	if (rc)
		CAM_ERR(CAM_CRM, "unmapping secondary heap failed");

	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);