blob: 643b0afce7401fc960c867435ff896afd0ae2a0f [file] [log] [blame]
Abhilash Kumar9f3f5122018-01-23 11:15:11 +05301/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
Seemanta Dutta1c827da2017-04-05 17:34:05 -07002 *
3 * This program is free software; you can redistribute it and/or modify
4 * it under the terms of the GNU General Public License version 2 and
5 * only version 2 as published by the Free Software Foundation.
6 *
7 * This program is distributed in the hope that it will be useful,
8 * but WITHOUT ANY WARRANTY; without even the implied warranty of
9 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
10 * GNU General Public License for more details.
11 */
12
Seemanta Dutta1c827da2017-04-05 17:34:05 -070013#include <linux/module.h>
14#include <linux/types.h>
15#include <linux/mutex.h>
16#include <linux/msm_ion.h>
Patrick Dalyde1c64d2017-09-12 16:30:12 -070017#include <linux/slab.h>
Seemanta Dutta1c827da2017-04-05 17:34:05 -070018#include <asm/cacheflush.h>
19
20#include "cam_req_mgr_util.h"
21#include "cam_mem_mgr.h"
22#include "cam_smmu_api.h"
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070023#include "cam_debug_util.h"
Seemanta Dutta1c827da2017-04-05 17:34:05 -070024
25static struct cam_mem_table tbl;
26
27static int cam_mem_util_map_cpu_va(struct ion_handle *hdl,
28 uint64_t *vaddr,
29 size_t *len)
30{
31 *vaddr = (uintptr_t)ion_map_kernel(tbl.client, hdl);
32 if (IS_ERR_OR_NULL((void *)*vaddr)) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070033 CAM_ERR(CAM_CRM, "kernel map fail");
Seemanta Dutta1c827da2017-04-05 17:34:05 -070034 return -ENOSPC;
35 }
36
37 if (ion_handle_get_size(tbl.client, hdl, len)) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070038 CAM_ERR(CAM_CRM, "kernel get len failed");
Seemanta Dutta1c827da2017-04-05 17:34:05 -070039 ion_unmap_kernel(tbl.client, hdl);
40 return -ENOSPC;
41 }
42
43 return 0;
44}
45
46static int cam_mem_util_get_dma_dir(uint32_t flags)
47{
48 int rc = -EINVAL;
49
50 if (flags & CAM_MEM_FLAG_HW_READ_ONLY)
51 rc = DMA_TO_DEVICE;
52 else if (flags & CAM_MEM_FLAG_HW_WRITE_ONLY)
53 rc = DMA_FROM_DEVICE;
54 else if (flags & CAM_MEM_FLAG_HW_READ_WRITE)
55 rc = DMA_BIDIRECTIONAL;
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -070056 else if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
57 rc = DMA_BIDIRECTIONAL;
Seemanta Dutta1c827da2017-04-05 17:34:05 -070058
59 return rc;
60}
61
62static int cam_mem_util_client_create(void)
63{
64 int rc = 0;
65
66 tbl.client = msm_ion_client_create("camera_global_pool");
67 if (IS_ERR_OR_NULL(tbl.client)) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -070068 CAM_ERR(CAM_CRM, "fail to create client");
Seemanta Dutta1c827da2017-04-05 17:34:05 -070069 rc = -EINVAL;
70 }
71
72 return rc;
73}
74
/* Release the global ION client created by cam_mem_util_client_create(). */
static void cam_mem_util_client_destroy(void)
{
	ion_client_destroy(tbl.client);
	tbl.client = NULL;
}
80
/*
 * cam_mem_mgr_init - one-time initialization of the global buffer table.
 *
 * Creates the shared ION client, allocates the slot bitmap, reserves
 * slot 0 (handle index 0 means "invalid"), pre-marks all entries as
 * unused (fd/buf_handle = -1) and initializes the table lock.
 *
 * Returns 0 on success or a negative error code; on bitmap allocation
 * failure the ION client is destroyed again.
 */
int cam_mem_mgr_init(void)
{
	int rc;
	int i;
	int bitmap_size;

	memset(tbl.bufq, 0, sizeof(tbl.bufq));

	rc = cam_mem_util_client_create();
	if (rc < 0) {
		CAM_ERR(CAM_CRM, "fail to create ion client");
		goto client_fail;
	}

	bitmap_size = BITS_TO_LONGS(CAM_MEM_BUFQ_MAX) * sizeof(long);
	tbl.bitmap = kzalloc(bitmap_size, GFP_KERNEL);
	if (!tbl.bitmap) {
		rc = -ENOMEM;
		goto bitmap_fail;
	}
	/* bits may exceed CAM_MEM_BUFQ_MAX due to rounding up to longs;
	 * the range check in cam_mem_get_slot() compensates.
	 */
	tbl.bits = bitmap_size * BITS_PER_BYTE;
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);

	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].buf_handle = -1;
	}
	mutex_init(&tbl.m_lock);
	return rc;

bitmap_fail:
	cam_mem_util_client_destroy();
client_fail:
	return rc;
}
118
/*
 * cam_mem_get_slot - reserve a free entry in the global buffer table.
 *
 * Claims the first free index in the slot bitmap under tbl.m_lock,
 * marks the entry active and initializes its per-entry q_lock.
 *
 * Returns the reserved index (always > 0; slot 0 is the reserved
 * "invalid" slot) or -ENOMEM when the table is full.
 */
static int32_t cam_mem_get_slot(void)
{
	int32_t idx;

	mutex_lock(&tbl.m_lock);
	idx = find_first_zero_bit(tbl.bitmap, tbl.bits);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		mutex_unlock(&tbl.m_lock);
		return -ENOMEM;
	}

	set_bit(idx, tbl.bitmap);
	tbl.bufq[idx].active = true;
	mutex_init(&tbl.bufq[idx].q_lock);
	mutex_unlock(&tbl.m_lock);

	return idx;
}
137
/*
 * cam_mem_put_slot - return a table entry reserved by cam_mem_get_slot().
 *
 * Lock order is m_lock then q_lock, matching the rest of the file; the
 * per-entry q_lock is destroyed because cam_mem_get_slot() re-inits it
 * on the next reservation.
 */
static void cam_mem_put_slot(int32_t idx)
{
	mutex_lock(&tbl.m_lock);
	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);
}
148
/*
 * cam_mem_get_io_buf - resolve a buffer handle to its device (IO) address.
 * @buf_handle: handle returned by cam_mem_mgr_alloc_and_map()/..._map()
 * @mmu_handle: SMMU handle identifying the context bank to query
 * @iova_ptr:   out, device virtual address of the mapping
 * @len_ptr:    out, length of the mapping
 *
 * Secure handles are resolved through the stage-2 SMMU lookup, all
 * others through the regular IOVA lookup.
 *
 * Returns 0 on success or a negative error code.
 *
 * NOTE(review): the 'active' flag is sampled before q_lock is taken, so
 * a concurrent release could race this check — confirm callers serialize
 * release against lookups.
 */
int cam_mem_get_io_buf(int32_t buf_handle, int32_t mmu_handle,
	uint64_t *iova_ptr, size_t *len_ptr)
{
	int rc = 0, idx;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto handle_mismatch;
	}

	if (CAM_MEM_MGR_IS_SECURE_HDL(buf_handle))
		rc = cam_smmu_get_stage2_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	else
		rc = cam_smmu_get_iova(mmu_handle,
			tbl.bufq[idx].fd,
			iova_ptr,
			len_ptr);
	if (rc < 0)
		CAM_ERR(CAM_CRM, "fail to get buf hdl :%d", buf_handle);

handle_mismatch:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_io_buf);
185
/*
 * cam_mem_get_cpu_buf - resolve a buffer handle to a kernel virtual address.
 * @buf_handle: handle returned by the alloc/map paths
 * @vaddr_ptr:  out, cached kernel virtual address
 * @len:        out, buffer length recorded at alloc/map time
 *
 * Only valid for buffers allocated with CAM_MEM_FLAG_KMD_ACCESS.  The
 * kernel mapping is created lazily on first use and cached in kmdvaddr
 * for subsequent lookups; it is torn down in cam_mem_util_unmap().
 *
 * Returns 0 on success, -EINVAL on bad arguments/handle mismatch,
 * -EPERM when the slot is inactive.
 */
int cam_mem_get_cpu_buf(int32_t buf_handle, uint64_t *vaddr_ptr, size_t *len)
{
	int rc = 0;
	int idx;
	struct ion_handle *ion_hdl = NULL;
	uint64_t kvaddr = 0;
	size_t klen = 0;

	if (!buf_handle || !vaddr_ptr || !len)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	if (!tbl.bufq[idx].active)
		return -EPERM;

	mutex_lock(&tbl.bufq[idx].q_lock);
	if (buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto exit_func;
	}

	ion_hdl = tbl.bufq[idx].i_hdl;
	if (!ion_hdl) {
		CAM_ERR(CAM_CRM, "Invalid ION handle");
		rc = -EINVAL;
		goto exit_func;
	}

	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS) {
		/* Map into the kernel only once, then reuse the cached va */
		if (!tbl.bufq[idx].kmdvaddr) {
			rc = cam_mem_util_map_cpu_va(ion_hdl,
				&kvaddr, &klen);
			if (rc)
				goto exit_func;
			tbl.bufq[idx].kmdvaddr = kvaddr;
		}
	} else {
		rc = -EINVAL;
		goto exit_func;
	}

	*vaddr_ptr = tbl.bufq[idx].kmdvaddr;
	*len = tbl.bufq[idx].len;

exit_func:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_get_cpu_buf);
238
/*
 * cam_mem_mgr_cache_ops - perform a CPU cache maintenance operation on
 * a managed buffer.
 * @cmd: contains the buffer handle and the requested CAM_MEM_*_CACHE op
 *
 * The operation is only issued when ION reports the buffer as cached;
 * for uncached buffers this is a silent no-op returning 0.
 *
 * Returns 0 on success or a negative error code.
 */
int cam_mem_mgr_cache_ops(struct cam_mem_cache_ops_cmd *cmd)
{
	int rc = 0, idx;
	uint32_t ion_cache_ops;
	unsigned long ion_flag = 0;

	if (!cmd)
		return -EINVAL;

	idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0)
		return -EINVAL;

	mutex_lock(&tbl.bufq[idx].q_lock);

	if (!tbl.bufq[idx].active) {
		rc = -EINVAL;
		goto fail;
	}

	if (cmd->buf_handle != tbl.bufq[idx].buf_handle) {
		rc = -EINVAL;
		goto fail;
	}

	/* Ask ION whether the buffer was allocated cached */
	rc = ion_handle_get_flags(tbl.client, tbl.bufq[idx].i_hdl,
		&ion_flag);
	if (rc) {
		CAM_ERR(CAM_CRM, "cache get flags failed %d", rc);
		goto fail;
	}

	if (ION_IS_CACHED(ion_flag)) {
		/* Translate UAPI cache op to the ION ioctl equivalent */
		switch (cmd->mem_cache_ops) {
		case CAM_MEM_CLEAN_CACHE:
			ion_cache_ops = ION_IOC_CLEAN_CACHES;
			break;
		case CAM_MEM_INV_CACHE:
			ion_cache_ops = ION_IOC_INV_CACHES;
			break;
		case CAM_MEM_CLEAN_INV_CACHE:
			ion_cache_ops = ION_IOC_CLEAN_INV_CACHES;
			break;
		default:
			CAM_ERR(CAM_CRM,
				"invalid cache ops :%d", cmd->mem_cache_ops);
			rc = -EINVAL;
			goto fail;
		}

		rc = msm_ion_do_cache_op(tbl.client,
			tbl.bufq[idx].i_hdl,
			(void *)tbl.bufq[idx].vaddr,
			tbl.bufq[idx].len,
			ion_cache_ops);
		if (rc)
			CAM_ERR(CAM_CRM, "cache operation failed %d", rc);
	}
fail:
	mutex_unlock(&tbl.bufq[idx].q_lock);
	return rc;
}
EXPORT_SYMBOL(cam_mem_mgr_cache_ops);
302
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700303static int cam_mem_util_get_dma_buf(size_t len,
304 size_t align,
305 unsigned int heap_id_mask,
306 unsigned int flags,
307 struct ion_handle **hdl,
308 struct dma_buf **buf)
309{
310 int rc = 0;
311
312 if (!hdl || !buf) {
313 CAM_ERR(CAM_CRM, "Invalid params");
314 return -EINVAL;
315 }
316
317 *hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
318 if (IS_ERR_OR_NULL(*hdl))
319 return -ENOMEM;
320
321 *buf = ion_share_dma_buf(tbl.client, *hdl);
322 if (IS_ERR_OR_NULL(*buf)) {
323 CAM_ERR(CAM_CRM, "get dma buf fail");
324 rc = -EINVAL;
325 goto get_buf_fail;
326 }
327
328 return rc;
329
330get_buf_fail:
331 ion_free(tbl.client, *hdl);
332 return rc;
333
334}
335
336static int cam_mem_util_get_dma_buf_fd(size_t len,
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700337 size_t align,
338 unsigned int heap_id_mask,
339 unsigned int flags,
340 struct ion_handle **hdl,
341 int *fd)
342{
343 int rc = 0;
344
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700345 if (!hdl || !fd) {
346 CAM_ERR(CAM_CRM, "Invalid params");
347 return -EINVAL;
348 }
349
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700350 *hdl = ion_alloc(tbl.client, len, align, heap_id_mask, flags);
351 if (IS_ERR_OR_NULL(*hdl))
352 return -ENOMEM;
353
354 *fd = ion_share_dma_buf_fd(tbl.client, *hdl);
355 if (*fd < 0) {
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700356 CAM_ERR(CAM_CRM, "get fd fail");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700357 rc = -EINVAL;
358 goto get_fd_fail;
359 }
360
361 return rc;
362
363get_fd_fail:
364 ion_free(tbl.client, *hdl);
365 return rc;
366}
367
368static int cam_mem_util_ion_alloc(struct cam_mem_mgr_alloc_cmd *cmd,
369 struct ion_handle **hdl,
370 int *fd)
371{
372 uint32_t heap_id;
373 uint32_t ion_flag = 0;
374 int rc;
375
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700376 if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE) {
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700377 heap_id = ION_HEAP(ION_SECURE_DISPLAY_HEAP_ID);
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700378 ion_flag |= ION_FLAG_SECURE | ION_FLAG_CP_CAMERA;
379 } else {
Venkat Chinta686c9e52018-01-20 14:33:25 -0800380 heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
381 ION_HEAP(ION_CAMERA_HEAP_ID);
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700382 }
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700383
384 if (cmd->flags & CAM_MEM_FLAG_CACHE)
385 ion_flag |= ION_FLAG_CACHED;
386 else
387 ion_flag &= ~ION_FLAG_CACHED;
388
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700389 rc = cam_mem_util_get_dma_buf_fd(cmd->len,
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700390 cmd->align,
391 heap_id,
392 ion_flag,
393 hdl,
394 fd);
395
396 return rc;
397}
398
399
400static int cam_mem_util_check_flags(struct cam_mem_mgr_alloc_cmd *cmd)
401{
402 if (!cmd->flags) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700403 CAM_ERR(CAM_CRM, "Invalid flags");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700404 return -EINVAL;
405 }
406
407 if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700408 CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700409 CAM_MEM_MMU_MAX_HANDLE);
410 return -EINVAL;
411 }
412
413 if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
414 cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700415 CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700416 return -EINVAL;
417 }
418
419 return 0;
420}
421
422static int cam_mem_util_check_map_flags(struct cam_mem_mgr_map_cmd *cmd)
423{
424 if (!cmd->flags) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700425 CAM_ERR(CAM_CRM, "Invalid flags");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700426 return -EINVAL;
427 }
428
429 if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700430 CAM_ERR(CAM_CRM, "Num of mmu hdl exceeded maximum(%d)",
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700431 CAM_MEM_MMU_MAX_HANDLE);
432 return -EINVAL;
433 }
434
435 if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE &&
436 cmd->flags & CAM_MEM_FLAG_KMD_ACCESS) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700437 CAM_ERR(CAM_CRM, "Kernel mapping in secure mode not allowed");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700438 return -EINVAL;
439 }
440
441 if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700442 CAM_ERR(CAM_CRM,
443 "Shared memory buffers are not allowed to be mapped");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700444 return -EINVAL;
445 }
446
447 return 0;
448}
449
450static int cam_mem_util_map_hw_va(uint32_t flags,
451 int32_t *mmu_hdls,
452 int32_t num_hdls,
453 int fd,
454 dma_addr_t *hw_vaddr,
455 size_t *len,
456 enum cam_smmu_region_id region)
457{
458 int i;
459 int rc = -1;
460 int dir = cam_mem_util_get_dma_dir(flags);
461
462 if (dir < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700463 CAM_ERR(CAM_CRM, "fail to map DMA direction");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700464 return dir;
465 }
466
467 if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
468 for (i = 0; i < num_hdls; i++) {
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700469 rc = cam_smmu_map_stage2_iova(mmu_hdls[i],
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700470 fd,
471 dir,
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700472 tbl.client,
473 (ion_phys_addr_t *)hw_vaddr,
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700474 len);
475
476 if (rc < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700477 CAM_ERR(CAM_CRM,
478 "Failed to securely map to smmu");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700479 goto multi_map_fail;
480 }
481 }
482 } else {
483 for (i = 0; i < num_hdls; i++) {
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700484 rc = cam_smmu_map_user_iova(mmu_hdls[i],
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700485 fd,
486 dir,
487 (dma_addr_t *)hw_vaddr,
488 len,
489 region);
490
491 if (rc < 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700492 CAM_ERR(CAM_CRM, "Failed to map to smmu");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700493 goto multi_map_fail;
494 }
495 }
496 }
497
498 return rc;
499multi_map_fail:
500 if (flags & CAM_MEM_FLAG_PROTECTED_MODE)
501 for (--i; i > 0; i--)
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700502 cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700503 else
504 for (--i; i > 0; i--)
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700505 cam_smmu_unmap_user_iova(mmu_hdls[i],
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700506 fd,
507 CAM_SMMU_REGION_IO);
508 return rc;
509
510}
511
512int cam_mem_mgr_alloc_and_map(struct cam_mem_mgr_alloc_cmd *cmd)
513{
514 int rc;
515 int32_t idx;
516 struct ion_handle *ion_hdl;
517 int ion_fd;
518 dma_addr_t hw_vaddr = 0;
519 size_t len;
520
521 if (!cmd) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700522 CAM_ERR(CAM_CRM, " Invalid argument");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700523 return -EINVAL;
524 }
525 len = cmd->len;
526
527 rc = cam_mem_util_check_flags(cmd);
528 if (rc) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700529 CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700530 return rc;
531 }
532
533 rc = cam_mem_util_ion_alloc(cmd,
534 &ion_hdl,
535 &ion_fd);
536 if (rc) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700537 CAM_ERR(CAM_CRM, "Ion allocation failed");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700538 return rc;
539 }
540
541 idx = cam_mem_get_slot();
542 if (idx < 0) {
543 rc = -ENOMEM;
544 goto slot_fail;
545 }
546
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700547 if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
548 (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
549 (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700550
551 enum cam_smmu_region_id region;
552
553 if (cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE)
554 region = CAM_SMMU_REGION_IO;
555
556
557 if (cmd->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
558 region = CAM_SMMU_REGION_SHARED;
559
560 rc = cam_mem_util_map_hw_va(cmd->flags,
561 cmd->mmu_hdls,
562 cmd->num_hdl,
563 ion_fd,
564 &hw_vaddr,
565 &len,
566 region);
567 if (rc)
568 goto map_hw_fail;
569 }
570
571 mutex_lock(&tbl.bufq[idx].q_lock);
572 tbl.bufq[idx].fd = ion_fd;
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700573 tbl.bufq[idx].dma_buf = NULL;
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700574 tbl.bufq[idx].flags = cmd->flags;
575 tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, ion_fd);
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700576 if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
577 CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700578 tbl.bufq[idx].kmdvaddr = 0;
579
580 if (cmd->num_hdl > 0)
581 tbl.bufq[idx].vaddr = hw_vaddr;
582 else
583 tbl.bufq[idx].vaddr = 0;
584
585 tbl.bufq[idx].i_hdl = ion_hdl;
586 tbl.bufq[idx].len = cmd->len;
587 tbl.bufq[idx].num_hdl = cmd->num_hdl;
588 memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
589 sizeof(int32_t) * cmd->num_hdl);
590 tbl.bufq[idx].is_imported = false;
591 mutex_unlock(&tbl.bufq[idx].q_lock);
592
593 cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
594 cmd->out.fd = tbl.bufq[idx].fd;
595 cmd->out.vaddr = 0;
596
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700597 CAM_DBG(CAM_CRM, "buf handle: %x, fd: %d, len: %zu",
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700598 cmd->out.buf_handle, cmd->out.fd,
599 tbl.bufq[idx].len);
600
601 return rc;
602
603map_hw_fail:
604 cam_mem_put_slot(idx);
605slot_fail:
606 ion_free(tbl.client, ion_hdl);
607 return rc;
608}
609
610int cam_mem_mgr_map(struct cam_mem_mgr_map_cmd *cmd)
611{
612 int32_t idx;
613 int rc;
614 struct ion_handle *ion_hdl;
615 dma_addr_t hw_vaddr = 0;
616 size_t len = 0;
617
618 if (!cmd || (cmd->fd < 0)) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700619 CAM_ERR(CAM_CRM, "Invalid argument");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700620 return -EINVAL;
621 }
622
623 if (cmd->num_hdl > CAM_MEM_MMU_MAX_HANDLE)
624 return -EINVAL;
625
626 rc = cam_mem_util_check_map_flags(cmd);
627 if (rc) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700628 CAM_ERR(CAM_CRM, "Invalid flags: flags = %X", cmd->flags);
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700629 return rc;
630 }
631
632 ion_hdl = ion_import_dma_buf_fd(tbl.client, cmd->fd);
633 if (IS_ERR_OR_NULL((void *)(ion_hdl))) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700634 CAM_ERR(CAM_CRM, "Failed to import ion fd");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700635 return -EINVAL;
636 }
637
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700638 if ((cmd->flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
639 (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)) {
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700640 rc = cam_mem_util_map_hw_va(cmd->flags,
641 cmd->mmu_hdls,
642 cmd->num_hdl,
643 cmd->fd,
644 &hw_vaddr,
645 &len,
646 CAM_SMMU_REGION_IO);
647 if (rc)
648 goto map_fail;
649 }
650
651 idx = cam_mem_get_slot();
652 if (idx < 0) {
653 rc = -ENOMEM;
654 goto map_fail;
655 }
656
657 mutex_lock(&tbl.bufq[idx].q_lock);
658 tbl.bufq[idx].fd = cmd->fd;
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700659 tbl.bufq[idx].dma_buf = NULL;
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700660 tbl.bufq[idx].flags = cmd->flags;
661 tbl.bufq[idx].buf_handle = GET_MEM_HANDLE(idx, cmd->fd);
Lakshmi Narayana Kalavala2c714282017-09-08 12:27:36 -0700662 if (cmd->flags & CAM_MEM_FLAG_PROTECTED_MODE)
663 CAM_MEM_MGR_SET_SECURE_HDL(tbl.bufq[idx].buf_handle, true);
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700664 tbl.bufq[idx].kmdvaddr = 0;
665
666 if (cmd->num_hdl > 0)
667 tbl.bufq[idx].vaddr = hw_vaddr;
668 else
669 tbl.bufq[idx].vaddr = 0;
670
671 tbl.bufq[idx].i_hdl = ion_hdl;
672 tbl.bufq[idx].len = len;
673 tbl.bufq[idx].num_hdl = cmd->num_hdl;
674 memcpy(tbl.bufq[idx].hdls, cmd->mmu_hdls,
675 sizeof(int32_t) * cmd->num_hdl);
676 tbl.bufq[idx].is_imported = true;
677 mutex_unlock(&tbl.bufq[idx].q_lock);
678
679 cmd->out.buf_handle = tbl.bufq[idx].buf_handle;
680 cmd->out.vaddr = 0;
681
682 return rc;
683
684map_fail:
685 ion_free(tbl.client, ion_hdl);
686 return rc;
687}
688
/*
 * cam_mem_util_unmap_hw_va - tear down the SMMU mappings recorded for a
 * table entry.
 * @idx:    buffer-table index (caller must hold the relevant locks)
 * @region: SMMU region the non-secure mappings live in
 * @client: whether the mapping was made for a user or kernel buffer
 *
 * Iterates over every recorded MMU handle.  On the first unmap failure
 * the loop stops and the error is returned, leaving later handles still
 * mapped (partial unmap) — callers treat this as fatal for the entry.
 *
 * Returns 0 on success; -EINVAL for a bad index, an unknown client, or
 * when the entry has no MMU handles (rc keeps its initial value).
 */
static int cam_mem_util_unmap_hw_va(int32_t idx,
	enum cam_smmu_region_id region,
	enum cam_smmu_mapping_client client)
{
	int i;
	uint32_t flags;
	int32_t *mmu_hdls;
	int num_hdls;
	int fd;
	int rc = -EINVAL;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index");
		return rc;
	}

	flags = tbl.bufq[idx].flags;
	mmu_hdls = tbl.bufq[idx].hdls;
	num_hdls = tbl.bufq[idx].num_hdl;
	fd = tbl.bufq[idx].fd;

	if (flags & CAM_MEM_FLAG_PROTECTED_MODE) {
		/* Secure buffers go through the stage-2 unmap path */
		for (i = 0; i < num_hdls; i++) {
			rc = cam_smmu_unmap_stage2_iova(mmu_hdls[i], fd);
			if (rc < 0)
				goto unmap_end;
		}
	} else {
		for (i = 0; i < num_hdls; i++) {
			if (client == CAM_SMMU_MAPPING_USER) {
				rc = cam_smmu_unmap_user_iova(mmu_hdls[i],
					fd, region);
			} else if (client == CAM_SMMU_MAPPING_KERNEL) {
				rc = cam_smmu_unmap_kernel_iova(mmu_hdls[i],
					tbl.bufq[idx].dma_buf, region);
			} else {
				CAM_ERR(CAM_CRM,
					"invalid caller for unmapping : %d",
					client);
				rc = -EINVAL;
			}
			if (rc < 0)
				goto unmap_end;
		}
	}

	return rc;

unmap_end:
	CAM_ERR(CAM_CRM, "unmapping failed");
	return rc;
}
741
Soundrapandian Jeyaprakash8d16e272017-10-12 11:05:37 -0700742static void cam_mem_mgr_unmap_active_buf(int idx)
743{
744 enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;
745
746 if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS)
747 region = CAM_SMMU_REGION_SHARED;
748 else if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
749 region = CAM_SMMU_REGION_IO;
750
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700751 cam_mem_util_unmap_hw_va(idx, region, CAM_SMMU_MAPPING_USER);
Soundrapandian Jeyaprakash8d16e272017-10-12 11:05:37 -0700752}
753
/*
 * cam_mem_mgr_cleanup_table - force-release every remaining buffer.
 *
 * Called at deinit time; any entry still active at this point is a
 * leak (userspace did not release it), so its HW mapping is torn down
 * and its ION handle freed before the entry is reset.  Finishes by
 * clearing the bitmap back to its initial state (slot 0 reserved).
 *
 * Always returns 0.
 */
static int cam_mem_mgr_cleanup_table(void)
{
	int i;

	mutex_lock(&tbl.m_lock);
	for (i = 1; i < CAM_MEM_BUFQ_MAX; i++) {
		if (!tbl.bufq[i].active) {
			CAM_DBG(CAM_CRM,
				"Buffer inactive at idx=%d, continuing", i);
			continue;
		} else {
			CAM_DBG(CAM_CRM,
				"Active buffer at idx=%d, possible leak needs unmapping",
				i);
			cam_mem_mgr_unmap_active_buf(i);
		}

		mutex_lock(&tbl.bufq[i].q_lock);
		if (tbl.bufq[i].i_hdl) {
			ion_free(tbl.client, tbl.bufq[i].i_hdl);
			tbl.bufq[i].i_hdl = NULL;
		}
		/* Reset the entry to its pristine (unused) state */
		tbl.bufq[i].fd = -1;
		tbl.bufq[i].flags = 0;
		tbl.bufq[i].buf_handle = -1;
		tbl.bufq[i].vaddr = 0;
		tbl.bufq[i].len = 0;
		memset(tbl.bufq[i].hdls, 0,
			sizeof(int32_t) * tbl.bufq[i].num_hdl);
		tbl.bufq[i].num_hdl = 0;
		tbl.bufq[i].i_hdl = NULL;
		tbl.bufq[i].active = false;
		mutex_unlock(&tbl.bufq[i].q_lock);
		mutex_destroy(&tbl.bufq[i].q_lock);
	}
	bitmap_zero(tbl.bitmap, tbl.bits);
	/* We need to reserve slot 0 because 0 is invalid */
	set_bit(0, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return 0;
}
796
/*
 * cam_mem_mgr_deinit - tear down the memory manager.
 *
 * Force-releases any leaked buffers, frees the slot bitmap and destroys
 * the global ION client.  Counterpart of cam_mem_mgr_init().
 */
void cam_mem_mgr_deinit(void)
{
	cam_mem_mgr_cleanup_table();
	mutex_lock(&tbl.m_lock);
	bitmap_zero(tbl.bitmap, tbl.bits);
	kfree(tbl.bitmap);
	tbl.bitmap = NULL;
	cam_mem_util_client_destroy();
	mutex_unlock(&tbl.m_lock);
	mutex_destroy(&tbl.m_lock);
}
808
/*
 * cam_mem_util_unmap - fully release one buffer-table entry.
 * @idx:    table index to release
 * @client: whether the HW mapping was made for a user or kernel buffer
 *
 * Tears down (in order): the cached kernel mapping (if KMD_ACCESS), the
 * SMMU mappings, then the table entry itself and its ION handle.
 * Idempotent: an already-released entry (inactive and vaddr == 0) logs
 * a warning and returns 0.
 *
 * Locking: takes m_lock for the whole operation and q_lock for the
 * entry reset; the entry's q_lock is destroyed and its bitmap slot
 * cleared at the end so the slot can be reused.
 *
 * Returns 0 on success or the error from cam_mem_util_unmap_hw_va().
 */
static int cam_mem_util_unmap(int32_t idx,
	enum cam_smmu_mapping_client client)
{
	int rc = 0;
	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
		CAM_ERR(CAM_CRM, "Incorrect index");
		return -EINVAL;
	}

	CAM_DBG(CAM_CRM, "Flags = %X idx %d", tbl.bufq[idx].flags, idx);

	mutex_lock(&tbl.m_lock);
	if ((!tbl.bufq[idx].active) &&
		(tbl.bufq[idx].vaddr) == 0) {
		CAM_WARN(CAM_CRM, "Buffer at idx=%d is already unmapped,",
			idx);
		mutex_unlock(&tbl.m_lock);
		return 0;
	}


	/* Drop the cached kernel mapping created by cam_mem_get_cpu_buf() */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_KMD_ACCESS)
		if (tbl.bufq[idx].i_hdl && tbl.bufq[idx].kmdvaddr)
			ion_unmap_kernel(tbl.client, tbl.bufq[idx].i_hdl);

	/* SHARED flag gets precedence, all other flags after it */
	if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	if ((tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_READ_WRITE) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) ||
		(tbl.bufq[idx].flags & CAM_MEM_FLAG_PROTECTED_MODE))
		rc = cam_mem_util_unmap_hw_va(idx, region, client);


	mutex_lock(&tbl.bufq[idx].q_lock);
	tbl.bufq[idx].flags = 0;
	tbl.bufq[idx].buf_handle = -1;
	tbl.bufq[idx].vaddr = 0;
	memset(tbl.bufq[idx].hdls, 0,
		sizeof(int32_t) * CAM_MEM_MMU_MAX_HANDLE);

	CAM_DBG(CAM_CRM,
		"Ion handle at idx = %d freeing = %pK, fd = %d, imported %d dma_buf %pK",
		idx, tbl.bufq[idx].i_hdl, tbl.bufq[idx].fd,
		tbl.bufq[idx].is_imported,
		tbl.bufq[idx].dma_buf);

	if (tbl.bufq[idx].i_hdl) {
		ion_free(tbl.client, tbl.bufq[idx].i_hdl);
		tbl.bufq[idx].i_hdl = NULL;
	}

	tbl.bufq[idx].fd = -1;
	tbl.bufq[idx].dma_buf = NULL;
	tbl.bufq[idx].is_imported = false;
	tbl.bufq[idx].len = 0;
	tbl.bufq[idx].num_hdl = 0;
	tbl.bufq[idx].active = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);
	mutex_destroy(&tbl.bufq[idx].q_lock);
	clear_bit(idx, tbl.bitmap);
	mutex_unlock(&tbl.m_lock);

	return rc;
}
881
882int cam_mem_mgr_release(struct cam_mem_mgr_release_cmd *cmd)
883{
884 int idx;
885 int rc;
886
887 if (!cmd) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700888 CAM_ERR(CAM_CRM, "Invalid argument");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700889 return -EINVAL;
890 }
891
892 idx = CAM_MEM_MGR_GET_HDL_IDX(cmd->buf_handle);
893 if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700894 CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700895 return -EINVAL;
896 }
897
898 if (!tbl.bufq[idx].active) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700899 CAM_ERR(CAM_CRM, "Released buffer state should be active");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700900 return -EINVAL;
901 }
902
903 if (tbl.bufq[idx].buf_handle != cmd->buf_handle) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700904 CAM_ERR(CAM_CRM,
905 "Released buf handle not matching within table");
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700906 return -EINVAL;
907 }
908
Jigarkumar Zala36ad7172017-07-18 19:52:14 -0700909 CAM_DBG(CAM_CRM, "Releasing hdl = %u", cmd->buf_handle);
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -0700910 rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_USER);
Seemanta Dutta1c827da2017-04-05 17:34:05 -0700911
912 return rc;
913}
914
/**
 * cam_mem_mgr_request_mem - allocate a kernel-mapped buffer and map it
 *                           into a SMMU region for HW use
 * @inp:  allocation request (size, align, flags, smmu_hdl)
 * @out:  on success, filled with kva, iova, mem_handle, len and region
 *
 * Allocates an ION buffer, maps it into the kernel address space, then
 * maps it into either the SHARED or IO SMMU region of @inp->smmu_hdl and
 * records the buffer in a free slot of the global table @tbl.
 *
 * Return: 0 on success, negative errno on failure.  On failure all
 * resources acquired up to the failing step are unwound via the goto
 * ladder at the bottom (reverse order of acquisition).
 */
int cam_mem_mgr_request_mem(struct cam_mem_mgr_request_desc *inp,
	struct cam_mgr_memory_desc *out)
{
	struct ion_handle *hdl;
	struct dma_buf *buf = NULL;
	int ion_fd = -1;	/* no userspace fd for kernel-requested memory */
	int rc = 0;
	uint32_t heap_id;
	int32_t ion_flag = 0;
	uint64_t kvaddr;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;

	enum cam_smmu_region_id region = CAM_SMMU_REGION_SHARED;

	if (!inp || !out) {
		CAM_ERR(CAM_CRM, "Invalid params");
		return -EINVAL;
	}

	/* At least one of READ_WRITE / SHARED_ACCESS / CACHE must be set */
	if (!(inp->flags & CAM_MEM_FLAG_HW_READ_WRITE ||
		inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS ||
		inp->flags & CAM_MEM_FLAG_CACHE)) {
		CAM_ERR(CAM_CRM, "Invalid flags for request mem");
		return -EINVAL;
	}

	if (inp->flags & CAM_MEM_FLAG_CACHE)
		ion_flag |= ION_FLAG_CACHED;
	else
		ion_flag &= ~ION_FLAG_CACHED;

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
		ION_HEAP(ION_CAMERA_HEAP_ID);

	rc = cam_mem_util_get_dma_buf(inp->size,
		inp->align,
		heap_id,
		ion_flag,
		&hdl,
		&buf);

	if (rc) {
		CAM_ERR(CAM_CRM, "ION alloc failed for shared buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
	}

	/* request_len is set to the ION buffer size here ... */
	rc = cam_mem_util_map_cpu_va(hdl, &kvaddr, &request_len);
	if (rc) {
		CAM_ERR(CAM_CRM, "Failed to get kernel vaddr");
		goto map_fail;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_CRM, "Invalid SMMU handle");
		rc = -EINVAL;
		goto smmu_fail;
	}

	/* SHARED flag gets precedence, all other flags after it */
	if (inp->flags & CAM_MEM_FLAG_HW_SHARED_ACCESS) {
		region = CAM_SMMU_REGION_SHARED;
	} else {
		if (inp->flags & CAM_MEM_FLAG_HW_READ_WRITE)
			region = CAM_SMMU_REGION_IO;
	}

	/* ... and overwritten here with the SMMU-mapped length */
	rc = cam_smmu_map_kernel_iova(inp->smmu_hdl,
		buf,
		CAM_SMMU_MAP_RW,
		&iova,
		&request_len,
		region);

	if (rc < 0) {
		CAM_ERR(CAM_CRM, "SMMU mapping failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	/* Publish the buffer in the table under the slot lock */
	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].fd = -1;	/* kernel-owned buffer: no fd */
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = kvaddr;

	tbl.bufq[idx].vaddr = iova;

	tbl.bufq[idx].i_hdl = hdl;
	tbl.bufq[idx].len = inp->size;	/* requested size, not mapped len */
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = kvaddr;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = inp->size;
	out->region = region;

	return rc;
	/* Unwind in reverse order of acquisition */
slot_fail:
	cam_smmu_unmap_kernel_iova(inp->smmu_hdl,
		buf, region);
smmu_fail:
	ion_unmap_kernel(tbl.client, hdl);
map_fail:
	ion_free(tbl.client, hdl);
ion_fail:
	return rc;
}
1045EXPORT_SYMBOL(cam_mem_mgr_request_mem);
1046
1047int cam_mem_mgr_release_mem(struct cam_mem_mgr_memory_desc *inp)
1048{
1049 int32_t idx;
1050 int rc;
1051
1052 if (!inp) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001053 CAM_ERR(CAM_CRM, "Invalid argument");
Seemanta Dutta1c827da2017-04-05 17:34:05 -07001054 return -EINVAL;
1055 }
1056
1057 idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
1058 if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001059 CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
Seemanta Dutta1c827da2017-04-05 17:34:05 -07001060 return -EINVAL;
1061 }
1062
1063 if (!tbl.bufq[idx].active) {
Suresh Vankadara466bed22017-11-30 06:30:20 +05301064 if (tbl.bufq[idx].vaddr == 0) {
1065 CAM_ERR(CAM_CRM, "buffer is released already");
1066 return 0;
1067 }
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001068 CAM_ERR(CAM_CRM, "Released buffer state should be active");
Seemanta Dutta1c827da2017-04-05 17:34:05 -07001069 return -EINVAL;
1070 }
1071
1072 if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001073 CAM_ERR(CAM_CRM,
1074 "Released buf handle not matching within table");
Seemanta Dutta1c827da2017-04-05 17:34:05 -07001075 return -EINVAL;
1076 }
1077
Jigarkumar Zala36ad7172017-07-18 19:52:14 -07001078 CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -07001079 rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
Seemanta Dutta1c827da2017-04-05 17:34:05 -07001080
1081 return rc;
1082}
1083EXPORT_SYMBOL(cam_mem_mgr_release_mem);
Seemanta Dutta93f940c2017-10-13 14:34:18 -07001084
/**
 * cam_mem_mgr_reserve_memory_region - allocate a buffer and reserve it as
 *                                     the SMMU secondary-heap region
 * @inp:    allocation request (size, align, flags, smmu_hdl)
 * @region: must be CAM_SMMU_REGION_SECHEAP; any other region is rejected
 * @out:    on success, filled with iova, mem_handle, len and region
 *          (kva is 0 — the buffer is never CPU-mapped here)
 *
 * Allocates an uncached ION buffer (ion flags 0) and hands it to the SMMU
 * driver as the secondary heap for @inp->smmu_hdl, then records it in a
 * free slot of the global table @tbl.
 *
 * Return: 0 on success, negative errno on failure.  Failures unwind via
 * the goto ladder in reverse order of acquisition.
 */
int cam_mem_mgr_reserve_memory_region(struct cam_mem_mgr_request_desc *inp,
	enum cam_smmu_region_id region,
	struct cam_mem_mgr_memory_desc *out)
{
	struct ion_handle *hdl;
	struct dma_buf *buf = NULL;
	int rc = 0;
	int ion_fd = -1;	/* no userspace fd for this buffer */
	uint32_t heap_id;
	dma_addr_t iova = 0;
	size_t request_len = 0;
	uint32_t mem_handle;
	int32_t idx;
	int32_t smmu_hdl = 0;
	int32_t num_hdl = 0;

	if (!inp || !out) {
		CAM_ERR(CAM_CRM, "Invalid param(s)");
		return -EINVAL;
	}

	if (!inp->smmu_hdl) {
		CAM_ERR(CAM_CRM, "Invalid SMMU handle");
		return -EINVAL;
	}

	if (region != CAM_SMMU_REGION_SECHEAP) {
		CAM_ERR(CAM_CRM, "Only secondary heap supported");
		return -EINVAL;
	}

	heap_id = ION_HEAP(ION_SYSTEM_HEAP_ID) |
		ION_HEAP(ION_CAMERA_HEAP_ID);
	rc = cam_mem_util_get_dma_buf(inp->size,
		inp->align,
		heap_id,
		0,	/* no ION flags: uncached */
		&hdl,
		&buf);

	if (rc) {
		CAM_ERR(CAM_CRM, "ION alloc failed for sec heap buffer");
		goto ion_fail;
	} else {
		CAM_DBG(CAM_CRM, "Got dma_buf = %pK, hdl = %pK", buf, hdl);
	}

	/* request_len is returned by the SMMU driver, not taken from inp */
	rc = cam_smmu_reserve_sec_heap(inp->smmu_hdl,
		buf,
		&iova,
		&request_len);

	if (rc) {
		CAM_ERR(CAM_CRM, "Reserving secondary heap failed");
		goto smmu_fail;
	}

	smmu_hdl = inp->smmu_hdl;
	num_hdl = 1;

	idx = cam_mem_get_slot();
	if (idx < 0) {
		rc = -ENOMEM;
		goto slot_fail;
	}

	/* Publish the buffer in the table under the slot lock */
	mutex_lock(&tbl.bufq[idx].q_lock);
	mem_handle = GET_MEM_HANDLE(idx, ion_fd);
	tbl.bufq[idx].fd = -1;	/* kernel-owned buffer: no fd */
	tbl.bufq[idx].dma_buf = buf;
	tbl.bufq[idx].flags = inp->flags;
	tbl.bufq[idx].buf_handle = mem_handle;
	tbl.bufq[idx].kmdvaddr = 0;	/* never mapped into kernel VA */

	tbl.bufq[idx].vaddr = iova;

	tbl.bufq[idx].i_hdl = hdl;
	tbl.bufq[idx].len = request_len;
	tbl.bufq[idx].num_hdl = num_hdl;
	memcpy(tbl.bufq[idx].hdls, &smmu_hdl,
		sizeof(int32_t));
	tbl.bufq[idx].is_imported = false;
	mutex_unlock(&tbl.bufq[idx].q_lock);

	out->kva = 0;
	out->iova = (uint32_t)iova;
	out->smmu_hdl = smmu_hdl;
	out->mem_handle = mem_handle;
	out->len = request_len;
	out->region = region;

	return rc;

	/* Unwind in reverse order of acquisition */
slot_fail:
	cam_smmu_release_sec_heap(smmu_hdl);
smmu_fail:
	ion_free(tbl.client, hdl);
ion_fail:
	return rc;
}
1185EXPORT_SYMBOL(cam_mem_mgr_reserve_memory_region);
1186
1187int cam_mem_mgr_free_memory_region(struct cam_mem_mgr_memory_desc *inp)
1188{
1189 int32_t idx;
1190 int rc;
1191 int32_t smmu_hdl;
1192
1193 if (!inp) {
1194 CAM_ERR(CAM_CRM, "Invalid argument");
1195 return -EINVAL;
1196 }
1197
1198 if (inp->region != CAM_SMMU_REGION_SECHEAP) {
1199 CAM_ERR(CAM_CRM, "Only secondary heap supported");
1200 return -EINVAL;
1201 }
1202
1203 idx = CAM_MEM_MGR_GET_HDL_IDX(inp->mem_handle);
1204 if (idx >= CAM_MEM_BUFQ_MAX || idx <= 0) {
1205 CAM_ERR(CAM_CRM, "Incorrect index extracted from mem handle");
1206 return -EINVAL;
1207 }
1208
1209 if (!tbl.bufq[idx].active) {
Suresh Vankadara466bed22017-11-30 06:30:20 +05301210 if (tbl.bufq[idx].vaddr == 0) {
1211 CAM_ERR(CAM_CRM, "buffer is released already");
1212 return 0;
1213 }
Seemanta Dutta93f940c2017-10-13 14:34:18 -07001214 CAM_ERR(CAM_CRM, "Released buffer state should be active");
1215 return -EINVAL;
1216 }
1217
1218 if (tbl.bufq[idx].buf_handle != inp->mem_handle) {
1219 CAM_ERR(CAM_CRM,
1220 "Released buf handle not matching within table");
1221 return -EINVAL;
1222 }
1223
1224 if (tbl.bufq[idx].num_hdl != 1) {
1225 CAM_ERR(CAM_CRM,
1226 "Sec heap region should have only one smmu hdl");
1227 return -ENODEV;
1228 }
1229
1230 memcpy(&smmu_hdl, tbl.bufq[idx].hdls,
1231 sizeof(int32_t));
1232 if (inp->smmu_hdl != smmu_hdl) {
1233 CAM_ERR(CAM_CRM,
1234 "Passed SMMU handle doesn't match with internal hdl");
1235 return -ENODEV;
1236 }
1237
1238 rc = cam_smmu_release_sec_heap(inp->smmu_hdl);
1239 if (rc) {
1240 CAM_ERR(CAM_CRM,
1241 "Sec heap region release failed");
1242 return -ENODEV;
1243 }
1244
1245 CAM_DBG(CAM_CRM, "Releasing hdl = %X", inp->mem_handle);
Karthik Anantha Ram92a044d2017-10-27 18:53:25 -07001246 rc = cam_mem_util_unmap(idx, CAM_SMMU_MAPPING_KERNEL);
Seemanta Dutta93f940c2017-10-13 14:34:18 -07001247 if (rc)
1248 CAM_ERR(CAM_CRM, "unmapping secondary heap failed");
1249
1250 return rc;
1251}
1252EXPORT_SYMBOL(cam_mem_mgr_free_memory_region);