/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "DMABUFHEAPS"

#include <BufferAllocator/BufferAllocator.h>

#include <dirent.h>
#include <errno.h>
#include <fcntl.h>
#include <ion/ion.h>
#include <linux/dma-buf.h>
#include <linux/dma-heap.h>
#include <linux/ion_4.12.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <sys/types.h>
#include <unistd.h>

#include <memory>
#include <shared_mutex>
#include <string>
#include <unordered_set>

#include <android-base/logging.h>
#include <android-base/unique_fd.h>

static constexpr char kDmaHeapRoot[] = "/dev/dma_heap/";
static constexpr char kIonDevice[] = "/dev/ion";
static constexpr char kIonSystemHeapName[] = "ion_system_heap";

void BufferAllocator::LogInterface(const std::string& interface) {
    if (!logged_interface_) {
        LOG(INFO) << "Using : " << interface;
        logged_interface_ = true;
    }
}

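/*
 * Returns a cached fd for the DMA-BUF heap named |heap_name|, opening the device under
 * /dev/dma_heap/ on first use. Lookups take a shared lock; the open-and-insert path
 * re-checks the map under a unique lock so that racing threads do not open the same
 * heap device twice. Returns -errno if the heap device cannot be opened.
 */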
int BufferAllocator::OpenDmabufHeap(const std::string& heap_name) {
    std::shared_lock<std::shared_mutex> slock(dmabuf_heap_fd_mutex_);

    /* Check if the heap has already been opened. */
    auto it = dmabuf_heap_fds_.find(heap_name);
    if (it != dmabuf_heap_fds_.end())
        return it->second;

    slock.unlock();

    /*
     * The heap device needs to be opened; use a unique_lock since dmabuf_heap_fds_
     * needs to be modified.
     */
    std::unique_lock<std::shared_mutex> ulock(dmabuf_heap_fd_mutex_);

    /*
     * Check again whether the heap has already been opened, to prevent racing threads
     * from opening the heap device multiple times.
     */
    it = dmabuf_heap_fds_.find(heap_name);
    if (it != dmabuf_heap_fds_.end()) return it->second;

    std::string heap_path = kDmaHeapRoot + heap_name;
    int fd = TEMP_FAILURE_RETRY(open(heap_path.c_str(), O_RDONLY | O_CLOEXEC));
    if (fd < 0) return -errno;

    LOG(INFO) << "Using DMA-BUF heap named: " << heap_name;

    auto ret = dmabuf_heap_fds_.insert({heap_name, android::base::unique_fd(fd)});
    CHECK(ret.second);
    return fd;
}

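/*
 * Probes the ION driver to decide how the system heap names should be mapped: with the
 * legacy ION interface the system heap is addressed by a fixed heap mask, while newer
 * ION versions are queried for their heap list so heaps can be matched by name.
 */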
void BufferAllocator::QueryIonHeaps() {
    uses_legacy_ion_iface_ = ion_is_legacy(ion_fd_);
    if (uses_legacy_ion_iface_) {
        LogInterface("Legacy ion heaps");
        MapNameToIonMask(kDmabufSystemHeapName, ION_HEAP_SYSTEM_MASK, ION_FLAG_CACHED);
        MapNameToIonMask(kDmabufSystemUncachedHeapName, ION_HEAP_SYSTEM_MASK);
        return;
    }

    int heap_count;
    int ret = ion_query_heap_cnt(ion_fd_, &heap_count);
    if (ret == 0) {
        ion_heap_info_.resize(heap_count, {});
        ret = ion_query_get_heaps(ion_fd_, heap_count, ion_heap_info_.data());
    }

    // Abort if the heap query fails.
    CHECK(ret == 0)
            << "Non-legacy ION implementation must support heap information queries";
    LogInterface("Non-legacy ION heaps");

    /*
     * No error checking here; it is possible that devices use another name for
     * the ion system heap.
     */
    MapNameToIonName(kDmabufSystemHeapName, kIonSystemHeapName, ION_FLAG_CACHED);
    MapNameToIonName(kDmabufSystemUncachedHeapName, kIonSystemHeapName);
}

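/*
 * Opens /dev/ion if it is present so allocations can fall back to ION on older kernels;
 * DMA-BUF heap devices are opened lazily by OpenDmabufHeap().
 */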
BufferAllocator::BufferAllocator() {
    ion_fd_.reset(TEMP_FAILURE_RETRY(open(kIonDevice, O_RDONLY | O_CLOEXEC)));
    if (ion_fd_ >= 0)
        QueryIonHeaps();
}

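/* Records a mapping from a DMA-BUF heap name to a legacy ION heap mask and flags. */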
int BufferAllocator::MapNameToIonMask(const std::string& heap_name, unsigned int ion_heap_mask,
                                      unsigned int ion_heap_flags) {
    if (!ion_heap_mask)
        return -EINVAL;
    IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };

    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
    heap_name_to_config_[heap_name] = heap_config;
    return 0;
}

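/* Finds the heap id reported by the kernel for the ION heap named |heap_name|. */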
int BufferAllocator::GetIonHeapIdByName(const std::string& heap_name, unsigned int* heap_id) {
    for (auto& it : ion_heap_info_) {
        if (heap_name == it.name) {
            *heap_id = it.heap_id;
            return 0;
        }
    }

    LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
    return -EINVAL;
}

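/*
 * Records a mapping from a DMA-BUF heap name to a named ION heap by resolving the ION
 * heap id and storing the equivalent heap mask and flags.
 */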
int BufferAllocator::MapNameToIonName(const std::string& heap_name,
                                      const std::string& ion_heap_name,
                                      unsigned int ion_heap_flags) {
    unsigned int ion_heap_id = 0;
    auto ret = GetIonHeapIdByName(ion_heap_name, &ion_heap_id);
    if (ret < 0)
        return ret;

    unsigned int ion_heap_mask = 1 << ion_heap_id;
    IonHeapConfig heap_config = { ion_heap_mask, ion_heap_flags };

    std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
    heap_name_to_config_[heap_name] = heap_config;

    return 0;
}

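/*
 * Maps a DMA-BUF heap name to an equivalent ION heap so that Alloc() can fall back to
 * ION when the DMA-BUF heap is unavailable. The mapping is skipped when the DMA-BUF
 * heap already exists or when ION support is absent. A minimal usage sketch, with
 * purely illustrative heap names and a device-specific legacy mask:
 *
 *   BufferAllocator allocator;
 *   allocator.MapNameToIonHeap("example_heap", "example_ion_heap", 0, 1 << 5, 0);
 */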
int BufferAllocator::MapNameToIonHeap(const std::string& heap_name,
                                      const std::string& ion_heap_name,
                                      unsigned int ion_heap_flags,
                                      unsigned int legacy_ion_heap_mask,
                                      unsigned int legacy_ion_heap_flags) {
    /* If the DMA-BUF heap exists, we can ignore the ION mappings. */
    int ret = OpenDmabufHeap(heap_name);
    if (ret >= 0)
        return 0;

    /* If ION support is not detected, ignore the mappings. */
    if (ion_fd_ < 0) return 0;

    if (uses_legacy_ion_iface_ || ion_heap_name == "") {
        ret = MapNameToIonMask(heap_name, legacy_ion_heap_mask, legacy_ion_heap_flags);
    } else if (!ion_heap_name.empty()) {
        ret = MapNameToIonName(heap_name, ion_heap_name, ion_heap_flags);
    }

    return ret;
}

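/*
 * Retrieves the cached ION configuration for |heap_name|. If no mapping exists and the
 * non-legacy ION interface is in use, the heap is resolved by name against the kernel's
 * heap list and the result is cached for later lookups.
 */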
int BufferAllocator::GetIonConfig(const std::string& heap_name, IonHeapConfig& heap_config) {
    int ret = 0;

    std::shared_lock<std::shared_mutex> slock(heap_name_to_config_mutex_);

    auto it = heap_name_to_config_.find(heap_name);
    if (it != heap_name_to_config_.end()) {
        heap_config = it->second;
        return ret;
    }

    slock.unlock();

    if (uses_legacy_ion_iface_) {
        ret = -EINVAL;
    } else {
        unsigned int heap_id;
        ret = GetIonHeapIdByName(heap_name, &heap_id);
        if (ret == 0) {
            heap_config.mask = 1 << heap_id;
            heap_config.flags = 0;
            /* Save the config so that this lookup does not need to happen again. */
            std::unique_lock<std::shared_mutex> ulock(heap_name_to_config_mutex_);
            heap_name_to_config_[heap_name] = heap_config;
        }
    }

    if (ret)
        LOG(ERROR) << "No ion heap of name " << heap_name << " exists";
    return ret;
}

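/*
 * Allocates |len| bytes from the named DMA-BUF heap with the DMA_HEAP_IOCTL_ALLOC ioctl
 * and returns the resulting dma-buf fd, or a negative error code on failure.
 */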
int BufferAllocator::DmabufAlloc(const std::string& heap_name, size_t len) {
    int fd = OpenDmabufHeap(heap_name);
    if (fd < 0) return fd;

    struct dma_heap_allocation_data heap_data{
        .len = len,  // length of data to be allocated in bytes
        .fd_flags = O_RDWR | O_CLOEXEC,  // fd flags for the dma-buf fd to be returned
    };

    auto ret = TEMP_FAILURE_RETRY(ioctl(fd, DMA_HEAP_IOCTL_ALLOC, &heap_data));
    if (ret < 0) {
        PLOG(ERROR) << "Unable to allocate from DMA-BUF heap: " << heap_name;
        return ret;
    }

    return heap_data.fd;
}

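/*
 * Allocates |len| bytes through libion using the heap mask and flags mapped to
 * |heap_name|, returning the allocated buffer fd or a negative error code.
 */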
int BufferAllocator::IonAlloc(const std::string& heap_name, size_t len,
                              unsigned int heap_flags, size_t legacy_align) {
    IonHeapConfig heap_config;
    auto ret = GetIonConfig(heap_name, heap_config);
    if (ret)
        return ret;

    int alloc_fd = -1;
    unsigned int flags = heap_config.flags | heap_flags;
    ret = ion_alloc_fd(ion_fd_, len, legacy_align, heap_config.mask, flags, &alloc_fd);
    if (ret) {
        PLOG(ERROR) << "Allocation failed for ion heap with mask: " << heap_config.mask
                    << " and flags: " << flags;
        return ret;
    }
    return alloc_fd;
}

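/*
 * Allocates from the heap named |heap_name|, preferring a DMA-BUF heap and falling back
 * to the mapped ION heap when the DMA-BUF heap is unavailable. A minimal usage sketch
 * (size and error handling are illustrative):
 *
 *   BufferAllocator allocator;
 *   int buf_fd = allocator.Alloc(kDmabufSystemHeapName, 4096, 0, 0);
 *   if (buf_fd < 0) {
 *       // handle allocation failure
 *   }
 */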
int BufferAllocator::Alloc(const std::string& heap_name, size_t len,
                           unsigned int heap_flags, size_t legacy_align) {
    int fd = DmabufAlloc(heap_name, len);

    if (fd < 0)
        fd = IonAlloc(heap_name, len, heap_flags, legacy_align);

    return fd;
}

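/*
 * Allocates from the system heap. When the caller does not need CPU access, the
 * "system-uncached" heap is preferred if it exists; support for it is probed once and
 * cached in function-local statics.
 */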
int BufferAllocator::AllocSystem(bool cpu_access_needed, size_t len, unsigned int heap_flags,
                                 size_t legacy_align) {
    if (!cpu_access_needed) {
        /*
         * The CPU does not need to access the allocated buffer, so we try to allocate
         * from the 'system-uncached' heap after querying for its existence.
         */
        static bool uncached_dmabuf_system_heap_support = [this]() -> bool {
            auto dmabuf_heap_list = this->GetDmabufHeapList();
            return (dmabuf_heap_list.find(kDmabufSystemUncachedHeapName) != dmabuf_heap_list.end());
        }();

        if (uncached_dmabuf_system_heap_support)
            return DmabufAlloc(kDmabufSystemUncachedHeapName, len);

        static bool uncached_ion_system_heap_support = [this]() -> bool {
            IonHeapConfig heap_config;
            auto ret = this->GetIonConfig(kDmabufSystemUncachedHeapName, heap_config);
            return (ret == 0);
        }();

        if (uncached_ion_system_heap_support)
            return IonAlloc(kDmabufSystemUncachedHeapName, len, heap_flags, legacy_align);
    }

    /*
     * Either 1) the CPU needs to access the allocated buffer, OR 2) the CPU does not
     * need to access it but the "system-uncached" heap is unsupported.
     */
    return Alloc(kDmabufSystemHeapName, len, heap_flags, legacy_align);
}

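/*
 * CPU cache sync for buffers allocated through legacy ION. A caller-provided sync
 * callback is invoked with a dup of the ION fd so this object retains ownership of the
 * original descriptor; otherwise ion_sync_fd() is used.
 */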
int BufferAllocator::LegacyIonCpuSync(unsigned int dmabuf_fd,
                                      const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                      void* legacy_ion_custom_data) {
    if (!legacy_ion_cpu_sync_custom)
        return ion_sync_fd(ion_fd_, dmabuf_fd);

    // Dup ion_fd_ so that we retain its ownership.
    int new_ion_fd = TEMP_FAILURE_RETRY(dup(ion_fd_.get()));
    if (new_ion_fd < 0) {
        PLOG(ERROR) << "Unable to dup ion fd. error: " << new_ion_fd;
        return new_ion_fd;
    }

    int ret = legacy_ion_cpu_sync_custom(new_ion_fd, dmabuf_fd, legacy_ion_custom_data);

    close(new_ion_fd);
    return ret;
}

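/*
 * Issues the begin or end CPU-access sync for |dmabuf_fd|: DMA_BUF_IOCTL_SYNC on
 * DMA-BUF heap kernels, or the legacy ION sync path when the legacy interface is used.
 */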
int BufferAllocator::DoSync(unsigned int dmabuf_fd, bool start, SyncType sync_type,
                            const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                            void* legacy_ion_custom_data) {
    if (uses_legacy_ion_iface_) {
        return LegacyIonCpuSync(dmabuf_fd, legacy_ion_cpu_sync_custom,
                                legacy_ion_custom_data);
    }

    struct dma_buf_sync sync = {
        .flags = (start ? DMA_BUF_SYNC_START : DMA_BUF_SYNC_END) |
                 static_cast<uint64_t>(sync_type),
    };
    return TEMP_FAILURE_RETRY(ioctl(dmabuf_fd, DMA_BUF_IOCTL_SYNC, &sync));
}

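/* Marks the start of a CPU access window for |dmabuf_fd|; pair with CpuSyncEnd(). */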
int BufferAllocator::CpuSyncStart(unsigned int dmabuf_fd, SyncType sync_type,
                                  const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                  void* legacy_ion_custom_data) {
    int ret = DoSync(dmabuf_fd, true /* start */, sync_type, legacy_ion_cpu_sync_custom,
                     legacy_ion_custom_data);

    if (ret) PLOG(ERROR) << "CpuSyncStart() failure";
    return ret;
}

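/* Marks the end of a CPU access window started with CpuSyncStart(). */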
int BufferAllocator::CpuSyncEnd(unsigned int dmabuf_fd, SyncType sync_type,
                                const CustomCpuSyncLegacyIon& legacy_ion_cpu_sync_custom,
                                void* legacy_ion_custom_data) {
    int ret = DoSync(dmabuf_fd, false /* start */, sync_type, legacy_ion_cpu_sync_custom,
                     legacy_ion_custom_data);
    if (ret) PLOG(ERROR) << "CpuSyncEnd() failure";

    return ret;
}

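/*
 * Returns the set of DMA-BUF heap names found under /dev/dma_heap/, skipping the "."
 * and ".." entries. The set is empty if the directory cannot be opened.
 */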
std::unordered_set<std::string> BufferAllocator::GetDmabufHeapList() {
    std::unordered_set<std::string> heap_list;
    std::unique_ptr<DIR, int (*)(DIR*)> dir(opendir(kDmaHeapRoot), closedir);

    if (dir) {
        struct dirent* dent;
        while ((dent = readdir(dir.get()))) {
            if (!strcmp(dent->d_name, ".") || !strcmp(dent->d_name, "..")) continue;

            heap_list.insert(dent->d_name);
        }
    }

    return heap_list;
}

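/* Reports whether /dev/ion exists, i.e. whether the kernel still exposes ION. */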
bool BufferAllocator::CheckIonSupport() {
    static bool ion_support = (access(kIonDevice, R_OK) == 0);

    return ion_support;
}