path: root/alloc_ion.cpp (plain)
/*
 * Copyright (C) 2013 ARM Limited. All rights reserved.
 *
 * Copyright (C) 2008 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <string.h>
#include <errno.h>
#include <pthread.h>

#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>

#include <sys/ioctl.h>
#include <sys/mman.h>   /* mmap/munmap for CPU mappings of ION buffers */

#include "alloc_device.h"
#include "gralloc_priv.h"
#include "gralloc_helper.h"
#include "framebuffer_device.h"
#include <linux/ion.h>
#include <ion/ion.h>
#include <linux/errno.h>

#if PLATFORM_SDK_VERSION >= 24
#include "gralloc_usage_ext.h"
#endif

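/*
 * Remember whether the ION chunk heap exists on this device; once an
 * allocation fails with -ENODEV the heap is assumed absent and is not
 * tried again for later allocations.
 */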
bool isChunkHeapAvail = true;

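/*
 * Allocate a graphics buffer from ION. The heap, cache flags and CPU
 * mapping are chosen from the gralloc usage bits; on success the buffer
 * is returned to the caller as a new private_handle_t in *pHandle.
 */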
int alloc_backend_alloc(alloc_device_t* dev, size_t size, int usage, buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(dev->common.module);
    ion_user_handle_t ion_hnd;
    unsigned char *cpu_ptr = NULL;
    int shared_fd;
    int ret = -1;
    unsigned int heap_type;
    int ion_flags = 0;
    static int support_protected = 1; /* initially, assume we support protected memory */
    int lock_state = 0;

#define ION_HEAP_TYPE_SECURE ION_HEAP_TYPE_SYSTEM

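    /*
     * Flag secure (AML) and protected layers. Note that GRALLOC_USAGE_PROTECTED
     * is cleared from the usage bits here, so the protected-heap selection
     * below never triggers and such buffers fall through to the system heap.
     */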
    bool secureOrProtectedLayer = false;
#ifdef GRALLOC_ENABLE_SECURE_LAYER
    if (usage & GRALLOC_USAGE_AML_SECURE)
    {
        secureOrProtectedLayer = true;
    }
#endif

    if (usage & GRALLOC_USAGE_PROTECTED)
    {
        secureOrProtectedLayer = true;
        usage &= ~GRALLOC_USAGE_PROTECTED;
    }
    /* Select heap type based on usage hints */
    if (usage & GRALLOC_USAGE_PROTECTED)
    {
#if defined(ION_HEAP_TYPE_SECURE)
        heap_type = ION_HEAP_TYPE_SECURE;
#else
        AERR("Protected ION memory is not supported on this platform.");
        return -1;
#endif
    }
    else
    {
        heap_type = ION_HEAP_TYPE_SYSTEM;
    }

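    /*
     * Buffers that are read often by the CPU, or written by the hardware
     * video encoder, are allocated cached and with explicit cache sync.
     */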
    if ((usage & GRALLOC_USAGE_SW_READ_MASK) == GRALLOC_USAGE_SW_READ_OFTEN)
    {
        ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
    }

    if (usage & GRALLOC_USAGE_HW_VIDEO_ENCODER)
    {
        ion_flags = ION_FLAG_CACHED | ION_FLAG_CACHED_NEEDS_SYNC;
    }

#ifdef GRALLOC_APP_ALLOC_CONTINUOUS_BUF
    bool layerAllocContinousBuf = false;
#endif

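    /*
     * Pick the ION heap: DMA buffers requested by OMX go to the carveout or
     * custom heap, framebuffer buffers (optionally) to the DMA heap, small
     * composer layers to the chunk/CMA heaps with a system-heap fallback,
     * and everything else to the heap type selected above.
     */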
    if (usage & GRALLOC_USAGE_AML_DMA_BUFFER) // alloc from carveout heap.
    {
#if PLATFORM_SDK_VERSION < 26
        ret = ion_alloc(m->ion_client, size, 0,
                        ION_HEAP_CARVEOUT_MASK,
                        ion_flags, &ion_hnd);
        if (ret != 0)
#endif
        {
            ret = ion_alloc(m->ion_client, size, 0,
                            1 << ION_HEAP_TYPE_CUSTOM, ion_flags, &ion_hnd);
            ALOGE("omx alloc from custom heap ret=%d, errno=%d\n", ret, errno);
        }
#ifdef GRALLOC_APP_ALLOC_CONTINUOUS_BUF
        if (ret == 0) {
            layerAllocContinousBuf = true;
        }
#endif
    }
#if GRALLOC_ALLOC_FB_FROM_ION == 1
    else if (usage & GRALLOC_USAGE_HW_FB) {
        ALOGE("alloc framebuffer %zu", size);
        ret = ion_alloc(m->ion_client, size, 0, 1 << ION_HEAP_TYPE_DMA,
                        ion_flags, &ion_hnd);
    }
#endif
#ifdef GRALLOC_APP_ALLOC_CONTINUOUS_BUF
    /* 8294400 bytes = 1920 x 1080 x 4, i.e. up to a full-HD RGBA layer */
    else if ((size <= 8294400) && (usage & GRALLOC_USAGE_HW_COMPOSER)
            && !((usage & GRALLOC_USAGE_AML_VIDEO_OVERLAY)
                 || (usage & GRALLOC_USAGE_AML_OMX_OVERLAY))) {
        layerAllocContinousBuf = true;
        if (true == isChunkHeapAvail) {
            ret = ion_alloc(m->ion_client, size, 0,
                            1 << ION_HEAP_TYPE_CHUNK, ion_flags, &ion_hnd);
            if (ret == -ENODEV) {
                isChunkHeapAvail = false;
            }
        }
        if (ret != 0) {
            ALOGV("(%d) Failed to alloc ion chunk mem, alloc from ion cma buffer.", ret);
            ret = ion_alloc(m->ion_client, size, 0,
                            1 << ION_HEAP_TYPE_DMA, ion_flags & (~ION_FLAG_CACHED), &ion_hnd);
        }
        if (ret != 0) {
            layerAllocContinousBuf = false;
            ALOGV("(%d) Failed to alloc ion cma|chunk mem, alloc from system ion buffer.", ret);
            ret = ion_alloc(m->ion_client, size, 0, 1 << heap_type,
                            ion_flags, &ion_hnd);
        }
    }
#endif
    else
    {
        ret = ion_alloc(m->ion_client, size, 0, 1 << heap_type,
                        ion_flags, &ion_hnd);
    }

    if (ret != 0)
    {
        AERR("Failed to ion_alloc from ion_client:%d", m->ion_client);
        return -1;
    }

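    /*
     * Export the allocation as a dma-buf fd so other processes and devices
     * can import it, then map it for CPU access unless the buffer is
     * protected.
     */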
    ret = ion_share( m->ion_client, ion_hnd, &shared_fd );
    if ( ret != 0 )
    {
        AERR( "ion_share( %d ) failed", m->ion_client );
        if ( 0 != ion_free( m->ion_client, ion_hnd ) ) AERR( "ion_free( %d ) failed", m->ion_client );
        return -1;
    }

    if (!(usage & GRALLOC_USAGE_PROTECTED))
    {
        cpu_ptr = (unsigned char*)mmap( NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED, shared_fd, 0 );

        if ( MAP_FAILED == cpu_ptr )
        {
            AERR( "ion_map( %d ) failed", m->ion_client );
            if ( 0 != ion_free( m->ion_client, ion_hnd ) ) AERR( "ion_free( %d ) failed", m->ion_client );
            close( shared_fd );
            return -1;
        }
        lock_state = private_handle_t::LOCK_STATE_MAPPED;
    }

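    /*
     * Wrap the allocation in a private_handle_t so the rest of gralloc can
     * track the shared fd, ION handle and mapping state. On failure, tear
     * down everything that was set up above.
     */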
    private_handle_t *hnd = new private_handle_t( private_handle_t::PRIV_FLAGS_USES_ION /*TODO ion extend | priv_heap_flag*/, usage, size, cpu_ptr,
                                                  lock_state );

    if ( NULL != hnd )
    {
        hnd->share_fd = shared_fd;
        hnd->ion_hnd = ion_hnd;
        /*TODO ion extend hnd->min_pgsz = min_pgsz; */
        *pHandle = hnd;
#ifdef GRALLOC_APP_ALLOC_CONTINUOUS_BUF
        if (layerAllocContinousBuf)
        {
            hnd->flags |= private_handle_t::PRIV_FLAGS_CONTINUOUS_BUF;
        }
#endif

#ifdef GRALLOC_ENABLE_SECURE_LAYER
        if (secureOrProtectedLayer)
        {
            hnd->flags |= private_handle_t::PRIV_FLAGS_SECURE_PROTECTED;
        }
#endif
        return 0;
    }
    else
    {
        AERR( "Gralloc out of mem for ion_client:%d", m->ion_client );
    }

    close( shared_fd );

    if (!(usage & GRALLOC_USAGE_PROTECTED))
    {
        ret = munmap( cpu_ptr, size );
        if ( 0 != ret ) AERR( "munmap failed for base:%p size: %zu", cpu_ptr, size );
    }

    ret = ion_free( m->ion_client, ion_hnd );
    if ( 0 != ret ) AERR( "ion_free( %d ) failed", m->ion_client );
    return -1;
}

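/*
 * Obtain a dma-buf fd for framebuffer slot 'idx'. On GRALLOC_T83X builds the
 * fd is exported from the fbdev driver via FBIOGET_DMABUF, falling back to
 * FBIOGET_OSD_DMABUF; other builds simply report success.
 */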
int alloc_backend_alloc_framebuffer(private_module_t* m, private_handle_t* hnd, uint32_t idx)
{
#ifdef GRALLOC_T83X
    framebuffer_mapper_t* m_fb = NULL;

#ifdef DEBUG_EXTERNAL_DISPLAY_ON_PANEL
    ALOGD("always alloc from fb0");
    m_fb = &(m->fb_primary);
#else
    if (hnd->usage & GRALLOC_USAGE_EXTERNAL_DISP)
    {
        m_fb = &(m->fb_external);
    }
    else
    {
        m_fb = &(m->fb_primary);
    }
#endif
    struct fb_dmabuf_export fb_dma_buf;
    int res;

    fb_dma_buf.buffer_idx = idx;
    res = ioctl( m_fb->framebuffer->fd, FBIOGET_DMABUF, &fb_dma_buf );
    if (res == 0)
    {
        hnd->share_fd = fb_dma_buf.fd;
        return 0;
    }
    else
    {
        AINF("FBIOGET_DMABUF ioctl failed(%d). try FBIOGET_OSD_DMABUF", res);
        res = ioctl( m_fb->framebuffer->fd, FBIOGET_OSD_DMABUF, &fb_dma_buf );
        if (res == 0)
        {
            hnd->share_fd = fb_dma_buf.fd;
            return 0;
        }
        else
        {
            AINF("FBIOGET_OSD_DMABUF ioctl failed(%d). See gralloc_priv.h and the integration manual for vendor framebuffer integration", res);
#if MALI_ARCHITECTURE_UTGARD
            /* On Utgard we do not have a strict requirement of DMA-BUF integration */
            return 0;
#else
            return -1;
#endif
        }
    }
#else
    return 0;
#endif
}

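/*
 * Release an allocation made by this backend: unmap the CPU mapping if one
 * exists, close the shared dma-buf fd and free the ION handle. Framebuffer
 * buffers are owned by the fbdev driver and are left untouched.
 */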
void alloc_backend_alloc_free(private_handle_t const* hnd, private_module_t* m)
{
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)
    {
        return;
    }
    else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_UMP)
    {
        AERR( "Can't free ump memory for handle:%p. Not supported.", hnd );
    }
    else if ( hnd->flags & private_handle_t::PRIV_FLAGS_USES_ION )
    {
        /* The buffer might already be unregistered, so ensure we still have a valid mapping */
        if ( 0 != hnd->base )
        {
            if ( 0 != munmap( (void*)hnd->base, hnd->size ) ) AERR( "Failed to munmap handle %p", hnd );
        }
        close( hnd->share_fd );
        if ( 0 != ion_free( m->ion_client, hnd->ion_hnd ) ) AERR( "Failed to ion_free( ion_client: %d ion_hnd: %d )", m->ion_client, hnd->ion_hnd );
        memset( (void*)hnd, 0, sizeof( *hnd ) );
    }
}

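/* Open the ION client used for all allocations made through this device. */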
int alloc_backend_open(alloc_device_t *dev)
{
    private_module_t *m = reinterpret_cast<private_module_t *>(dev->common.module);
    m->ion_client = ion_open();
    if ( m->ion_client < 0 )
    {
        AERR( "ion_open failed with %s", strerror(errno) );
        return -1;
    }

    return 0;
}

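/* Close the ION client and destroy the allocator device. */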
int alloc_backend_close(struct hw_device_t *device)
{
    alloc_device_t* dev = reinterpret_cast<alloc_device_t*>(device);
    if (dev)
    {
        private_module_t *m = reinterpret_cast<private_module_t*>(dev->common.module);
        if ( 0 != ion_close(m->ion_client) ) AERR( "Failed to close ion_client: %d err=%s", m->ion_client, strerror(errno));
        delete dev;
    }
    return 0;
}