make gralloc-qsd8k closer to the generic gralloc so it's easier to maintain

This commit is contained in:
Mathias Agopian 2009-08-18 18:39:57 -07:00
parent edc0cd6c3f
commit 78087b2664
7 changed files with 437 additions and 256 deletions

View File

@ -22,13 +22,15 @@
// align all the memory blocks on a cache-line boundary
const int SimpleBestFitAllocator::kMemoryAlign = 32;
SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
SimpleBestFitAllocator::SimpleBestFitAllocator()
: mHeapSize(0)
{
size_t pagesize = getpagesize();
mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
}
chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
mList.insertHead(node);
SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
: mHeapSize(0)
{
setSize(size);
}
SimpleBestFitAllocator::~SimpleBestFitAllocator()
@ -38,14 +40,27 @@ SimpleBestFitAllocator::~SimpleBestFitAllocator()
}
}
// One-shot sizing of the heap: rounds |size| up to a whole number of pages,
// then seeds the free list with a single chunk covering the entire heap.
// Returns the original (unrounded) size on success, or -EINVAL if the heap
// has already been sized (by this method or by the sizing constructor).
ssize_t SimpleBestFitAllocator::setSize(size_t size)
{
Locker::Autolock _l(mLock);
// A non-zero mHeapSize means the allocator was already initialized.
if (mHeapSize != 0) return -EINVAL;
size_t pagesize = getpagesize();
// Round up to the next page boundary.
mHeapSize = ((size + pagesize-1) & ~(pagesize-1));
// The free list is bookkept in kMemoryAlign-sized units, so the initial
// chunk spans mHeapSize / kMemoryAlign units starting at offset 0.
chunk_t* node = new chunk_t(0, mHeapSize / kMemoryAlign);
mList.insertHead(node);
return size;
}
// Returns the page-rounded heap size (0 until setSize() has succeeded).
size_t SimpleBestFitAllocator::size() const
{
return mHeapSize;
}
size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
// Thread-safe allocation entry point. Returns the byte offset of the new
// block within the heap on success, -EINVAL if the heap has not been sized
// yet, or a negative value propagated from alloc() on failure
// (presumably -ENOMEM when no chunk fits -- confirm in alloc()).
ssize_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
{
Locker::Autolock _l(mLock);
if (mHeapSize == 0) return -EINVAL;
ssize_t offset = alloc(size, flags);
return offset;
}
@ -53,6 +68,7 @@ size_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
ssize_t SimpleBestFitAllocator::deallocate(size_t offset)
{
Locker::Autolock _l(mLock);
if (mHeapSize == 0) return -EINVAL;
chunk_t const * const freed = dealloc(offset);
if (freed) {
return 0;

View File

@ -21,7 +21,7 @@
#include <stdint.h>
#include <sys/types.h>
#include "gralloc_priv.h"
#include "gr.h"
// ----------------------------------------------------------------------------
@ -95,12 +95,15 @@ class SimpleBestFitAllocator
{
public:
SimpleBestFitAllocator(size_t size);
virtual ~SimpleBestFitAllocator();
SimpleBestFitAllocator();
SimpleBestFitAllocator(size_t size);
~SimpleBestFitAllocator();
virtual size_t allocate(size_t size, uint32_t flags = 0);
virtual ssize_t deallocate(size_t offset);
virtual size_t size() const;
ssize_t setSize(size_t size);
ssize_t allocate(size_t size, uint32_t flags = 0);
ssize_t deallocate(size_t offset);
size_t size() const;
private:
struct chunk_t {
@ -123,5 +126,4 @@ private:
size_t mHeapSize;
};
#endif /* GRALLOC_ALLOCATOR_H_ */

View File

@ -14,28 +14,40 @@
* limitations under the License.
*/
#include <dlfcn.h>
#include <errno.h>
#include <fcntl.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <cutils/atomic.h>
#include <dlfcn.h>
#include <cutils/ashmem.h>
#include <cutils/log.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdlib.h>
#include <cutils/log.h>
#include <cutils/atomic.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include "gralloc_priv.h"
#include "gr.h"
/*****************************************************************************/
// should be a build option
#define SUPPORTS_UPDATE_ON_DEMAND 1
#define SUPPORTS_UPDATE_ON_DEMAND 0
// numbers of buffers for page flipping
#define NUM_BUFFERS 2
enum {
PAGE_FLIP = 0x00000001,
LOCKED = 0x00000002
@ -86,13 +98,14 @@ static int fb_post(struct framebuffer_device_t* dev, buffer_handle_t buffer)
private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(buffer);
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
if (m->currentBuffer) {
m->base.unlock(&m->base, m->currentBuffer);
m->currentBuffer = 0;
}
if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
m->base.lock(&m->base, buffer,
private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
0, 0, m->info.xres, m->info.yres, NULL);
@ -106,13 +119,14 @@ static int fb_post(struct framebuffer_device_t* dev, buffer_handle_t buffer)
return -errno;
}
m->currentBuffer = buffer;
} else {
// If we can't do the page_flip, just copy the buffer to the front
// FIXME: use copybit HAL instead of memcpy
void* fb_vaddr;
void* buffer_vaddr;
m->base.lock(&m->base, m->framebuffer,
GRALLOC_USAGE_SW_WRITE_RARELY,
0, 0, m->info.xres, m->info.yres,
@ -290,7 +304,7 @@ int mapFrameBufferLocked(struct private_module_t* module)
int err;
size_t fbSize = roundUpToPageSize(finfo.line_length * info.yres_virtual);
module->framebuffer = new private_handle_t(dup(fd), fbSize,
private_handle_t::PRIV_FLAGS_USES_PMEM, BUFFER_TYPE_FB);
private_handle_t::PRIV_FLAGS_USES_PMEM);
module->numBuffers = info.yres_virtual / info.yres;
module->bufferMask = 0;
@ -330,7 +344,6 @@ int fb_device_open(hw_module_t const* module, const char* name,
{
int status = -EINVAL;
if (!strcmp(name, GRALLOC_HARDWARE_FB0)) {
alloc_device_t* gralloc_device;
status = gralloc_open(module, &gralloc_device);
if (status < 0)

63
libgralloc-qsd8k/gr.h Normal file
View File

@ -0,0 +1,63 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GR_H_
#define GR_H_
#include <stdint.h>
#ifdef HAVE_ANDROID_OS // just want PAGE_SIZE define
# include <asm/page.h>
#else
# include <sys/user.h>
#endif
#include <limits.h>
#include <sys/cdefs.h>
#include <hardware/gralloc.h>
#include <pthread.h>
#include <errno.h>
#include <cutils/native_handle.h>
/*****************************************************************************/
struct private_module_t;
struct private_handle_t;
// Rounds x up to the next multiple of the platform page size.
// PAGE_SIZE is assumed to be a power of two, so masking works.
inline size_t roundUpToPageSize(size_t x) {
    const size_t mask = PAGE_SIZE - 1;
    return (x + mask) & ~mask;
}
int mapFrameBufferLocked(struct private_module_t* module);
int terminateBuffer(gralloc_module_t const* module, private_handle_t* hnd);
/*****************************************************************************/
// Thin RAII-friendly wrapper around a pthread mutex.
class Locker {
    pthread_mutex_t mMutex;
public:
    inline Locker() { pthread_mutex_init(&mMutex, 0); }
    inline ~Locker() { pthread_mutex_destroy(&mMutex); }
    // Blocks until the calling thread owns the mutex.
    inline void lock() { pthread_mutex_lock(&mMutex); }
    // Releases the mutex; caller must currently own it.
    inline void unlock() { pthread_mutex_unlock(&mMutex); }
    // Scope guard: acquires the Locker on construction and releases it
    // when the guard goes out of scope.
    class Autolock {
        Locker& mLocker;
    public:
        inline Autolock(Locker& locker) : mLocker(locker) { mLocker.lock(); }
        inline ~Autolock() { mLocker.unlock(); }
    };
};
#endif /* GR_H_ */

View File

@ -19,6 +19,8 @@
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
@ -41,10 +43,16 @@
/*****************************************************************************/
#define ALLOCATORREGION_RESERVED_SIZE (3<<20)
static SimpleBestFitAllocator sAllocator;
static SimpleBestFitAllocator sAllocatorGPU(ALLOCATORREGION_RESERVED_SIZE);
/*****************************************************************************/
struct gralloc_context_t {
alloc_device_t device;
/* our private data here */
int bufferType;
};
static int gralloc_alloc_buffer(alloc_device_t* dev,
@ -103,8 +111,8 @@ struct private_module_t HAL_MODULE_INFO_SYM = {
pmem_master: -1,
pmem_master_base: 0,
master_phys: 0,
gpu_master: -1,
gpu_master_base: 0
gpu: -1,
gpu_base: 0
};
/*****************************************************************************/
@ -145,7 +153,7 @@ static int gralloc_alloc_framebuffer_locked(alloc_device_t* dev,
intptr_t vaddr = intptr_t(m->framebuffer->base);
private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), size,
private_handle_t::PRIV_FLAGS_USES_PMEM |
private_handle_t::PRIV_FLAGS_FRAMEBUFFER, BUFFER_TYPE_FB);
private_handle_t::PRIV_FLAGS_FRAMEBUFFER);
// find a free slot
for (uint32_t i=0 ; i<numBuffers ; i++) {
@ -155,10 +163,9 @@ static int gralloc_alloc_framebuffer_locked(alloc_device_t* dev,
}
vaddr += bufferSize;
}
hnd->base = vaddr;
hnd->offset = vaddr - intptr_t(m->framebuffer->base);
hnd->phys = intptr_t(m->framebuffer->phys) + hnd->offset;
*pHandle = hnd;
return 0;
@ -175,65 +182,116 @@ static int gralloc_alloc_framebuffer(alloc_device_t* dev,
return err;
}
static SimpleBestFitAllocator sAllocator(10*1024*1024);
static SimpleBestFitAllocator sGPUAllocator(3*1024*1024);
static int init_pmem_area(private_module_t* m, int type)
static int init_pmem_area_locked(private_module_t* m)
{
int err = 0;
int master_fd = -1;
size_t master_heap_size;
if(type == BUFFER_TYPE_GPU0)
{
master_fd = open("/dev/pmem_gpu0", O_RDWR, 0);
master_heap_size = sGPUAllocator.size();
}
else if(type == BUFFER_TYPE_GPU1)
{
master_fd = open("/dev/pmem_gpu1", O_RDWR, 0);
master_heap_size = sGPUAllocator.size();
}
else if (type == BUFFER_TYPE_PMEM)
{
master_fd = open("/dev/pmem", O_RDWR, 0);
master_heap_size = sAllocator.size();
}
int master_fd = open("/dev/pmem", O_RDWR, 0);
if (master_fd >= 0) {
void* base = mmap(0, master_heap_size,
size_t size;
pmem_region region;
if (ioctl(master_fd, PMEM_GET_TOTAL_SIZE, &region) < 0) {
LOGE("PMEM_GET_TOTAL_SIZE failed, limp mode");
size = 8<<20; // 8 MiB
} else {
size = region.len;
}
sAllocator.setSize(size);
void* base = mmap(0, size,
PROT_READ|PROT_WRITE, MAP_SHARED, master_fd, 0);
if (base == MAP_FAILED) {
LOGE("Enter init_pmem_area error: %d", -errno);
err = -errno;
base = 0;
close(master_fd);
master_fd = -1;
}
if(type == BUFFER_TYPE_PMEM){
m->pmem_master = master_fd;
m->pmem_master_base = base;
}
else
{
m->gpu_master = master_fd;
m->gpu_master_base = base;
pmem_region region;
err = ioctl(m->gpu_master, PMEM_GET_PHYS, &region);
if(err < 0)
{
LOGE("init pmem: master ioctl failed %d", -errno);
}
else
{
m->master_phys = (unsigned long)region.offset;
}
}
m->pmem_master = master_fd;
m->pmem_master_base = base;
} else {
err = -errno;
}
return err;
}
// Lazily initializes the pmem master heap exactly once per process.
// m->pmem_master doubles as the state machine: -1 means "not tried yet",
// any other negative value is a sticky error from a previous failed
// attempt, and >= 0 is the open master fd. Returns 0 on success or a
// negative error code otherwise. Serialized by m->lock.
static int init_pmem_area(private_module_t* m)
{
pthread_mutex_lock(&m->lock);
int err = m->pmem_master;
if (err == -1) {
// first time, try to initialize pmem
err = init_pmem_area_locked(m);
if (err) {
// latch the failure so later callers fail fast without retrying
m->pmem_master = err;
}
} else if (err < 0) {
// pmem couldn't be initialized, never use it
} else {
// pmem OK
err = 0;
}
pthread_mutex_unlock(&m->lock);
return err;
}
// Opens /dev/pmem_gpu1, maps the whole GPU arena (sized by sAllocatorGPU)
// and records its physical base address in m->master_phys via
// PMEM_GET_PHYS. Caller must hold m->lock (see init_gpu_area()).
// On success m->gpu / m->gpu_base describe the mapping and 0 is returned;
// on failure a negative errno is returned.
static int init_gpu_area_locked(private_module_t* m)
{
int err = 0;
int gpu = open("/dev/pmem_gpu1", O_RDWR, 0);
LOGE_IF(gpu<0, "could not open /dev/pmem_gpu1 (%s)", strerror(errno));
if (gpu >= 0) {
size_t size = sAllocatorGPU.size();
void* base = mmap(0, size,
PROT_READ|PROT_WRITE, MAP_SHARED, gpu, 0);
if (base == MAP_FAILED) {
LOGE("mmap /dev/pmem_gpu1 (%s)", strerror(errno));
err = -errno;
base = 0;
close(gpu);
gpu = -1;
} else {
// Mapping succeeded: ask the driver for the region's physical
// address so GPU buffers can expose hnd->phys.
pmem_region region;
err = ioctl(gpu, PMEM_GET_PHYS, &region);
if(err < 0) {
LOGE("init pmem: master ioctl failed %d", -errno);
} else {
m->master_phys = (unsigned long)region.offset;
}
}
m->gpu = gpu;
m->gpu_base = base;
} else {
// open() failed; report -errno. NOTE(review): m->gpu is set to 0 here
// but the caller (init_gpu_area) overwrites it with the error code.
err = -errno;
m->gpu = 0;
m->gpu_base = 0;
}
return err;
}
// Lazily initializes the GPU pmem arena exactly once per process,
// mirroring init_pmem_area(): m->gpu is -1 when untried, a sticky
// negative error after a failed attempt, and >= 0 (the fd) on success.
// Returns 0 on success or a negative error code. Serialized by m->lock.
static int init_gpu_area(private_module_t* m)
{
pthread_mutex_lock(&m->lock);
int err = m->gpu;
if (err == -1) {
// first time, try to initialize gpu
err = init_gpu_area_locked(m);
if (err) {
// latch the failure so later callers fail fast without retrying
m->gpu = err;
}
} else if (err < 0) {
// gpu couldn't be initialized, never use it
} else {
// gpu OK
err = 0;
}
pthread_mutex_unlock(&m->lock);
return err;
}
static int gralloc_alloc_buffer(alloc_device_t* dev,
size_t size, int usage, buffer_handle_t* pHandle)
{
@ -241,105 +299,66 @@ static int gralloc_alloc_buffer(alloc_device_t* dev,
int flags = 0;
int fd = -1;
int gpu_fd = -1;
void* base = 0;
int offset = 0;
int lockState = 0;
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
gralloc_context_t *context = (gralloc_context_t *) dev;
int bufferType;
size = roundUpToPageSize(size);
if (usage & (GRALLOC_USAGE_HW_2D | GRALLOC_USAGE_HW_RENDER)) {
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
bufferType = context->bufferType;
}
else if (usage & GRALLOC_USAGE_HW_TEXTURE) {
if (usage & GRALLOC_USAGE_HW_TEXTURE) {
// enable pmem in that case, so our software GL can fallback to
// the copybit module.
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
bufferType = BUFFER_TYPE_PMEM;
}
if (usage & GRALLOC_USAGE_HW_2D) {
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
int phys = 0;
if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) == 0) {
try_ashmem:
fd = ashmem_create_region("Buffer", size);
fd = ashmem_create_region("gralloc-buffer", size);
if (fd < 0) {
LOGE("couldn't create ashmem (%s)", strerror(-errno));
LOGE("couldn't create ashmem (%s)", strerror(errno));
err = -errno;
}
} else {
int master_fd = -1;
if(bufferType == BUFFER_TYPE_PMEM)
{
master_fd = m->pmem_master;
}
else
{
master_fd = m->gpu_master;
}
} else if ((usage & GRALLOC_USAGE_HW_RENDER) == 0) {
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->lock);
if (master_fd == -1) {
err = init_pmem_area(m, bufferType);
}
pthread_mutex_unlock(&m->lock);
if(bufferType == BUFFER_TYPE_PMEM)
{
master_fd = m->pmem_master;
}
else
{
master_fd = m->gpu_master;
}
if (master_fd >= 0) {
err = init_pmem_area(m);
if (err == 0) {
// PMEM buffers are always mmapped
if(bufferType == BUFFER_TYPE_PMEM)
{
base = m->pmem_master_base;
offset = sAllocator.allocate(size);
}
else
{
base = m->gpu_master_base;
offset = sGPUAllocator.allocate(size);
}
base = m->pmem_master_base;
lockState |= private_handle_t::LOCK_STATE_MAPPED;
offset = sAllocator.allocate(size);
if (offset < 0) {
// no more pmem memory
err = -ENOMEM;
} else {
if(bufferType == BUFFER_TYPE_GPU0)
fd = open("/dev/pmem_gpu0", O_RDWR, 0);
else if(bufferType == BUFFER_TYPE_GPU1)
fd = open("/dev/pmem_gpu1", O_RDWR, 0);
else if (bufferType == BUFFER_TYPE_PMEM)
fd = open("/dev/pmem", O_RDWR, 0);
struct pmem_region sub = { offset, size };
// now create the "sub-heap"
fd = open("/dev/pmem", O_RDWR, 0);
err = fd < 0 ? fd : 0;
// and connect to it
if (err == 0)
err = ioctl(fd, PMEM_CONNECT, m->pmem_master);
// and make it available to the client process
if (err == 0)
err = ioctl(fd, PMEM_MAP, &sub);
err = ioctl(fd, PMEM_CONNECT, master_fd);
if (err < 0) {
err = -errno;
} else {
struct pmem_region sub = { offset, size };
err = ioctl(fd, PMEM_MAP, &sub);
}
if (err < 0) {
close(fd);
if(bufferType == BUFFER_TYPE_PMEM)
sAllocator.deallocate(offset);
else
sGPUAllocator.deallocate(offset);
sAllocator.deallocate(offset);
fd = -1;
}
memset((char*)base + offset, 0, size);
//LOGD_IF(!err, "allocating pmem size=%d, offset=%d", size, offset);
}
} else {
@ -349,20 +368,51 @@ try_ashmem:
err = 0;
goto try_ashmem;
} else {
LOGE("couldn't open pmem (%s)", strerror(-errno));
LOGE("couldn't open pmem (%s)", strerror(errno));
}
}
}
} else {
// looks like we want 3D...
flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
flags |= private_handle_t::PRIV_FLAGS_USES_GPU;
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
err = init_gpu_area(m);
if (err == 0) {
// GPU buffers are always mmapped
base = m->gpu_base;
lockState |= private_handle_t::LOCK_STATE_MAPPED;
offset = sAllocatorGPU.allocate(size);
if (offset < 0) {
// no more pmem memory
err = -ENOMEM;
} else {
LOGD("allocating GPU size=%d, offset=%d", size, offset);
fd = open("/dev/null", O_RDONLY); // just so marshalling doesn't fail
gpu_fd = m->gpu;
memset((char*)base + offset, 0, size);
}
} else {
// not enough memory, try ashmem
flags &= ~private_handle_t::PRIV_FLAGS_USES_GPU;
err = 0;
goto try_ashmem;
}
}
if (err == 0) {
private_handle_t* hnd = new private_handle_t(fd, size, flags, bufferType);
private_handle_t* hnd = new private_handle_t(fd, size, flags);
hnd->offset = offset;
hnd->base = int(base)+offset;
hnd->lockState = lockState;
if(bufferType == BUFFER_TYPE_GPU1)
hnd->phys = m->master_phys + offset;
else
hnd->phys = 0;
hnd->gpu_fd = gpu_fd;
if (flags & private_handle_t::PRIV_FLAGS_USES_GPU) {
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
hnd->phys = m->master_phys + offset;
}
*pHandle = hnd;
}
@ -380,32 +430,56 @@ static int gralloc_alloc(alloc_device_t* dev,
if (!pHandle || !pStride)
return -EINVAL;
int align = 4;
int bpp = 0;
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
bpp = 4;
break;
case HAL_PIXEL_FORMAT_RGB_565:
case HAL_PIXEL_FORMAT_RGBA_5551:
case HAL_PIXEL_FORMAT_RGBA_4444:
bpp = 2;
break;
default:
return -EINVAL;
size_t size, stride;
if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP ||
format == HAL_PIXEL_FORMAT_YCbCr_422_SP)
{
// FIXME: there is no way to return the vstride
int vstride;
stride = (w + 1) & ~1;
switch (format) {
case HAL_PIXEL_FORMAT_YCbCr_420_SP:
size = stride * h * 2;
break;
case HAL_PIXEL_FORMAT_YCbCr_422_SP:
vstride = (h+1) & ~1;
size = (stride * vstride) + (w/2 * h/2) * 2;
break;
default:
return -EINVAL;
}
} else {
int align = 4;
int bpp = 0;
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
bpp = 4;
break;
case HAL_PIXEL_FORMAT_RGB_888:
bpp = 3;
break;
case HAL_PIXEL_FORMAT_RGB_565:
case HAL_PIXEL_FORMAT_RGBA_5551:
case HAL_PIXEL_FORMAT_RGBA_4444:
bpp = 2;
break;
default:
return -EINVAL;
}
size_t bpr = (w*bpp + (align-1)) & ~(align-1);
size = bpr * h;
stride = bpr / bpp;
}
size_t bpr = (w*bpp + (align-1)) & ~(align-1);
size_t size = bpr * h;
size_t stride = bpr / bpp;
int err;
if (usage & GRALLOC_USAGE_HW_FB) {
err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
} else {
err = gralloc_alloc_buffer(dev, size, usage, pHandle);
}
if (err < 0) {
return err;
}
@ -428,23 +502,31 @@ static int gralloc_free(alloc_device_t* dev,
const size_t bufferSize = m->finfo.line_length * m->info.yres;
int index = (hnd->base - m->framebuffer->base) / bufferSize;
m->bufferMask &= ~(1<<index);
} else if (true || hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
if (hnd->fd >= 0) {
if(hnd->bufferType == BUFFER_TYPE_PMEM){
sAllocator.deallocate(hnd->offset);
memset((void *)hnd->base, 0, hnd->size);
} else {
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
if (hnd->fd >= 0) {
struct pmem_region sub = { hnd->offset, hnd->size };
int err = ioctl(hnd->fd, PMEM_UNMAP, &sub);
LOGE_IF(err<0, "PMEM_UNMAP failed (%s), "
"fd=%d, sub.offset=%lu, sub.size=%lu",
strerror(errno), hnd->fd, hnd->offset, hnd->size);
if (err == 0) {
// we can't deallocate the memory in case of UNMAP failure
// because it would give that process access to someone else's
// surfaces, which would be a security breach.
sAllocator.deallocate(hnd->offset);
}
}
else {
sGPUAllocator.deallocate(hnd->offset);
memset((void *)hnd->base, 0, hnd->size);
}
}
} else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_GPU) {
LOGD("freeing GPU buffer at %d", hnd->offset);
sAllocatorGPU.deallocate(hnd->offset);
}
gralloc_module_t* module = reinterpret_cast<gralloc_module_t*>(
dev->common.module);
terminateBuffer(module, const_cast<private_handle_t*>(hnd));
}
gralloc_module_t* m = reinterpret_cast<gralloc_module_t*>(
dev->common.module);
gralloc_unregister_buffer(m, handle);
close(hnd->fd);
delete hnd;
return 0;
@ -483,7 +565,7 @@ int gralloc_device_open(const hw_module_t* module, const char* name,
dev->device.alloc = gralloc_alloc;
dev->device.free = gralloc_free;
dev->bufferType = BUFFER_TYPE_GPU1;
*device = &dev->device.common;
status = 0;
} else {

View File

@ -18,8 +18,6 @@
#define GRALLOC_PRIV_H_
#include <stdint.h>
#include <errno.h>
#include <asm/page.h>
#include <limits.h>
#include <sys/cdefs.h>
#include <hardware/gralloc.h>
@ -29,57 +27,17 @@
#include <cutils/native_handle.h>
#if HAVE_ANDROID_OS
#include <linux/fb.h>
#endif
#ifdef __cplusplus
extern "C" {
#endif
enum {
BUFFER_TYPE_GPU0 = 0,
BUFFER_TYPE_GPU1 = 1,
BUFFER_TYPE_FB = 2,
BUFFER_TYPE_PMEM = 3
};
/*****************************************************************************/
#ifdef __cplusplus
inline size_t roundUpToPageSize(size_t x) {
return (x + (PAGESIZE-1)) & ~(PAGESIZE-1);
}
int mapFrameBufferLocked(struct private_module_t* module);
#endif //__cplusplus
/*****************************************************************************/
#ifdef __cplusplus
class Locker {
pthread_mutex_t mutex;
public:
class Autolock {
Locker& locker;
public:
inline Autolock(Locker& locker) : locker(locker) { locker.lock(); }
inline ~Autolock() { locker.unlock(); }
};
inline Locker() { pthread_mutex_init(&mutex, 0); }
inline ~Locker() { pthread_mutex_destroy(&mutex); }
inline void lock() { pthread_mutex_lock(&mutex); }
inline void unlock() { pthread_mutex_unlock(&mutex); }
};
#endif //__cplusplus
/*****************************************************************************/
struct private_module_t;
struct private_handle_t;
struct private_module_t {
struct gralloc_module_t base;
gralloc_module_t base;
struct private_handle_t* framebuffer;
private_handle_t* framebuffer;
uint32_t flags;
uint32_t numBuffers;
uint32_t bufferMask;
@ -88,8 +46,8 @@ struct private_module_t {
int pmem_master;
void* pmem_master_base;
unsigned long master_phys;
int gpu_master;
void* gpu_master_base;
int gpu;
void* gpu_base;
struct fb_var_screeninfo info;
struct fb_fix_screeninfo finfo;
@ -104,15 +62,18 @@ struct private_module_t {
};
/*****************************************************************************/
#ifdef __cplusplus
struct private_handle_t : public native_handle
struct private_handle_t : public native_handle {
#else
struct private_handle_t
struct private_handle_t {
struct native_handle nativeHandle;
#endif
{
enum {
PRIV_FLAGS_FRAMEBUFFER = 0x00000001,
PRIV_FLAGS_USES_PMEM = 0x00000002,
PRIV_FLAGS_USES_GPU = 0x00000004,
};
enum {
@ -121,15 +82,15 @@ struct private_handle_t
LOCK_STATE_READ_MASK = 0x3FFFFFFF
};
#ifndef __cplusplus
native_handle nativeHandle;
#endif
// file-descriptors
int fd;
// ints
int magic;
int flags;
int size;
int offset;
int gpu_fd;
// FIXME: the attributes below should be out-of-line
int base;
int lockState;
@ -139,35 +100,35 @@ struct private_handle_t
int pid;
#ifdef __cplusplus
static const int sNumInts = 10;
static const int sNumInts = 11;
static const int sNumFds = 1;
static const int sMagic = 0x3141592;
static const int sMagic = 0x3141592; // FIXME: should be 'msm8'
private_handle_t(int fd, int size, int flags, int type) :
private_handle_t(int fd, int size, int flags) :
fd(fd), magic(sMagic), flags(flags), size(size), offset(0),
base(0), lockState(0), writeOwner(0), pid(getpid())
{
version = sizeof(native_handle);
numInts = sNumInts;
numFds = sNumFds;
bufferType = type;
}
~private_handle_t() {
magic = 0;
}
bool usesPhysicallyContiguousMemory() {
return (flags & PRIV_FLAGS_USES_PMEM) != 0;
return (flags & (PRIV_FLAGS_USES_PMEM|PRIV_FLAGS_USES_GPU)) != 0;
}
static int validate(const native_handle* h) {
const private_handle_t* hnd = (const private_handle_t*)h;
if (!h || h->version != sizeof(native_handle) ||
h->numInts!=sNumInts || h->numFds!=sNumFds) {
h->numInts != sNumInts || h->numFds != sNumFds ||
hnd->magic != sMagic)
{
LOGE("invalid gralloc handle (at %p)", h);
return -EINVAL;
}
const private_handle_t* hnd = (const private_handle_t*)h;
if (hnd->magic != sMagic)
return -EINVAL;
return 0;
}
@ -177,12 +138,7 @@ struct private_handle_t
}
return NULL;
}
#endif //__cplusplus
#endif
};
#ifdef __cplusplus
}
#endif
#endif /* GRALLOC_PRIV_H_ */

View File

@ -18,6 +18,7 @@
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
@ -35,6 +36,14 @@
// we need this for now because pmem cannot mmap at an offset
#define PMEM_HACK 1
/* desktop Linux needs a little help with gettid() */
#if defined(ARCH_X86) && !defined(HAVE_ANDROID_OS)
#define __KERNEL__
# include <linux/unistd.h>
pid_t gettid() { return syscall(__NR_gettid);}
#undef __KERNEL__
#endif
/*****************************************************************************/
static int gralloc_map(gralloc_module_t const* module,
@ -50,7 +59,9 @@ static int gralloc_map(gralloc_module_t const* module,
void* mappedAddress = mmap(0, size,
PROT_READ|PROT_WRITE, MAP_SHARED, hnd->fd, 0);
if (mappedAddress == MAP_FAILED) {
LOGE("Could not mmap %s", strerror(errno));
LOGE("Could not mmap handle %p, fd=%d (%s)",
handle, hnd->fd, strerror(errno));
hnd->base = 0;
return -errno;
}
hnd->base = intptr_t(mappedAddress) + hnd->offset;
@ -66,7 +77,14 @@ static int gralloc_unmap(gralloc_module_t const* module,
{
private_handle_t* hnd = (private_handle_t*)handle;
if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
if (munmap((void*)hnd->base, hnd->size) < 0) {
void* base = (void*)hnd->base;
size_t size = hnd->size;
#if PMEM_HACK
base = (void*)(intptr_t(base) - hnd->offset);
size += hnd->offset;
#endif
//LOGD("unmapping from %p, size=%d", base, size);
if (munmap(base, size) < 0) {
LOGE("Could not unmap %s", strerror(errno));
}
}
@ -120,7 +138,7 @@ int gralloc_unregister_buffer(gralloc_module_t const* module,
private_handle_t* hnd = (private_handle_t*)handle;
LOGE_IF(hnd->lockState & private_handle_t::LOCK_STATE_READ_MASK,
"handle %p still locked (state=%08x)",
"[unregister] handle %p still locked (state=%08x)",
hnd, hnd->lockState);
// never unmap buffers that were created in this process
@ -135,6 +153,37 @@ int gralloc_unregister_buffer(gralloc_module_t const* module,
return 0;
}
// Final teardown of a buffer handle in the allocating process: undoes any
// mapping created by a lock operation. Always returns 0.
int terminateBuffer(gralloc_module_t const* module,
private_handle_t* hnd)
{
/*
* If the buffer has been mapped during a lock operation, it's time
* to un-map it. It's an error to be here with a locked buffer.
*/
LOGE_IF(hnd->lockState & private_handle_t::LOCK_STATE_READ_MASK,
"[terminate] handle %p still locked (state=%08x)",
hnd, hnd->lockState);
if (hnd->lockState & private_handle_t::LOCK_STATE_MAPPED) {
// this buffer was mapped, unmap it now
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
if (hnd->pid != getpid()) {
// ... unless it's a "master" pmem buffer, that is a buffer
// mapped in the process it's been allocated.
// (see gralloc_alloc_buffer())
gralloc_unmap(module, hnd);
}
} else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_GPU) {
// XXX: for now do nothing here
} else {
// ashmem-backed buffer: plain munmap via gralloc_unmap()
gralloc_unmap(module, hnd);
}
}
return 0;
}
int gralloc_lock(gralloc_module_t const* module,
buffer_handle_t handle, int usage,
int l, int t, int w, int h,