Refactor the qsd8k gralloc implementation.

The purpose of this change is to add support for allocating gralloc buffers
from either /dev/pmem or /dev/pmem_adsp depending on the usage flags.  It does
this by factoring out and abstracting the interactions with the pmem device.
For /dev/pmem allocations the kernel allocator is not used: gralloc opens a
single master fd and sub-allocates all buffers from that mapping.  For
/dev/pmem_adsp the kernel allocator is used, so gralloc simply opens a new fd
for each allocation.
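
Below is a minimal sketch (not part of this change) of the allocator selection
described above, using the PmemAllocator interface and the
GRALLOC_USAGE_PRIVATE_PMEM_ADSP usage bit that this change introduces; the
helper function name is illustrative only.

#include "gralloc_priv.h"
#include "pmemalloc.h"

// Sketch: choose between the two allocators the way the new gpu.cpp does.
// pmemAllocator wraps /dev/pmem (userspace sub-allocation from one master fd);
// pmemAdspAllocator wraps /dev/pmem_adsp (kernel allocator, new fd per buffer).
static int alloc_from_pmem(PmemAllocator& pmemAllocator,
                           PmemAllocator& pmemAdspAllocator,
                           size_t size, int usage,
                           void** base, int* offset, int* fd) {
    PmemAllocator* pma = (usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP)
            ? &pmemAdspAllocator   // per-buffer kernel allocation
            : &pmemAllocator;      // sub-allocation from the single master fd
    return pma->alloc_pmem_buffer(size, usage, base, offset, fd);
}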

A very basic unit test that can be run on the host is included.  It requires
gtest; to run it on a host system, gtest must (currently) be built with
BUILD_WITH_ASTL=true.

Change-Id: If2ae0151698fad8107e18e808a3fa012a846263f
Author: Jamie Gennis  2010-04-21 17:33:32 -07:00
parent 6c2bc88a03
commit 5bc176b1a6
11 changed files with 1686 additions and 414 deletions


@@ -23,10 +23,22 @@ LOCAL_SHARED_LIBRARIES := liblog libcutils libGLESv1_CM
LOCAL_SRC_FILES := \
allocator.cpp \
gralloc.cpp \
framebuffer.cpp \
mapper.cpp
gpu.cpp \
gralloc.cpp \
mapper.cpp \
pmemalloc.cpp
LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).gralloc\"
include $(BUILD_SHARED_LIBRARY)
# Build a host library for testing
include $(CLEAR_VARS)
LOCAL_SRC_FILES := \
gpu.cpp \
pmemalloc.cpp
LOCAL_MODULE := libgralloc_qsd8k_host
LOCAL_CFLAGS:= -DLOG_TAG=\"gralloc-qsd8k\"
include $(BUILD_HOST_STATIC_LIBRARY)


@@ -22,6 +22,7 @@
#include <sys/types.h>
#include "gr.h"
#include "pmemalloc.h"
// ----------------------------------------------------------------------------
@@ -91,19 +92,19 @@ public:
}
};
class SimpleBestFitAllocator
class SimpleBestFitAllocator : public PmemUserspaceAllocator::Deps::Allocator
{
public:
SimpleBestFitAllocator();
SimpleBestFitAllocator(size_t size);
~SimpleBestFitAllocator();
virtual ~SimpleBestFitAllocator();
ssize_t setSize(size_t size);
virtual ssize_t setSize(size_t size);
ssize_t allocate(size_t size, uint32_t flags = 0);
ssize_t deallocate(size_t offset);
size_t size() const;
virtual ssize_t allocate(size_t size, uint32_t flags = 0);
virtual ssize_t deallocate(size_t offset);
virtual size_t size() const;
private:
struct chunk_t {

libgralloc-qsd8k/gpu.cpp (new file, 340 lines)

@@ -0,0 +1,340 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "gr.h"
#include "gpu.h"
gpu_context_t::gpu_context_t(Deps& deps, PmemAllocator& pmemAllocator,
PmemAllocator& pmemAdspAllocator, const private_module_t* module) :
deps(deps),
pmemAllocator(pmemAllocator),
pmemAdspAllocator(pmemAdspAllocator)
{
// Zero out the alloc_device_t
memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));
// Initialize the procs
common.tag = HARDWARE_DEVICE_TAG;
common.version = 0;
common.module = const_cast<hw_module_t*>(&module->base.common);
common.close = gralloc_close;
alloc = gralloc_alloc;
free = gralloc_free;
}
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
buffer_handle_t* pHandle)
{
private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
// we don't support allocations with both the FB and PMEM_ADSP flags
if (usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP) {
return -EINVAL;
}
// allocate the framebuffer
if (m->framebuffer == NULL) {
// initialize the framebuffer, the framebuffer is mapped once
// and forever.
int err = deps.mapFrameBufferLocked(m);
if (err < 0) {
return err;
}
}
const uint32_t bufferMask = m->bufferMask;
const uint32_t numBuffers = m->numBuffers;
const size_t bufferSize = m->finfo.line_length * m->info.yres;
if (numBuffers == 1) {
// If we have only one buffer, we never use page-flipping. Instead,
// we return a regular buffer which will be memcpy'ed to the main
// screen when post is called.
int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
return gralloc_alloc_buffer(bufferSize, newUsage, pHandle);
}
if (bufferMask >= ((1LU<<numBuffers)-1)) {
// We ran out of buffers.
return -ENOMEM;
}
// create a "fake" handles for it
intptr_t vaddr = intptr_t(m->framebuffer->base);
private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), size,
private_handle_t::PRIV_FLAGS_USES_PMEM |
private_handle_t::PRIV_FLAGS_FRAMEBUFFER);
// find a free slot
for (uint32_t i=0 ; i<numBuffers ; i++) {
if ((bufferMask & (1LU<<i)) == 0) {
m->bufferMask |= (1LU<<i);
break;
}
vaddr += bufferSize;
}
hnd->base = vaddr;
hnd->offset = vaddr - intptr_t(m->framebuffer->base);
*pHandle = hnd;
return 0;
}
int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
buffer_handle_t* pHandle)
{
private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
pthread_mutex_lock(&m->lock);
int err = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
pthread_mutex_unlock(&m->lock);
return err;
}
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage, buffer_handle_t* pHandle)
{
int err = 0;
int flags = 0;
int fd = -1;
void* base = 0; // XXX JMG: This should change to just get an address from
// the PmemAllocator rather than getting the base & offset separately
int offset = 0;
int lockState = 0;
size = roundUpToPageSize(size);
if (usage & GRALLOC_USAGE_HW_TEXTURE) {
// enable pmem in that case, so our software GL can fallback to
// the copybit module.
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
if (usage & GRALLOC_USAGE_HW_2D) {
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
if (usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP) {
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
}
private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) != 0 ||
(flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) != 0) {
PmemAllocator* pma = 0;
if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) != 0) {
if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) != 0) {
LOGE("attempting to allocate a gralloc buffer with both the "
"USES_PMEM and USES_PMEM_ADSP flags. Unsetting the "
"USES_PMEM_ADSP flag.");
flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
}
pma = &pmemAllocator;
} else { // (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) != 0
pma = &pmemAdspAllocator;
}
// PMEM buffers are always mmapped
lockState |= private_handle_t::LOCK_STATE_MAPPED;
// Allocate the buffer from pmem
err = pma->alloc_pmem_buffer(size, usage, &base, &offset, &fd);
if (err < 0) {
if (((usage & GRALLOC_USAGE_HW_2D) == 0) &&
((usage & GRALLOC_USAGE_PRIVATE_PMEM_ADSP) == 0)) {
// the caller didn't request PMEM, so we can try something else
flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
err = 0;
goto try_ashmem;
} else {
LOGE("couldn't open pmem (%s)", strerror(errno));
}
}
} else {
try_ashmem:
fd = deps.ashmem_create_region("gralloc-buffer", size);
if (fd < 0) {
LOGE("couldn't create ashmem (%s)", strerror(errno));
err = -errno;
}
}
if (err == 0) {
private_handle_t* hnd = new private_handle_t(fd, size, flags);
hnd->offset = offset;
hnd->base = int(base)+offset;
hnd->lockState = lockState;
*pHandle = hnd;
}
LOGE_IF(err, "gralloc failed err=%s", strerror(-err));
return err;
}
static inline size_t ALIGN(size_t x, size_t align) {
return (x + align-1) & ~(align-1);
}
int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
buffer_handle_t* pHandle, int* pStride) {
if (!pHandle || !pStride)
return -EINVAL;
size_t size, alignedw, alignedh;
alignedw = ALIGN(w, 32);
alignedh = ALIGN(h, 32);
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
size = alignedw * alignedh * 4;
break;
case HAL_PIXEL_FORMAT_RGB_888:
size = alignedw * alignedh * 3;
break;
case HAL_PIXEL_FORMAT_RGB_565:
case HAL_PIXEL_FORMAT_RGBA_5551:
case HAL_PIXEL_FORMAT_RGBA_4444:
size = alignedw * alignedh * 2;
break;
// adreno formats
case HAL_PIXEL_FORMAT_YCrCb_420_SP: // NV21
size = ALIGN(alignedw*alignedh, 4096);
size += ALIGN(2 * ALIGN(w/2, 32) * ALIGN(h/2, 32), 4096);
break;
case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: // NV12
// The chroma plane is subsampled,
// but the pitch in bytes is unchanged
// The GPU needs 4K alignment, but the video decoder needs 8K
alignedw = ALIGN(w, 128);
size = ALIGN( alignedw * alignedh, 8192);
size += ALIGN( alignedw * ALIGN(h/2, 32), 4096);
break;
case HAL_PIXEL_FORMAT_YV12:
alignedw = ALIGN(w, 16);
alignedh = ALIGN(h, 16);
size = alignedw * alignedh;
size += size / 2;
break;
case HAL_PIXEL_FORMAT_YV16:
alignedh = ALIGN(h, 16);
size = alignedw * alignedh * 2;
break;
default:
LOGE("unrecognized pixel format: %d", format);
return -EINVAL;
}
if ((ssize_t)size <= 0)
return -EINVAL;
int err;
if (usage & GRALLOC_USAGE_HW_FB) {
err = gralloc_alloc_framebuffer(size, usage, pHandle);
} else {
err = gralloc_alloc_buffer(size, usage, pHandle);
}
if (err < 0) {
return err;
}
*pStride = alignedw;
return 0;
}
int gpu_context_t::free_impl(private_handle_t const* hnd) {
private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
// free this buffer
const size_t bufferSize = m->finfo.line_length * m->info.yres;
int index = (hnd->base - m->framebuffer->base) / bufferSize;
m->bufferMask &= ~(1<<index);
} else {
PmemAllocator* pmem_allocator = 0;
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
pmem_allocator = &pmemAllocator;
} else if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP) {
pmem_allocator = &pmemAdspAllocator;
}
pmem_allocator->free_pmem_buffer(hnd->size, (void*)hnd->base,
hnd->offset, hnd->fd);
deps.terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
}
deps.close(hnd->fd);
delete hnd; // XXX JMG: move this to the deps
return 0;
}
/******************************************************************************
* Static functions
*****************************************************************************/
int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
int usage, buffer_handle_t* pHandle, int* pStride)
{
if (!dev) {
return -EINVAL;
}
gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
return gpu->alloc_impl(w, h, format, usage, pHandle, pStride);
}
int gpu_context_t::gralloc_free(alloc_device_t* dev,
buffer_handle_t handle)
{
if (private_handle_t::validate(handle) < 0)
return -EINVAL;
private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
return gpu->free_impl(hnd);
}
/*****************************************************************************/
int gpu_context_t::gralloc_close(struct hw_device_t *dev)
{
gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
if (ctx) {
/* TODO: keep a list of all buffer_handle_t created, and free them
* all here.
*/
delete ctx;
}
return 0;
}
gpu_context_t::Deps::~Deps() {}

libgralloc-qsd8k/gpu.h (new file, 76 lines)

@@ -0,0 +1,76 @@
/*
* Copyright (C) 2008 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRALLOC_QSD8K_GPU_H_
#define GRALLOC_QSD8K_GPU_H_
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <cutils/log.h>
#include <cutils/ashmem.h>
#include "gralloc_priv.h"
#include "pmemalloc.h"
class gpu_context_t : public alloc_device_t {
public:
class Deps {
public:
virtual ~Deps();
// ashmem
virtual int ashmem_create_region(const char *name, size_t size) = 0;
// POSIX
virtual int close(int fd) = 0;
// Framebuffer (locally defined)
virtual int mapFrameBufferLocked(struct private_module_t* module) = 0;
virtual int terminateBuffer(gralloc_module_t const* module,
private_handle_t* hnd) = 0;
};
gpu_context_t(Deps& deps, PmemAllocator& pmemAllocator,
PmemAllocator& pmemAdspAllocator, const private_module_t* module);
int gralloc_alloc_framebuffer_locked(size_t size, int usage,
buffer_handle_t* pHandle);
int gralloc_alloc_framebuffer(size_t size, int usage,
buffer_handle_t* pHandle);
int gralloc_alloc_buffer(size_t size, int usage, buffer_handle_t* pHandle);
int free_impl(private_handle_t const* hnd);
int alloc_impl(int w, int h, int format, int usage,
buffer_handle_t* pHandle, int* pStride);
static int gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
int usage, buffer_handle_t* pHandle, int* pStride);
static int gralloc_free(alloc_device_t* dev, buffer_handle_t handle);
static int gralloc_close(struct hw_device_t *dev);
private:
Deps& deps;
PmemAllocator& pmemAllocator;
PmemAllocator& pmemAdspAllocator;
};
#endif // GRALLOC_QSD8K_GPU_H


@@ -14,44 +14,22 @@
* limitations under the License.
*/
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <cutils/ashmem.h>
#include <cutils/log.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include "gralloc_priv.h"
#include "allocator.h"
#if HAVE_ANDROID_OS
#include <linux/android_pmem.h>
#endif
#include "allocator.h"
#include "gr.h"
#include "gpu.h"
/*****************************************************************************/
static SimpleBestFitAllocator sAllocator;
/*****************************************************************************/
struct gralloc_context_t {
alloc_device_t device;
/* our private data here */
};
static int gralloc_alloc_buffer(alloc_device_t* dev,
size_t size, int usage, buffer_handle_t* pHandle);
@@ -82,8 +60,93 @@ extern int gralloc_perform(struct gralloc_module_t const* module,
/*****************************************************************************/
/* On-device dependency implementation */
class PmemAllocatorDepsDeviceImpl : public PmemUserspaceAllocator::Deps,
public PmemKernelAllocator::Deps {
virtual size_t getPmemTotalSize(int fd, size_t* size) {
pmem_region region;
int err = ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);
if (err == 0) {
*size = region.len;
}
return err;
}
virtual int connectPmem(int fd, int master_fd) {
return ioctl(fd, PMEM_CONNECT, master_fd);
}
virtual int mapPmem(int fd, int offset, size_t size) {
struct pmem_region sub = { offset, size };
return ioctl(fd, PMEM_MAP, &sub);
}
virtual int unmapPmem(int fd, int offset, size_t size) {
struct pmem_region sub = { offset, size };
return ioctl(fd, PMEM_UNMAP, &sub);
}
virtual int getErrno() {
return errno;
}
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
return ::mmap(start, length, prot, flags, fd, offset);
}
virtual int munmap(void* start, size_t length) {
return ::munmap(start, length);
}
virtual int open(const char* pathname, int flags, int mode) {
return ::open(pathname, flags, mode);
}
virtual int close(int fd) {
return ::close(fd);
}
};
class GpuContextDepsDeviceImpl : public gpu_context_t::Deps {
public:
virtual int ashmem_create_region(const char *name, size_t size) {
return ::ashmem_create_region(name, size);
}
virtual int mapFrameBufferLocked(struct private_module_t* module) {
return ::mapFrameBufferLocked(module);
}
virtual int terminateBuffer(gralloc_module_t const* module,
private_handle_t* hnd) {
return ::terminateBuffer(module, hnd);
}
virtual int close(int fd) {
return ::close(fd);
}
};
static PmemAllocatorDepsDeviceImpl pmemAllocatorDeviceDepsImpl;
static GpuContextDepsDeviceImpl gpuContextDeviceDepsImpl;
/*****************************************************************************/
static SimpleBestFitAllocator pmemAllocMgr;
static PmemUserspaceAllocator pmemAllocator(pmemAllocatorDeviceDepsImpl, pmemAllocMgr,
"/dev/pmem");
static PmemKernelAllocator pmemAdspAllocator(pmemAllocatorDeviceDepsImpl,
"/dev/pmem_adsp");
/*****************************************************************************/
static struct hw_module_methods_t gralloc_module_methods = {
open: gralloc_device_open
open: gralloc_device_open
};
struct private_module_t HAL_MODULE_INFO_SYM = {
@@ -110,388 +173,21 @@ struct private_module_t HAL_MODULE_INFO_SYM = {
bufferMask: 0,
lock: PTHREAD_MUTEX_INITIALIZER,
currentBuffer: 0,
pmem_master: -1,
pmem_master_base: 0,
};
/*****************************************************************************/
static int gralloc_alloc_framebuffer_locked(alloc_device_t* dev,
size_t size, int usage, buffer_handle_t* pHandle)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
// allocate the framebuffer
if (m->framebuffer == NULL) {
// initialize the framebuffer, the framebuffer is mapped once
// and forever.
int err = mapFrameBufferLocked(m);
if (err < 0) {
return err;
}
}
const uint32_t bufferMask = m->bufferMask;
const uint32_t numBuffers = m->numBuffers;
const size_t bufferSize = m->finfo.line_length * m->info.yres;
if (numBuffers == 1) {
// If we have only one buffer, we never use page-flipping. Instead,
// we return a regular buffer which will be memcpy'ed to the main
// screen when post is called.
int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
return gralloc_alloc_buffer(dev, bufferSize, newUsage, pHandle);
}
if (bufferMask >= ((1LU<<numBuffers)-1)) {
// We ran out of buffers.
return -ENOMEM;
}
// create a "fake" handles for it
intptr_t vaddr = intptr_t(m->framebuffer->base);
private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), size,
private_handle_t::PRIV_FLAGS_USES_PMEM |
private_handle_t::PRIV_FLAGS_FRAMEBUFFER);
// find a free slot
for (uint32_t i=0 ; i<numBuffers ; i++) {
if ((bufferMask & (1LU<<i)) == 0) {
m->bufferMask |= (1LU<<i);
break;
}
vaddr += bufferSize;
}
hnd->base = vaddr;
hnd->offset = vaddr - intptr_t(m->framebuffer->base);
*pHandle = hnd;
return 0;
}
static int gralloc_alloc_framebuffer(alloc_device_t* dev,
size_t size, int usage, buffer_handle_t* pHandle)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->lock);
int err = gralloc_alloc_framebuffer_locked(dev, size, usage, pHandle);
pthread_mutex_unlock(&m->lock);
return err;
}
static int init_pmem_area_locked(private_module_t* m)
{
int err = 0;
int master_fd = open("/dev/pmem", O_RDWR, 0);
if (master_fd >= 0) {
size_t size;
pmem_region region;
if (ioctl(master_fd, PMEM_GET_TOTAL_SIZE, &region) < 0) {
LOGE("PMEM_GET_TOTAL_SIZE failed, limp mode");
size = 8<<20; // 8 MiB
} else {
size = region.len;
}
sAllocator.setSize(size);
void* base = mmap(0, size,
PROT_READ|PROT_WRITE, MAP_SHARED, master_fd, 0);
if (base == MAP_FAILED) {
err = -errno;
base = 0;
close(master_fd);
master_fd = -1;
}
m->pmem_master = master_fd;
m->pmem_master_base = base;
} else {
err = -errno;
}
return err;
}
static int init_pmem_area(private_module_t* m)
{
pthread_mutex_lock(&m->lock);
int err = m->pmem_master;
if (err == -1) {
// first time, try to initialize pmem
err = init_pmem_area_locked(m);
if (err) {
m->pmem_master = err;
}
} else if (err < 0) {
// pmem couldn't be initialized, never use it
} else {
// pmem OK
err = 0;
}
pthread_mutex_unlock(&m->lock);
return err;
}
static int gralloc_alloc_buffer(alloc_device_t* dev,
size_t size, int usage, buffer_handle_t* pHandle)
{
int err = 0;
int flags = 0;
int fd = -1;
void* base = 0;
int offset = 0;
int lockState = 0;
size = roundUpToPageSize(size);
if (usage & GRALLOC_USAGE_HW_TEXTURE) {
// enable pmem in that case, so our software GL can fallback to
// the copybit module.
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
if (usage & GRALLOC_USAGE_HW_2D) {
flags |= private_handle_t::PRIV_FLAGS_USES_PMEM;
}
if ((flags & private_handle_t::PRIV_FLAGS_USES_PMEM) == 0) {
try_ashmem:
fd = ashmem_create_region("gralloc-buffer", size);
if (fd < 0) {
LOGE("couldn't create ashmem (%s)", strerror(errno));
err = -errno;
}
} else {
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
err = init_pmem_area(m);
if (err == 0) {
// PMEM buffers are always mmapped
base = m->pmem_master_base;
lockState |= private_handle_t::LOCK_STATE_MAPPED;
offset = sAllocator.allocate(size);
if (offset < 0) {
// no more pmem memory
err = -ENOMEM;
} else {
struct pmem_region sub = { offset, size };
int openFlags = O_RDWR | O_SYNC;
uint32_t uread = usage & GRALLOC_USAGE_SW_READ_MASK;
uint32_t uwrite = usage & GRALLOC_USAGE_SW_WRITE_MASK;
if (uread == GRALLOC_USAGE_SW_READ_OFTEN ||
uwrite == GRALLOC_USAGE_SW_WRITE_OFTEN) {
openFlags &= ~O_SYNC;
}
// now create the "sub-heap"
fd = open("/dev/pmem", openFlags, 0);
err = fd < 0 ? fd : 0;
// and connect to it
if (err == 0)
err = ioctl(fd, PMEM_CONNECT, m->pmem_master);
// and make it available to the client process
if (err == 0)
err = ioctl(fd, PMEM_MAP, &sub);
if (err < 0) {
err = -errno;
close(fd);
sAllocator.deallocate(offset);
fd = -1;
} else {
memset((char*)base + offset, 0, size);
// clean and invalidate the new allocation
cacheflush(intptr_t(base) + offset, size, 0);
}
//LOGD_IF(!err, "allocating pmem size=%d, offset=%d", size, offset);
}
} else {
if ((usage & GRALLOC_USAGE_HW_2D) == 0) {
// the caller didn't request PMEM, so we can try something else
flags &= ~private_handle_t::PRIV_FLAGS_USES_PMEM;
err = 0;
goto try_ashmem;
} else {
LOGE("couldn't open pmem (%s)", strerror(errno));
}
}
}
if (err == 0) {
private_handle_t* hnd = new private_handle_t(fd, size, flags);
hnd->offset = offset;
hnd->base = int(base)+offset;
hnd->lockState = lockState;
*pHandle = hnd;
}
LOGE_IF(err, "gralloc failed err=%s", strerror(-err));
return err;
}
/*****************************************************************************/
static inline size_t ALIGN(size_t x, size_t align) {
return (x + align-1) & ~(align-1);
}
static int gralloc_alloc(alloc_device_t* dev,
int w, int h, int format, int usage,
buffer_handle_t* pHandle, int* pStride)
{
if (!pHandle || !pStride)
return -EINVAL;
size_t size, alignedw, alignedh;
alignedw = ALIGN(w, 32);
alignedh = ALIGN(h, 32);
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
size = alignedw * alignedh * 4;
break;
case HAL_PIXEL_FORMAT_RGB_888:
size = alignedw * alignedh * 3;
break;
case HAL_PIXEL_FORMAT_RGB_565:
case HAL_PIXEL_FORMAT_RGBA_5551:
case HAL_PIXEL_FORMAT_RGBA_4444:
size = alignedw * alignedh * 2;
break;
// adreno formats
case HAL_PIXEL_FORMAT_YCrCb_420_SP: // NV21
size = ALIGN(alignedw*alignedh, 4096);
size += ALIGN(2 * ALIGN(w/2, 32) * ALIGN(h/2, 32), 4096);
break;
case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: // NV12
// The chroma plane is subsampled,
// but the pitch in bytes is unchanged
// The GPU needs 4K alignment, but the video decoder needs 8K
alignedw = ALIGN(w, 128);
size = ALIGN( ALIGN(w, 128) * alignedh, 8192);
size += ALIGN( ALIGN(w, 128) * ALIGN(h/2, 32), 4096);
break;
case HAL_PIXEL_FORMAT_YV12:
alignedw = ALIGN(w, 16);
alignedh = ALIGN(h, 16);
size = alignedw * alignedh;
size += size / 2;
break;
case HAL_PIXEL_FORMAT_YV16:
alignedh = ALIGN(h, 16);
size = alignedw * alignedh * 2;
break;
default:
return -EINVAL;
}
if ((ssize_t)size <= 0)
return -EINVAL;
int err;
if (usage & GRALLOC_USAGE_HW_FB) {
err = gralloc_alloc_framebuffer(dev, size, usage, pHandle);
} else {
err = gralloc_alloc_buffer(dev, size, usage, pHandle);
}
if (err < 0) {
return err;
}
*pStride = alignedw;
return 0;
}
static int gralloc_free(alloc_device_t* dev,
buffer_handle_t handle)
{
if (private_handle_t::validate(handle) < 0)
return -EINVAL;
private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
// free this buffer
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
const size_t bufferSize = m->finfo.line_length * m->info.yres;
int index = (hnd->base - m->framebuffer->base) / bufferSize;
m->bufferMask &= ~(1<<index);
} else {
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
if (hnd->fd >= 0) {
struct pmem_region sub = { hnd->offset, hnd->size };
int err = ioctl(hnd->fd, PMEM_UNMAP, &sub);
LOGE_IF(err<0, "PMEM_UNMAP failed (%s), "
"fd=%d, sub.offset=%lu, sub.size=%lu",
strerror(errno), hnd->fd, hnd->offset, hnd->size);
if (err == 0) {
// we can't deallocate the memory in case of UNMAP failure
// because it would give that process access to someone else's
// surfaces, which would be a security breach.
sAllocator.deallocate(hnd->offset);
}
}
}
gralloc_module_t* module = reinterpret_cast<gralloc_module_t*>(
dev->common.module);
terminateBuffer(module, const_cast<private_handle_t*>(hnd));
}
close(hnd->fd);
delete hnd;
return 0;
}
/*****************************************************************************/
static int gralloc_close(struct hw_device_t *dev)
{
gralloc_context_t* ctx = reinterpret_cast<gralloc_context_t*>(dev);
if (ctx) {
/* TODO: keep a list of all buffer_handle_t created, and free them
* all here.
*/
free(ctx);
}
return 0;
}
int gralloc_device_open(const hw_module_t* module, const char* name,
hw_device_t** device)
{
int status = -EINVAL;
if (!strcmp(name, GRALLOC_HARDWARE_GPU0)) {
gralloc_context_t *dev;
dev = (gralloc_context_t*)malloc(sizeof(*dev));
/* initialize our state here */
memset(dev, 0, sizeof(*dev));
/* initialize the procs */
dev->device.common.tag = HARDWARE_DEVICE_TAG;
dev->device.common.version = 0;
dev->device.common.module = const_cast<hw_module_t*>(module);
dev->device.common.close = gralloc_close;
dev->device.alloc = gralloc_alloc;
dev->device.free = gralloc_free;
*device = &dev->device.common;
const private_module_t* m = reinterpret_cast<const private_module_t*>(
module);
gpu_context_t *dev;
dev = new gpu_context_t(gpuContextDeviceDepsImpl, pmemAllocator,
pmemAdspAllocator, m);
*device = &dev->common;
status = 0;
} else {
status = fb_device_open(module, name, device);


@@ -29,10 +29,16 @@
#include <linux/fb.h>
enum {
/* gralloc usage bit indicating a pmem_adsp allocation should be used */
GRALLOC_USAGE_PRIVATE_PMEM_ADSP = GRALLOC_USAGE_PRIVATE_0,
};
/*****************************************************************************/
struct private_module_t;
struct private_handle_t;
struct PmemAllocator;
struct private_module_t {
gralloc_module_t base;
@@ -44,8 +50,6 @@ struct private_module_t {
uint32_t bufferMask;
pthread_mutex_t lock;
buffer_handle_t currentBuffer;
int pmem_master;
void* pmem_master_base;
struct fb_var_screeninfo info;
struct fb_fix_screeninfo finfo;
@@ -69,9 +73,10 @@ struct private_handle_t {
#endif
enum {
PRIV_FLAGS_FRAMEBUFFER = 0x00000001,
PRIV_FLAGS_USES_PMEM = 0x00000002,
PRIV_FLAGS_NEEDS_FLUSH = 0x00000004,
PRIV_FLAGS_FRAMEBUFFER = 0x00000001,
PRIV_FLAGS_USES_PMEM = 0x00000002,
PRIV_FLAGS_USES_PMEM_ADSP = 0x00000004,
PRIV_FLAGS_NEEDS_FLUSH = 0x00000008,
};
enum {


@@ -171,7 +171,8 @@ int terminateBuffer(gralloc_module_t const* module,
if (hnd->lockState & private_handle_t::LOCK_STATE_MAPPED) {
// this buffer was mapped, unmap it now
if (hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
if ((hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM) ||
(hnd->flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)) {
if (hnd->pid != getpid()) {
// ... unless it's a "master" pmem buffer, that is a buffer
// mapped in the process it's been allocated.


@@ -0,0 +1,324 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
//#define LOG_NDEBUG 0
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <cutils/log.h>
#include <cutils/ashmem.h>
#include "gralloc_priv.h"
#include "pmemalloc.h"
#define BEGIN_FUNC LOGV("%s begin", __PRETTY_FUNCTION__)
#define END_FUNC LOGV("%s end", __PRETTY_FUNCTION__)
static int get_open_flags(int usage) {
int openFlags = O_RDWR | O_SYNC;
uint32_t uread = usage & GRALLOC_USAGE_SW_READ_MASK;
uint32_t uwrite = usage & GRALLOC_USAGE_SW_WRITE_MASK;
if (uread == GRALLOC_USAGE_SW_READ_OFTEN ||
uwrite == GRALLOC_USAGE_SW_WRITE_OFTEN) {
openFlags &= ~O_SYNC;
}
return openFlags;
}
PmemAllocator::~PmemAllocator()
{
BEGIN_FUNC;
END_FUNC;
}
PmemUserspaceAllocator::PmemUserspaceAllocator(Deps& deps, Deps::Allocator& allocator, const char* pmemdev):
deps(deps),
allocator(allocator),
pmemdev(pmemdev),
master_fd(MASTER_FD_INIT)
{
BEGIN_FUNC;
pthread_mutex_init(&lock, NULL);
END_FUNC;
}
PmemUserspaceAllocator::~PmemUserspaceAllocator()
{
BEGIN_FUNC;
END_FUNC;
}
void* PmemUserspaceAllocator::get_base_address() {
BEGIN_FUNC;
END_FUNC;
return master_base;
}
int PmemUserspaceAllocator::init_pmem_area_locked()
{
BEGIN_FUNC;
int err = 0;
int fd = deps.open(pmemdev, O_RDWR, 0);
if (fd >= 0) {
size_t size = 0;
err = deps.getPmemTotalSize(fd, &size);
if (err < 0) {
LOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", pmemdev,
err);
size = 8<<20; // 8 MiB
}
allocator.setSize(size);
void* base = deps.mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
0);
if (base == MAP_FAILED) {
LOGE("%s: failed to map pmem master fd: %s", pmemdev,
strerror(deps.getErrno()));
err = -deps.getErrno();
base = 0;
deps.close(fd);
fd = -1;
} else {
master_fd = fd;
master_base = base;
}
} else {
LOGE("%s: failed to open pmem device: %s", pmemdev,
strerror(deps.getErrno()));
err = -deps.getErrno();
}
END_FUNC;
return err;
}
int PmemUserspaceAllocator::init_pmem_area()
{
BEGIN_FUNC;
pthread_mutex_lock(&lock);
int err = master_fd;
if (err == MASTER_FD_INIT) {
// first time, try to initialize pmem
err = init_pmem_area_locked();
if (err) {
LOGE("%s: failed to initialize pmem area", pmemdev);
master_fd = err;
}
} else if (err < 0) {
// pmem couldn't be initialized, never use it
} else {
// pmem OK
err = 0;
}
pthread_mutex_unlock(&lock);
END_FUNC;
return err;
}
int PmemUserspaceAllocator::alloc_pmem_buffer(size_t size, int usage,
void** pBase, int* pOffset, int* pFd)
{
BEGIN_FUNC;
int err = init_pmem_area();
if (err == 0) {
void* base = master_base;
int offset = allocator.allocate(size);
if (offset < 0) {
// no more pmem memory
LOGE("%s: no more pmem available", pmemdev);
err = -ENOMEM;
} else {
int openFlags = get_open_flags(usage);
//LOGD("%s: allocating pmem at offset 0x%p", pmemdev, offset);
// now create the "sub-heap"
int fd = deps.open(pmemdev, openFlags, 0);
err = fd < 0 ? fd : 0;
// and connect to it
if (err == 0)
err = deps.connectPmem(fd, master_fd);
// and make it available to the client process
if (err == 0)
err = deps.mapPmem(fd, offset, size);
if (err < 0) {
LOGE("%s: failed to initialize pmem sub-heap: %d", pmemdev,
err);
err = -deps.getErrno();
deps.close(fd);
allocator.deallocate(offset);
fd = -1;
} else {
LOGV("%s: mapped fd %d at offset %d, size %d", pmemdev, fd, offset, size);
memset((char*)base + offset, 0, size);
*pBase = base;
*pOffset = offset;
*pFd = fd;
}
//LOGD_IF(!err, "%s: allocating pmem size=%d, offset=%d", pmemdev, size, offset);
}
}
END_FUNC;
return err;
}
int PmemUserspaceAllocator::free_pmem_buffer(size_t size, void* base, int offset, int fd)
{
BEGIN_FUNC;
int err = 0;
if (fd >= 0) {
int err = deps.unmapPmem(fd, offset, size);
LOGE_IF(err<0, "PMEM_UNMAP failed (%s), fd=%d, sub.offset=%u, "
"sub.size=%u", strerror(deps.getErrno()), fd, offset, size);
if (err == 0) {
// we can't deallocate the memory in case of UNMAP failure
// because it would give that process access to someone else's
// surfaces, which would be a security breach.
allocator.deallocate(offset);
}
}
END_FUNC;
return err;
}
PmemUserspaceAllocator::Deps::Allocator::~Allocator()
{
BEGIN_FUNC;
END_FUNC;
}
PmemUserspaceAllocator::Deps::~Deps()
{
BEGIN_FUNC;
END_FUNC;
}
PmemKernelAllocator::PmemKernelAllocator(Deps& deps, const char* pmemdev):
deps(deps),
pmemdev(pmemdev)
{
BEGIN_FUNC;
END_FUNC;
}
PmemKernelAllocator::~PmemKernelAllocator()
{
BEGIN_FUNC;
END_FUNC;
}
void* PmemKernelAllocator::get_base_address() {
BEGIN_FUNC;
END_FUNC;
return 0;
}
static unsigned clp2(unsigned x) {
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >>16);
return x + 1;
}
int PmemKernelAllocator::alloc_pmem_buffer(size_t size, int usage,
void** pBase,int* pOffset, int* pFd)
{
BEGIN_FUNC;
*pBase = 0;
*pOffset = 0;
*pFd = -1;
int err;
int openFlags = get_open_flags(usage);
int fd = deps.open(pmemdev, openFlags, 0);
if (fd < 0) {
err = -deps.getErrno();
END_FUNC;
return err;
}
// The size should already be page aligned, now round it up to a power of 2.
size = clp2(size);
void* base = deps.mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (base == MAP_FAILED) {
LOGE("%s: failed to map pmem fd: %s", pmemdev,
strerror(deps.getErrno()));
err = -deps.getErrno();
deps.close(fd);
END_FUNC;
return err;
}
memset(base, 0, size);
*pBase = base;
*pOffset = 0;
*pFd = fd;
END_FUNC;
return 0;
}
int PmemKernelAllocator::free_pmem_buffer(size_t size, void* base, int offset, int fd)
{
BEGIN_FUNC;
// The size should already be page aligned, now round it up to a power of 2
// like we did when allocating.
size = clp2(size);
int err = deps.munmap(base, size);
if (err < 0) {
err = deps.getErrno();
LOGW("%s: error unmapping pmem fd: %s", pmemdev, strerror(err));
return -err;
}
END_FUNC;
return 0;
}
PmemKernelAllocator::Deps::~Deps()
{
BEGIN_FUNC;
END_FUNC;
}


@@ -0,0 +1,161 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRALLOC_QSD8K_PMEMALLOC_H
#define GRALLOC_QSD8K_PMEMALLOC_H
#include <limits.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
/**
* An interface to the PMEM allocators.
*/
class PmemAllocator {
public:
virtual ~PmemAllocator();
// Only valid after init_pmem_area() has completed successfully.
virtual void* get_base_address() = 0;
virtual int alloc_pmem_buffer(size_t size, int usage, void** pBase,
int* pOffset, int* pFd) = 0;
virtual int free_pmem_buffer(size_t size, void* base, int offset, int fd) = 0;
};
/**
* A PMEM allocator that allocates the entire pmem memory from the kernel and
* then uses a user-space allocator to suballocate from that. This requires
* that the PMEM device driver have kernel allocation disabled.
*/
class PmemUserspaceAllocator: public PmemAllocator {
public:
class Deps {
public:
class Allocator {
public:
virtual ~Allocator();
virtual ssize_t setSize(size_t size) = 0;
virtual size_t size() const = 0;
virtual ssize_t allocate(size_t size, uint32_t flags = 0) = 0;
virtual ssize_t deallocate(size_t offset) = 0;
};
virtual ~Deps();
// pmem
virtual size_t getPmemTotalSize(int fd, size_t* size) = 0;
virtual int connectPmem(int fd, int master_fd) = 0;
virtual int mapPmem(int fd, int offset, size_t size) = 0;
virtual int unmapPmem(int fd, int offset, size_t size) = 0;
// C99
virtual int getErrno() = 0;
// POSIX
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) = 0;
virtual int open(const char* pathname, int flags, int mode) = 0;
virtual int close(int fd) = 0;
};
PmemUserspaceAllocator(Deps& deps, Deps::Allocator& allocator, const char* pmemdev);
virtual ~PmemUserspaceAllocator();
// Only valid after init_pmem_area() has completed successfully.
virtual void* get_base_address();
virtual int init_pmem_area_locked();
virtual int init_pmem_area();
virtual int alloc_pmem_buffer(size_t size, int usage, void** pBase,
int* pOffset, int* pFd);
virtual int free_pmem_buffer(size_t size, void* base, int offset, int fd);
#ifndef ANDROID_OS
// DO NOT USE: For testing purposes only.
void set_master_values(int fd, void* base) {
master_fd = fd;
master_base = base;
}
#endif // ANDROID_OS
private:
enum {
MASTER_FD_INIT = -1,
};
Deps& deps;
Deps::Allocator& allocator;
pthread_mutex_t lock;
const char* pmemdev;
int master_fd;
void* master_base;
};
/**
* A PMEM allocator that allocates each individual allocation from the kernel
* (using the kernel's allocator). This requires the kernel driver for the
* particular PMEM device being allocated from to support kernel allocation.
*/
class PmemKernelAllocator: public PmemAllocator {
public:
class Deps {
public:
virtual ~Deps();
// C99
virtual int getErrno() = 0;
// POSIX
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) = 0;
virtual int munmap(void* start, size_t length) = 0;
virtual int open(const char* pathname, int flags, int mode) = 0;
virtual int close(int fd) = 0;
};
PmemKernelAllocator(Deps& deps, const char* pmemdev);
virtual ~PmemKernelAllocator();
// Only valid after init_pmem_area() has completed successfully.
virtual void* get_base_address();
virtual int alloc_pmem_buffer(size_t size, int usage, void** pBase,
int* pOffset, int* pFd);
virtual int free_pmem_buffer(size_t size, void* base, int offset, int fd);
private:
Deps& deps;
const char* pmemdev;
};
#endif // GRALLOC_QSD8K_PMEMALLOC_H


@@ -0,0 +1,55 @@
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
LOCAL_PATH := $(call my-dir)
# you can use EXTRA_CFLAGS to indicate additional CFLAGS to use
# in the build. The variables will be cleaned on exit
#
#
libgralloc_test_includes:= \
bionic/libstdc++/include \
external/astl/include \
external/gtest/include \
$(LOCAL_PATH)/..
libgralloc_test_static_libs := \
libgralloc_qsd8k_host \
libgtest_main_host \
libgtest_host \
libastl_host \
liblog
define host-test
$(foreach file,$(1), \
$(eval include $(CLEAR_VARS)) \
$(eval LOCAL_CPP_EXTENSION := .cpp) \
$(eval LOCAL_SRC_FILES := $(file)) \
$(eval LOCAL_C_INCLUDES := $(libgralloc_test_includes)) \
$(eval LOCAL_MODULE := $(notdir $(file:%.cpp=%))) \
$(eval LOCAL_CFLAGS += $(EXTRA_CFLAGS)) \
$(eval LOCAL_LDLIBS += $(EXTRA_LDLIBS)) \
$(eval LOCAL_STATIC_LIBRARIES := $(libgralloc_test_static_libs)) \
$(eval LOCAL_MODULE_TAGS := eng tests) \
$(eval include $(BUILD_HOST_EXECUTABLE)) \
) \
$(eval EXTRA_CFLAGS :=) \
$(eval EXTRA_LDLIBS :=)
endef
TEST_SRC_FILES := \
pmemalloc_test.cpp
$(call host-test, $(TEST_SRC_FILES))


@@ -0,0 +1,601 @@
/*
* Copyright (C) 2010 The Android Open Source Project
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <gtest/gtest.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include "pmemalloc.h"
class DepsStub : public PmemUserspaceAllocator::Deps, public PmemKernelAllocator::Deps {
public:
virtual size_t getPmemTotalSize(int fd, size_t* size) {
return 0;
}
virtual int connectPmem(int fd, int master_fd) {
return 0;
}
virtual int mapPmem(int fd, int offset, size_t size) {
return 0;
}
virtual int unmapPmem(int fd, int offset, size_t size) {
return 0;
}
virtual int getErrno() {
return 0;
}
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
return 0;
}
virtual int munmap(void* start, size_t length) {
return 0;
}
virtual int open(const char* pathname, int flags, int mode) {
return 0;
}
virtual int close(int fd) {
return 0;
}
};
/******************************************************************************/
class AllocatorStub : public PmemUserspaceAllocator::Deps::Allocator {
virtual ssize_t setSize(size_t size) {
return 0;
}
virtual size_t size() const {
return 0;
}
virtual ssize_t allocate(size_t size, uint32_t flags = 0) {
return 0;
}
virtual ssize_t deallocate(size_t offset) {
return 0;
}
};
/******************************************************************************/
static const char* fakePmemDev = "/foo/bar";
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithSuccessfulCompletion : public DepsStub {
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags);
EXPECT_EQ(0, mode);
return 1234;
}
virtual size_t getPmemTotalSize(int fd, size_t* size) {
EXPECT_EQ(1234, fd);
*size = 16 << 20;
return 0;
}
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
EXPECT_EQ(1234, fd);
return (void*)0x87654321;
}
};
struct Allocator_InitPmemAreaLockedWithSuccessfulCompletion : public AllocatorStub {
virtual ssize_t setSize(size_t size) {
EXPECT_EQ(size_t(16 << 20), size);
return 0;
}
};
TEST(test_pmem_userspace_allocator, testInitPmemAreaLockedWithSuccessfulCompletion) {
Deps_InitPmemAreaLockedWithSuccessfulCompletion depsMock;
Allocator_InitPmemAreaLockedWithSuccessfulCompletion allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
int result = pma.init_pmem_area_locked();
ASSERT_EQ(0, result);
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithEnomemOnMmap : public DepsStub {
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags);
EXPECT_EQ(0, mode);
return 1234;
}
virtual size_t getPmemTotalSize(int fd, size_t* size) {
EXPECT_EQ(1234, fd);
*size = 16 << 20;
return 0;
}
virtual int getErrno() {
return ENOMEM;
}
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
return (void*)MAP_FAILED;
}
};
struct Allocator_InitPmemAreaLockedWithEnomemOnMmap : public AllocatorStub {
virtual ssize_t setSize(size_t size) {
EXPECT_EQ(size_t(16 << 20), size);
return 0;
}
};
TEST(test_pmem_userspace_allocator, testInitPmemAreaLockedWthEnomemOnMmap) {
Deps_InitPmemAreaLockedWithEnomemOnMmap depsMock;
Allocator_InitPmemAreaLockedWithEnomemOnMmap allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
int result = pma.init_pmem_area_locked();
ASSERT_EQ(-ENOMEM, result);
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithEaccesOnGetPmemTotalSize : public DepsStub {
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags);
EXPECT_EQ(0, mode);
return 1234;
}
virtual size_t getPmemTotalSize(int fd, size_t* size) {
EXPECT_EQ(1234, fd);
return -EACCES;
}
};
TEST(test_pmem_userspace_allocator, testInitPmemAreaLockedWthEaccesOnGetPmemTotalSize) {
Deps_InitPmemAreaLockedWithEaccesOnGetPmemTotalSize depsMock;
AllocatorStub allocStub;
PmemUserspaceAllocator pma(depsMock, allocStub, fakePmemDev);
int result = pma.init_pmem_area_locked();
ASSERT_EQ(-EACCES, result);
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithEaccesOnOpen : public DepsStub {
virtual int getErrno() {
return EACCES;
}
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags);
EXPECT_EQ(0, mode);
return -1;
}
};
TEST(test_pmem_userspace_allocator, testInitPmemAreaLockedWithEaccesOnOpenMaster) {
Deps_InitPmemAreaLockedWithEaccesOnOpen depsMock;
AllocatorStub allocStub;
PmemUserspaceAllocator pma(depsMock, allocStub, fakePmemDev);
int result = pma.init_pmem_area_locked();
ASSERT_EQ(-EACCES, result);
}
/******************************************************************************/
typedef Deps_InitPmemAreaLockedWithSuccessfulCompletion Deps_InitPmemAreaWithSuccessfulInitialCompletion;
TEST(test_pmem_userspace_allocator, testInitPmemAreaWithSuccessfulInitialCompletion) {
Deps_InitPmemAreaWithSuccessfulInitialCompletion depsMock;
AllocatorStub allocStub;
PmemUserspaceAllocator pma(depsMock, allocStub, fakePmemDev);
int result = pma.init_pmem_area();
ASSERT_EQ(0, result);
}
/******************************************************************************/
typedef Deps_InitPmemAreaLockedWithEaccesOnOpen Deps_InitPmemAreaWithEaccesOnInitLocked;
TEST(test_pmem_userspace_allocator, testInitPmemAreaWithEaccesOnInitLocked) {
Deps_InitPmemAreaWithEaccesOnInitLocked depsMock;
AllocatorStub allocStub;
PmemUserspaceAllocator pma(depsMock, allocStub, fakePmemDev);
int result = pma.init_pmem_area();
ASSERT_EQ(-EACCES, result);
}
/******************************************************************************/
TEST(test_pmem_userspace_allocator, testInitPmemAreaAfterSuccessfulInitialCompletion) {
DepsStub depsStub;
AllocatorStub allocStub;
PmemUserspaceAllocator pma(depsStub, allocStub, fakePmemDev);
pma.set_master_values(1234, 0); // Indicate that the pma has been successfully init'd
int result = pma.init_pmem_area();
ASSERT_EQ(0, result);
//XXX JMG: Add this back in maybe? ASSERT_EQ(1234, pmi.master); // Make sure the master fd wasn't changed
}
/******************************************************************************/
TEST(test_pmem_userspace_allocator, testInitPmemAreaAfterFailedInit) {
DepsStub depsStub;
AllocatorStub allocStub;
PmemUserspaceAllocator pma(depsStub, allocStub, fakePmemDev);
pma.set_master_values(-EACCES, 0); // Indicate that the pma has failed init
int result = pma.init_pmem_area();
ASSERT_EQ(-EACCES, result);
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithSuccessfulCompletionWithNoFlags : public DepsStub {
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags & O_RDWR);
EXPECT_EQ(0, mode);
return 5678;
}
virtual int connectPmem(int fd, int master_fd) {
EXPECT_EQ(5678, fd);
EXPECT_EQ(1234, master_fd);
return 0;
}
virtual int mapPmem(int fd, int offset, size_t size) {
EXPECT_EQ(5678, fd);
EXPECT_EQ(0x300, offset);
EXPECT_EQ(size_t(0x100), size);
return 0;
}
};
struct Allocator_AllocPmemBufferWithSuccessfulCompletionWithNoFlags : public AllocatorStub {
virtual ssize_t allocate(size_t size, uint32_t flags = 0) {
EXPECT_EQ(size_t(0x100), size);
EXPECT_EQ(uint32_t(0x0), flags);
return 0x300;
}
};
TEST(test_pmem_userspace_allocator, testAllocPmemBufferWithSuccessfulCompletionWithNoFlags) {
Deps_InitPmemAreaLockedWithSuccessfulCompletionWithNoFlags depsMock;
Allocator_AllocPmemBufferWithSuccessfulCompletionWithNoFlags allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
uint8_t buf[0x300 + 0x100]; // Create a buffer to get memzero'd
pma.set_master_values(1234, buf); // Indicate that the pma has been successfully init'd
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = 0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(0, result);
ASSERT_EQ(0x300, offset);
ASSERT_EQ(5678, fd);
for (int i = 0x300; i < 0x400; ++i) {
ASSERT_EQ(uint8_t(0), buf[i]);
}
}
/******************************************************************************/
typedef Deps_InitPmemAreaLockedWithSuccessfulCompletionWithNoFlags Deps_InitPmemAreaLockedWithSuccessfulCompletionWithAllFlags;
typedef Allocator_AllocPmemBufferWithSuccessfulCompletionWithNoFlags Allocator_AllocPmemBufferWithSuccessfulCompletionWithAllFlags;
TEST(test_pmem_userspace_allocator, testAllocPmemBufferWithSuccessfulCompletionWithAllFlags) {
Deps_InitPmemAreaLockedWithSuccessfulCompletionWithAllFlags depsMock;
Allocator_AllocPmemBufferWithSuccessfulCompletionWithAllFlags allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
uint8_t buf[0x300 + 0x100]; // Create a buffer to get memzero'd
pma.set_master_values(1234, buf); // Indicate that the pma has been successfully init'd
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(0, result);
ASSERT_EQ(0x300, offset);
ASSERT_EQ(5678, fd);
for (int i = 0x300; i < 0x400; ++i) {
ASSERT_EQ(0, buf[i]);
}
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithEnodevOnOpen : public Deps_InitPmemAreaLockedWithSuccessfulCompletionWithNoFlags {
virtual int getErrno() {
return ENODEV;
}
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags & O_RDWR);
EXPECT_EQ(0, mode);
return -1;
}
};
typedef Allocator_AllocPmemBufferWithSuccessfulCompletionWithNoFlags Allocator_AllocPmemBufferWithEnodevOnOpen;
TEST(test_pmem_userspace_allocator, testAllocPmemBufferWithSuccessfulCompletionWithEnodevOnOpen) {
Deps_InitPmemAreaLockedWithEnodevOnOpen depsMock;
Allocator_AllocPmemBufferWithEnodevOnOpen allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
uint8_t buf[0x300 + 0x100]; // Create a buffer to get memzero'd
pma.set_master_values(1234, buf); // Indicate that the pma has been successfully init'd
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(-ENODEV, result);
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithEnomemOnConnectPmem : public Deps_InitPmemAreaLockedWithSuccessfulCompletionWithNoFlags {
virtual int getErrno() {
return ENOMEM;
}
virtual int connectPmem(int fd, int master_fd) {
EXPECT_EQ(5678, fd);
EXPECT_EQ(1234, master_fd);
return -1;
}
};
typedef Allocator_AllocPmemBufferWithSuccessfulCompletionWithNoFlags Allocator_AllocPmemBufferWithEnomemOnConnectPmem;
TEST(test_pmem_userspace_allocator, testAllocPmemBufferWithSuccessfulCompletionWithEnomemOnConnectPmem) {
Deps_InitPmemAreaLockedWithEnomemOnConnectPmem depsMock;
Allocator_AllocPmemBufferWithEnomemOnConnectPmem allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
uint8_t buf[0x300 + 0x100]; // Create a buffer to get memzero'd
pma.set_master_values(1234, buf); // Indicate that the pma has been successfully init'd
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(-ENOMEM, result);
}
/******************************************************************************/
struct Deps_InitPmemAreaLockedWithEnomemOnMapPmem : public Deps_InitPmemAreaLockedWithSuccessfulCompletionWithNoFlags {
virtual int getErrno() {
return ENOMEM;
}
virtual int mapPmem(int fd, int offset, size_t size) {
EXPECT_EQ(5678, fd);
EXPECT_EQ(0x300, offset);
EXPECT_EQ(size_t(0x100), size);
return -1;
}
};
typedef Allocator_AllocPmemBufferWithSuccessfulCompletionWithNoFlags Allocator_AllocPmemBufferWithEnomemOnMapPmem;
TEST(test_pmem_userspace_allocator, testAllocPmemBufferWithEnomemOnMapPmem) {
Deps_InitPmemAreaLockedWithEnomemOnMapPmem depsMock;
Allocator_AllocPmemBufferWithEnomemOnMapPmem allocMock;
PmemUserspaceAllocator pma(depsMock, allocMock, fakePmemDev);
uint8_t buf[0x300 + 0x100]; // Create a buffer to get memzero'd
pma.set_master_values(1234, buf); // Indicate that the pma has been successfully init'd
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(-ENOMEM, result);
}
/******************************************************************************/
struct Deps_KernelAllocPmemBufferWithSuccessfulCompletionWithNoFlags : public DepsStub {
void* mmapResult;
Deps_KernelAllocPmemBufferWithSuccessfulCompletionWithNoFlags(void* mmapResult) :
mmapResult(mmapResult) {}
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags & O_RDWR);
EXPECT_EQ(0, mode);
return 5678;
}
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
EXPECT_EQ(5678, fd);
return mmapResult;
}
};
TEST(test_pmem_kernel_allocator, testAllocPmemBufferWithSuccessfulCompletionWithNoFlags) {
uint8_t buf[0x100]; // Create a buffer to get memzero'd
Deps_KernelAllocPmemBufferWithSuccessfulCompletionWithNoFlags depsMock(buf);
PmemKernelAllocator pma(depsMock, fakePmemDev);
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = 0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(0, result);
ASSERT_EQ(buf, base);
ASSERT_EQ(0, offset);
ASSERT_EQ(5678, fd);
for (int i = 0; i < 0x100; ++i) {
ASSERT_EQ(0, buf[i]);
}
}
/******************************************************************************/
typedef Deps_KernelAllocPmemBufferWithSuccessfulCompletionWithNoFlags Deps_KernelAllocPmemBufferWithSuccessfulCompletionWithAllFlags;
TEST(test_pmem_kernel_allocator, testAllocPmemBufferWithSuccessfulCompletionWithAllFlags) {
uint8_t buf[0x100]; // Create a buffer to get memzero'd
Deps_KernelAllocPmemBufferWithSuccessfulCompletionWithAllFlags depsMock(buf);
PmemKernelAllocator pma(depsMock, fakePmemDev);
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(0, result);
ASSERT_EQ(buf, base);
ASSERT_EQ(0, offset);
ASSERT_EQ(5678, fd);
for (int i = 0; i < 0x100; ++i) {
ASSERT_EQ(0, buf[i]);
}
}
/******************************************************************************/
struct Deps_KernelAllocPmemBufferWithEpermOnOpen : public DepsStub {
virtual int getErrno() {
return EPERM;
}
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags & O_RDWR);
EXPECT_EQ(0, mode);
return -1;
}
};
TEST(test_pmem_kernel_allocator, testAllocPmemBufferWithEpermOnOpen) {
Deps_KernelAllocPmemBufferWithEpermOnOpen depsMock;
PmemKernelAllocator pma(depsMock, fakePmemDev);
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(-EPERM, result);
ASSERT_EQ(0, base);
ASSERT_EQ(0, offset);
ASSERT_EQ(-1, fd);
}
/******************************************************************************/
struct Deps_KernelAllocPmemBufferWithEnomemOnMmap : DepsStub {
virtual int open(const char* pathname, int flags, int mode) {
EXPECT_EQ(fakePmemDev, pathname);
EXPECT_EQ(O_RDWR, flags & O_RDWR);
EXPECT_EQ(0, mode);
return 5678;
}
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) {
return (void*)MAP_FAILED;
}
virtual int getErrno() {
return ENOMEM;
}
};
TEST(test_pmem_kernel_allocator, testAllocPmemBufferWithEnomemOnMmap) {
Deps_KernelAllocPmemBufferWithEnomemOnMmap depsMock;
PmemKernelAllocator pma(depsMock, fakePmemDev);
void* base = 0;
int offset = -9182, fd = -9182;
int size = 0x100;
int flags = ~0;
int result = pma.alloc_pmem_buffer(size, flags, &base, &offset, &fd);
ASSERT_EQ(-ENOMEM, result);
ASSERT_EQ(0, base);
ASSERT_EQ(0, offset);
ASSERT_EQ(-1, fd);
}
/******************************************************************************/