display: Add support for pmem in the new gralloc

Change-Id: Ice6f436a01c3344a6d859ee0afc77673f151dbe4
Naseer Ahmed 2011-11-19 09:55:49 -08:00
parent 96311b0091
commit 78ec9e4e53
12 changed files with 565 additions and 461 deletions
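Taken together, the change retires the TARGET_USES_ION build gate and routes pmem and ashmem allocation through the libmemalloc interfaces: IAllocController::getInstance() now takes a bool selecting the master-heap (pmem + ashmem) controller, USE_ION becomes an opt-in CFLAG on libmemalloc, and new PmemKernelController/PmemAshmemController classes plus PmemUserspaceAlloc/PmemKernelAlloc back the IMemAlloc interface. The fragment below is a minimal sketch of the resulting call pattern, pieced together from gpu.cpp and mapper.cpp in this commit; the function name and error handling are illustrative, not part of the change.

    // Sketch only: how a gralloc client is expected to drive the reworked controller.
    #include <errno.h>
    #include <unistd.h>              // getpagesize()
    #include "alloc_controller.h"    // IAllocController and the concrete controllers
    #include "memalloc.h"            // IMemAlloc, alloc_data
    #include "gralloc_priv.h"        // private_handle_t::PRIV_FLAGS_*

    using android::sp;
    using namespace gralloc;

    static int allocateAndFreeOnce(size_t size, int usage, int compositionType)
    {
        // true selects the master-heap path: PmemAshmemController on pmem targets,
        // IonController when libmemalloc is built with USE_ION.
        sp<IAllocController> ctrl = IAllocController::getInstance(true);

        alloc_data data;
        data.size = size;
        data.align = getpagesize();
        data.uncached = false;       // PmemAshmemController rewrites this from the usage bits
        int err = ctrl->allocate(data, usage, compositionType);
        if (err < 0)
            return err;

        // The owning allocator is recovered from the same PRIV_FLAGS_* value that
        // ends up in the buffer handle; data.allocType carries it here.
        sp<IMemAlloc> memalloc = ctrl->getAllocator(data.allocType);
        if (memalloc == NULL)
            return -EINVAL;
        return memalloc->free_buffer(data.base, data.size, data.offset, data.fd);
    }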

View File

@ -1,8 +1,4 @@
#Enables the listed display HAL modules
display-hals := libhwcomposer liboverlay
ifeq ($(TARGET_USES_ION),true)
display-hals += libgralloc
include $(call all-named-subdir-makefiles,$(display-hals))
endif
display-hals := libhwcomposer liboverlay libgralloc
include $(call all-named-subdir-makefiles,$(display-hals))

View File

@ -13,7 +13,6 @@
# limitations under the License.
# Use this flag until pmem/ashmem is implemented in the new gralloc
ifeq ($(TARGET_USES_ION),true)
LOCAL_PATH := $(call my-dir)
# HAL module implementation, not prelinked and stored in
@ -28,13 +27,11 @@ LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
LOCAL_SRC_FILES := framebuffer.cpp \
gpu.cpp \
gralloc.cpp \
mapper.cpp \
pmemalloc.cpp \
pmem_bestfit_alloc.cpp
mapper.cpp
LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).gralloc\" -DHOST -DDEBUG_CALC_FPS -DUSE_ION
LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).gralloc\" -DHOST -DDEBUG_CALC_FPS
ifeq ($(call is-board-platform,msm7627_surf msm7627_6x),true)
LOCAL_CFLAGS += -DTARGET_MSM7x27
@ -53,6 +50,7 @@ endif
ifeq ($(TARGET_GRALLOC_USES_ASHMEM),true)
LOCAL_CFLAGS += -DUSE_ASHMEM
endif
include $(BUILD_SHARED_LIBRARY)
#MemAlloc Library
@ -64,9 +62,15 @@ LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
LOCAL_SHARED_LIBRARIES := liblog libcutils libutils
LOCAL_SRC_FILES := ionalloc.cpp \
ashmemalloc.cpp \
pmemalloc.cpp \
pmem_bestfit_alloc.cpp \
alloc_controller.cpp
LOCAL_CFLAGS:= -DLOG_TAG=\"memalloc\" -DLOG_NDDEBUG=0 -DUSE_ION
LOCAL_CFLAGS:= -DLOG_TAG=\"memalloc\" -DLOG_NDDEBUG=0
ifeq ($(TARGET_USES_ION),true)
LOCAL_CFLAGS += -DUSE_ION
endif
LOCAL_MODULE := libmemalloc
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
endif #TARGET_USES_ION

View File

@ -29,33 +29,19 @@
#include <cutils/log.h>
#include <utils/RefBase.h>
#include <fcntl.h>
#include "gralloc_priv.h"
#include "alloc_controller.h"
#include "memalloc.h"
#include "ionalloc.h"
#include "pmemalloc.h"
#include "ashmemalloc.h"
using namespace gralloc;
using android::sp;
sp<IAllocController> IAllocController::sController = NULL;
sp<IAllocController> IAllocController::getInstance(void)
{
if(sController == NULL) {
#ifdef USE_ION
sController = new IonController();
#else
// XXX: Return pmem/ashmem controller when completed
#endif
}
return sController;
}
IonController::IonController()
{
mIonAlloc = new IonAlloc();
}
static bool canFallback(int compositionType, int usage, int flags)
//Common functions
static bool canFallback(int compositionType, int usage, bool triedSystem)
{
// Fallback to system heap when alloc fails unless
// 1. Composition type is MDP
@ -64,7 +50,7 @@ static bool canFallback(int compositionType, int usage, int flags)
if(compositionType == MDP_COMPOSITION)
return false;
if(flags & ION_HEAP_SYSTEM_ID)
if(triedSystem)
return false;
if(usage &(GRALLOC_USAGE_PRIVATE_ADSP_HEAP|
GRALLOC_USAGE_PRIVATE_EBI_HEAP |
@ -74,6 +60,29 @@ static bool canFallback(int compositionType, int usage, int flags)
return true;
}
sp<IAllocController> IAllocController::sController = NULL;
sp<IAllocController> IAllocController::getInstance(bool useMasterHeap)
{
if(sController == NULL) {
#ifdef USE_ION
sController = new IonController();
#else
if(useMasterHeap)
sController = new PmemAshmemController();
else
sController = new PmemKernelController();
#endif
}
return sController;
}
//-------------- IonController-----------------------//
IonController::IonController()
{
mIonAlloc = new IonAlloc();
}
int IonController::allocate(alloc_data& data, int usage,
int compositionType)
{
@ -110,7 +119,10 @@ int IonController::allocate(alloc_data& data, int usage,
ret = mIonAlloc->alloc_buffer(data);
// Fallback
if(ret < 0 && canFallback(compositionType, usage, ionFlags)) {
if(ret < 0 && canFallback(compositionType,
usage,
(ionFlags & ION_HEAP_SYSTEM_ID)))
{
LOGW("Falling back to system heap");
data.flags = 1 << ION_HEAP_SYSTEM_ID;
ret = mIonAlloc->alloc_buffer(data);
@ -134,20 +146,144 @@ sp<IMemAlloc> IonController::getAllocator(int flags)
return memalloc;
}
//-------------- PmemKernelController-----------------------//
PmemKernelController::PmemKernelController()
{
mPmemAdspAlloc = new PmemKernelAlloc(DEVICE_PMEM_ADSP);
// XXX: Right now, there is no need to maintain an instance
// of the SMI allocator as we need it only in a few cases
}
PmemKernelController::~PmemKernelController()
{
}
int PmemKernelController::allocate(alloc_data& data, int usage,
int compositionType)
{
int ret = 0;
bool adspFallback = false;
// Try SMI first
if ((usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP) ||
(usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
(usage & GRALLOC_USAGE_PROTECTED))
{
int tempFd = open(DEVICE_PMEM_SMIPOOL, O_RDWR, 0);
if(tempFd > 0) {
close(tempFd);
sp<IMemAlloc> memalloc;
memalloc = new PmemKernelAlloc(DEVICE_PMEM_SMIPOOL);
ret = memalloc->alloc_buffer(data);
if(ret >= 0)
return ret;
else {
adspFallback = true;
LOGW("Allocation from SMI failed, trying ADSP");
}
}
}
if ((usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) || adspFallback) {
ret = mPmemAdspAlloc->alloc_buffer(data);
}
return ret;
}
sp<IMemAlloc> PmemKernelController::getAllocator(int flags)
{
sp<IMemAlloc> memalloc;
if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
memalloc = mPmemAdspAlloc;
else {
LOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
memalloc = NULL;
}
return memalloc;
}
//-------------- PmemAshmemController-----------------------//
PmemAshmemController::PmemAshmemController()
{
mPmemUserspaceAlloc = new PmemUserspaceAlloc();
mAshmemAlloc = new AshmemAlloc();
mPmemKernelCtrl = new PmemKernelController();
}
PmemAshmemController::~PmemAshmemController()
{
}
int PmemAshmemController::allocate(alloc_data& data, int usage,
int compositionType)
{
//XXX PMEM with ashmem fallback strategy
return 0;
int ret = 0;
// Decide caching
// Decide based on usage
uint32_t uread = usage & GRALLOC_USAGE_SW_READ_MASK;
uint32_t uwrite = usage & GRALLOC_USAGE_SW_WRITE_MASK;
if (uread == GRALLOC_USAGE_SW_READ_OFTEN ||
uwrite == GRALLOC_USAGE_SW_WRITE_OFTEN) {
data.uncached = false;
} else {
data.uncached = true;
}
// Override if we explicitly need uncached buffers
if (usage & GRALLOC_USAGE_PRIVATE_UNCACHED)
data.uncached = true;
// If ADSP or SMI is requested use the kernel controller
if(usage & (GRALLOC_USAGE_PRIVATE_ADSP_HEAP|
GRALLOC_USAGE_PRIVATE_SMI_HEAP)) {
ret = mPmemKernelCtrl->allocate(data, usage, compositionType);
if(ret < 0)
LOGE("%s: Failed to allocate ADSP/SMI memory", __func__);
else
data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
return ret;
}
if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
ret = mAshmemAlloc->alloc_buffer(data);
if(ret >= 0)
data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
return ret;
}
// if no memory specific flags are set,
// default to EBI heap, so that bypass
// can work. We can fall back to system
// heap if we run out.
ret = mPmemUserspaceAlloc->alloc_buffer(data);
// Fallback
if(ret >= 0 ) {
data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM;
} else if(ret < 0 && canFallback(compositionType, usage, false)) {
LOGW("Falling back to ashmem");
ret = mAshmemAlloc->alloc_buffer(data);
if(ret >= 0)
data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
}
return ret;
}
sp<IMemAlloc> PmemAshmemController::getAllocator(int flags)
{
sp<IMemAlloc> memalloc;
if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM) {
// XXX Return right allocator based on flags
memalloc = NULL;
} else {
if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM)
memalloc = mPmemUserspaceAlloc;
else if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
memalloc = mPmemKernelCtrl->getAllocator(flags);
else if (flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM)
memalloc = mAshmemAlloc;
else {
LOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
memalloc = NULL;
}
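Outside the diff, the ordering in PmemAshmemController::allocate() above can be restated compactly: the SW read/write usage bits decide cached vs. uncached, an explicit UNCACHED bit overrides that, and the heap is then picked in the order ADSP/SMI (kernel pmem), explicit system heap (ashmem), default EBI pmem with an ashmem fallback. The stand-alone sketch below mirrors that ordering; the constants are stand-ins declared locally so it compiles on its own and are not the real gralloc usage values.

    // Illustrative restatement of the allocate() routing above, not code from the commit.
    #include <cstdint>
    #include <cstdio>

    enum : uint32_t {
        SW_READ_OFTEN    = 1u << 0,   // stand-in for GRALLOC_USAGE_SW_READ_OFTEN
        SW_WRITE_OFTEN   = 1u << 1,   // stand-in for GRALLOC_USAGE_SW_WRITE_OFTEN
        PRIV_UNCACHED    = 1u << 2,   // stand-in for GRALLOC_USAGE_PRIVATE_UNCACHED
        PRIV_ADSP_OR_SMI = 1u << 3,   // stand-in for the ADSP/SMI heap bits
        PRIV_SYSTEM_HEAP = 1u << 4,   // stand-in for GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP
    };

    enum class Heap { PmemKernel, Ashmem, PmemUserspace };

    static Heap route(uint32_t usage, bool &uncached)
    {
        // Cached only when software reads or writes the buffer often.
        uncached = !(usage & (SW_READ_OFTEN | SW_WRITE_OFTEN));
        if (usage & PRIV_UNCACHED)
            uncached = true;              // explicit request always wins
        if (usage & PRIV_ADSP_OR_SMI)
            return Heap::PmemKernel;      // SMI first, ADSP as its fallback
        if (usage & PRIV_SYSTEM_HEAP)
            return Heap::Ashmem;
        return Heap::PmemUserspace;       // default EBI heap, ashmem on failure
    }

    int main()
    {
        bool uncached = false;
        Heap heap = route(SW_WRITE_OFTEN, uncached);
        std::printf("heap=%d uncached=%d\n", static_cast<int>(heap), uncached);
        return 0;
    }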

View File

@ -50,7 +50,7 @@ namespace gralloc {
virtual ~IAllocController() {};
static android::sp<IAllocController> getInstance(void);
static android::sp<IAllocController> getInstance(bool useMasterHeap);
private:
static android::sp<IAllocController> sController;
@ -72,6 +72,25 @@ namespace gralloc {
};
class PmemKernelController : public IAllocController {
public:
virtual int allocate(alloc_data& data, int usage,
int compositionType);
virtual android::sp<IMemAlloc> getAllocator(int flags);
PmemKernelController ();
~PmemKernelController ();
private:
android::sp<IMemAlloc> mPmemAdspAlloc;
};
// Main pmem controller - this should only
// be used within gralloc
class PmemAshmemController : public IAllocController {
public:
@ -79,7 +98,15 @@ namespace gralloc {
int compositionType);
virtual android::sp<IMemAlloc> getAllocator(int flags);
// XXX: Pmem and ashmem alloc objects
PmemAshmemController();
~PmemAshmemController();
private:
android::sp<IMemAlloc> mPmemUserspaceAlloc;
android::sp<IMemAlloc> mAshmemAlloc;
android::sp<IAllocController> mPmemKernelCtrl;
};

View File

@ -161,7 +161,6 @@ int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
else
data.align = getpagesize();
data.pHandle = (unsigned int) pHandle;
data.bufferType = bufferType;
err = mAllocCtrl->allocate(data, usage, compositionType);
if (err == 0) {
@ -307,8 +306,8 @@ int gpu_context_t::free_impl(private_handle_t const* hnd) {
hnd->offset, hnd->fd);
if(err)
return err;
terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
}
// XXX any additional cleanup.
delete hnd;
return 0;
}

View File

@ -102,7 +102,7 @@ int gralloc_device_open(const hw_module_t* module, const char* name,
const private_module_t* m = reinterpret_cast<const private_module_t*>(
module);
gpu_context_t *dev;
sp<IAllocController> alloc_ctrl = IAllocController::getInstance();
sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
dev = new gpu_context_t(m, alloc_ctrl);
*device = &dev->common;
status = 0;

View File

@ -69,6 +69,7 @@ enum {
#define INTERLACE_MASK 0x80
#define S3D_FORMAT_MASK 0xFF000
#define COLOR_FORMAT(x) (x & 0xFFF) // Max range for colorFormats is 0 - FFF
#define DEVICE_PMEM "/dev/pmem"
#define DEVICE_PMEM_ADSP "/dev/pmem_adsp"
#define DEVICE_PMEM_SMIPOOL "/dev/pmem_smipool"
/*****************************************************************************/

View File

@ -39,12 +39,10 @@
#include "gralloc_priv.h"
#include "gr.h"
#include "ionalloc.h"
#include "ashmemalloc.h"
#include "alloc_controller.h"
#include "memalloc.h"
using gralloc::IMemAlloc;
using gralloc::IonAlloc;
using gralloc::AshmemAlloc;
using namespace gralloc;
using android::sp;
/*****************************************************************************/
@ -53,14 +51,8 @@ using android::sp;
static sp<IMemAlloc> getAllocator(int flags)
{
sp<IMemAlloc> memalloc;
if (flags & private_handle_t::PRIV_FLAGS_USES_ION) {
memalloc = new IonAlloc();
}
if (flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM) {
memalloc = new AshmemAlloc();
}
// XXX Return allocator for pmem
sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
memalloc = alloc_ctrl->getAllocator(flags);
return memalloc;
}
@ -192,7 +184,8 @@ int terminateBuffer(gralloc_module_t const* module,
// this buffer was mapped, unmap it now
if (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_PMEM |
private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP |
private_handle_t::PRIV_FLAGS_USES_ASHMEM)) {
private_handle_t::PRIV_FLAGS_USES_ASHMEM |
private_handle_t::PRIV_FLAGS_USES_ION)) {
if (hnd->pid != getpid()) {
// ... unless it's a "master" pmem buffer, that is a buffer
// mapped in the process it's been allocated.

View File

@ -44,7 +44,6 @@ namespace gralloc {
unsigned int pHandle;
bool uncached;
unsigned int flags;
int bufferType;
int allocType;
};

View File

@ -30,7 +30,6 @@
/*
* A simple templatized doubly linked-list implementation
*/
template <typename NODE>
class LinkedList
{
@ -93,7 +92,7 @@ public:
}
};
class SimpleBestFitAllocator : public PmemUserspaceAllocator::Deps::Allocator
class SimpleBestFitAllocator : public gralloc::PmemUserspaceAlloc::Allocator
{
public:
@ -127,5 +126,4 @@ private:
LinkedList<chunk_t> mList;
size_t mHeapSize;
};
#endif /* GRALLOC_ALLOCATOR_H_ */

View File

@ -1,135 +1,157 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
//#define LOG_NDEBUG 0
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <cutils/log.h>
#include <cutils/ashmem.h>
#include <errno.h>
#include <linux/android_pmem.h>
#include "gralloc_priv.h"
#include "pmemalloc.h"
#include "pmem_bestfit_alloc.h"
using namespace gralloc;
using android::sp;
#define BEGIN_FUNC LOGV("%s begin", __PRETTY_FUNCTION__)
#define END_FUNC LOGV("%s end", __PRETTY_FUNCTION__)
static int get_open_flags(int usage) {
int openFlags = O_RDWR | O_SYNC;
uint32_t uread = usage & GRALLOC_USAGE_SW_READ_MASK;
uint32_t uwrite = usage & GRALLOC_USAGE_SW_WRITE_MASK;
if (uread == GRALLOC_USAGE_SW_READ_OFTEN ||
uwrite == GRALLOC_USAGE_SW_WRITE_OFTEN) {
openFlags &= ~O_SYNC;
}
return openFlags;
}
PmemAllocator::~PmemAllocator()
// Common functions between userspace
// and kernel allocators
static int getPmemTotalSize(int fd, size_t* size)
{
BEGIN_FUNC;
END_FUNC;
}
PmemUserspaceAllocator::PmemUserspaceAllocator(Deps& deps,
Deps::Allocator& allocator, const char* pmemdev):
deps(deps),
allocator(allocator),
pmemdev(pmemdev),
master_fd(MASTER_FD_INIT)
{
BEGIN_FUNC;
pthread_mutex_init(&lock, NULL);
END_FUNC;
}
PmemUserspaceAllocator::~PmemUserspaceAllocator()
{
BEGIN_FUNC;
END_FUNC;
}
void* PmemUserspaceAllocator::get_base_address() {
BEGIN_FUNC;
END_FUNC;
return master_base;
}
int PmemUserspaceAllocator::init_pmem_area_locked()
{
BEGIN_FUNC;
//XXX: 7x27
int err = 0;
int fd = deps.open(pmemdev, O_RDWR, 0);
if (fd >= 0) {
size_t size = 0;
err = deps.getPmemTotalSize(fd, &size);
if (err < 0) {
LOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", pmemdev,
err);
size = 8<<20; // 8 MiB
}
allocator.setSize(size);
void* base = deps.mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
0);
if (base == MAP_FAILED) {
LOGE("%s: failed to map pmem master fd: %s", pmemdev,
strerror(deps.getErrno()));
err = -deps.getErrno();
base = 0;
deps.close(fd);
fd = -1;
} else {
master_fd = fd;
master_base = base;
}
} else {
LOGE("%s: failed to open pmem device: %s", pmemdev,
strerror(deps.getErrno()));
err = -deps.getErrno();
pmem_region region;
err = ioctl(fd, PMEM_GET_TOTAL_SIZE, &region);
if (err == 0) {
*size = region.len;
}
END_FUNC;
return err;
}
int PmemUserspaceAllocator::init_pmem_area()
static int getOpenFlags(bool uncached)
{
BEGIN_FUNC;
pthread_mutex_lock(&lock);
int err = master_fd;
if (err == MASTER_FD_INIT) {
if(uncached)
return O_RDWR | O_SYNC;
else
return O_RDWR;
}
static int connectPmem(int fd, int master_fd) {
return ioctl(fd, PMEM_CONNECT, master_fd);
}
static int mapSubRegion(int fd, int offset, size_t size) {
struct pmem_region sub = { offset, size };
return ioctl(fd, PMEM_MAP, &sub);
}
static int unmapSubRegion(int fd, int offset, size_t size) {
struct pmem_region sub = { offset, size };
return ioctl(fd, PMEM_UNMAP, &sub);
}
static int alignPmem(int fd, size_t size, int align) {
struct pmem_allocation allocation;
allocation.size = size;
allocation.align = align;
return ioctl(fd, PMEM_ALLOCATE_ALIGNED, &allocation);
}
static int cleanPmem(void *base, size_t size, int offset, int fd) {
struct pmem_addr pmem_addr;
pmem_addr.vaddr = (unsigned long) base;
pmem_addr.offset = offset;
pmem_addr.length = size;
return ioctl(fd, PMEM_CLEAN_INV_CACHES, &pmem_addr);
}
//-------------- PmemUserspaceAlloc-----------------------//
PmemUserspaceAlloc::PmemUserspaceAlloc()
{
mPmemDev = DEVICE_PMEM;
mMasterFd = FD_INIT;
mAllocator = new SimpleBestFitAllocator();
pthread_mutex_init(&mLock, NULL);
}
PmemUserspaceAlloc::~PmemUserspaceAlloc()
{
}
int PmemUserspaceAlloc::init_pmem_area_locked()
{
LOGD("%s: Opening master pmem FD", __FUNCTION__);
int err = 0;
int fd = open(mPmemDev, O_RDWR, 0);
if (fd >= 0) {
size_t size = 0;
err = getPmemTotalSize(fd, &size);
LOGD("%s: Total pmem size: %d", __FUNCTION__, size);
if (err < 0) {
LOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", mPmemDev,
err);
size = 8<<20; // 8 MiB
}
mAllocator->setSize(size);
void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
0);
if (base == MAP_FAILED) {
LOGE("%s: Failed to map pmem master fd: %s", mPmemDev,
strerror(errno));
err = -errno;
base = 0;
close(fd);
fd = -1;
} else {
mMasterFd = fd;
mMasterBase = base;
}
} else {
LOGE("%s: Failed to open pmem device: %s", mPmemDev,
strerror(errno));
err = -errno;
}
return err;
}
int PmemUserspaceAlloc::init_pmem_area()
{
pthread_mutex_lock(&mLock);
int err = mMasterFd;
if (err == FD_INIT) {
// first time, try to initialize pmem
LOGD("%s: Initializing pmem area", __FUNCTION__);
err = init_pmem_area_locked();
if (err) {
LOGE("%s: failed to initialize pmem area", pmemdev);
master_fd = err;
LOGE("%s: failed to initialize pmem area", mPmemDev);
mMasterFd = err;
}
} else if (err < 0) {
// pmem couldn't be initialized, never use it
@ -137,227 +159,211 @@ int PmemUserspaceAllocator::init_pmem_area()
// pmem OK
err = 0;
}
pthread_mutex_unlock(&lock);
END_FUNC;
pthread_mutex_unlock(&mLock);
return err;
}
int PmemUserspaceAllocator::alloc_pmem_buffer(size_t size, int usage,
void** pBase, int* pOffset, int* pFd, int format)
int PmemUserspaceAlloc::alloc_buffer(alloc_data& data)
{
BEGIN_FUNC;
int err = init_pmem_area();
if (err == 0) {
void* base = master_base;
int offset = allocator.allocate(size);
void* base = mMasterBase;
size_t size = data.size;
int offset = mAllocator->allocate(size);
if (offset < 0) {
// no more pmem memory
LOGE("%s: no more pmem available", pmemdev);
LOGE("%s: No more pmem available", mPmemDev);
err = -ENOMEM;
} else {
int openFlags = get_open_flags(usage);
//LOGD("%s: allocating pmem at offset 0x%p", pmemdev, offset);
int openFlags = getOpenFlags(data.uncached);
// now create the "sub-heap"
int fd = deps.open(pmemdev, openFlags, 0);
int fd = open(mPmemDev, openFlags, 0);
err = fd < 0 ? fd : 0;
// and connect to it
if (err == 0)
err = deps.connectPmem(fd, master_fd);
err = connectPmem(fd, mMasterFd);
// and make it available to the client process
if (err == 0)
err = deps.mapPmem(fd, offset, size);
err = mapSubRegion(fd, offset, size);
if (err < 0) {
LOGE("%s: failed to initialize pmem sub-heap: %d", pmemdev,
LOGE("%s: Failed to initialize pmem sub-heap: %d", mPmemDev,
err);
err = -deps.getErrno();
deps.close(fd);
allocator.deallocate(offset);
err = -errno;
close(fd);
mAllocator->deallocate(offset);
fd = -1;
} else {
LOGV("%s: mapped fd %d at offset %d, size %d", pmemdev, fd, offset, size);
LOGD("%s: Allocated buffer base:%p size:%d offset:%d fd:%d",
mPmemDev, base, size, offset, fd);
memset((char*)base + offset, 0, size);
//Clean cache before flushing to ensure pmem is properly flushed
err = deps.cleanPmem(fd, (unsigned long) base + offset, offset, size);
err = clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
if (err < 0) {
LOGE("cleanPmem failed: (%s)", strerror(deps.getErrno()));
LOGE("cleanPmem failed: (%s)", strerror(errno));
}
#ifdef HOST
cacheflush(intptr_t(base) + offset, intptr_t(base) + offset + size, 0);
#endif
*pBase = base;
*pOffset = offset;
*pFd = fd;
cacheflush(intptr_t(base) + offset, intptr_t(base) + offset + size, 0);
data.base = base;
data.offset = offset;
data.fd = fd;
}
//LOGD_IF(!err, "%s: allocating pmem size=%d, offset=%d", pmemdev, size, offset);
}
}
END_FUNC;
return err;
}
int PmemUserspaceAllocator::free_pmem_buffer(size_t size, void* base,
int offset, int fd)
int PmemUserspaceAlloc::free_buffer(void* base, size_t size, int offset, int fd)
{
BEGIN_FUNC;
LOGD("%s: Freeing buffer base:%p size:%d offset:%d fd:%d",
mPmemDev, base, size, offset, fd);
int err = 0;
if (fd >= 0) {
int err = deps.unmapPmem(fd, offset, size);
int err = unmapSubRegion(fd, offset, size);
LOGE_IF(err<0, "PMEM_UNMAP failed (%s), fd=%d, sub.offset=%u, "
"sub.size=%u", strerror(deps.getErrno()), fd, offset, size);
"sub.size=%u", strerror(errno), fd, offset, size);
if (err == 0) {
// we can't deallocate the memory in case of UNMAP failure
// because it would give that process access to someone else's
// surfaces, which would be a security breach.
allocator.deallocate(offset);
mAllocator->deallocate(offset);
}
close(fd);
}
END_FUNC;
return err;
}
PmemUserspaceAllocator::Deps::Allocator::~Allocator()
int PmemUserspaceAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
{
BEGIN_FUNC;
END_FUNC;
}
PmemUserspaceAllocator::Deps::~Deps()
{
BEGIN_FUNC;
END_FUNC;
}
PmemKernelAllocator::PmemKernelAllocator(Deps& deps):
deps(deps)
{
BEGIN_FUNC;
END_FUNC;
}
PmemKernelAllocator::~PmemKernelAllocator()
{
BEGIN_FUNC;
END_FUNC;
}
void* PmemKernelAllocator::get_base_address() {
BEGIN_FUNC;
END_FUNC;
return 0;
}
static unsigned clp2(unsigned x) {
x = x - 1;
x = x | (x >> 1);
x = x | (x >> 2);
x = x | (x >> 4);
x = x | (x >> 8);
x = x | (x >>16);
return x + 1;
}
int PmemKernelAllocator::alloc_pmem_buffer(size_t size, int usage,
void** pBase,int* pOffset, int* pFd, int format)
{
BEGIN_FUNC;
*pBase = 0;
*pOffset = 0;
*pFd = -1;
int err, offset = 0;
int openFlags = get_open_flags(usage);
const char *device;
if (usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) {
device = DEVICE_PMEM_ADSP;
} else if (usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP) {
device = DEVICE_PMEM_SMIPOOL;
} else if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
(usage & GRALLOC_USAGE_PROTECTED)) {
int tempFd = deps.open(DEVICE_PMEM_SMIPOOL, openFlags, 0);
if (tempFd < 0) {
device = DEVICE_PMEM_ADSP;
} else {
close(tempFd);
device = DEVICE_PMEM_SMIPOOL;
}
int err = 0;
size += offset;
void *base = mmap(0, size, PROT_READ| PROT_WRITE,
MAP_SHARED, fd, 0);
*pBase = base;
if(base == MAP_FAILED) {
LOGD("%s: Failed to map memory in the client: %s",
mPmemDev, strerror(errno));
err = -errno;
} else {
LOGE("Invalid device");
return -EINVAL;
LOGD("%s: Mapped buffer base:%p size:%d offset:%d fd:%d",
mPmemDev, base, size, offset, fd);
}
return err;
}
int PmemUserspaceAlloc::unmap_buffer(void *base, size_t size, int offset)
{
int err = 0;
//pmem hack
base = (void*)(intptr_t(base) - offset);
size += offset;
LOGD("%s: Unmapping buffer base:%p size:%d offset:%d",
mPmemDev , base, size, offset);
if (munmap(base, size) < 0) {
LOGE("Could not unmap %s", strerror(errno));
err = -errno;
}
int fd = deps.open(device, openFlags, 0);
return err;
}
int PmemUserspaceAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
{
return cleanPmem(base, size, offset, fd);
}
//-------------- PmemKernelAlloc-----------------------//
PmemKernelAlloc::PmemKernelAlloc(const char* pmemdev) :
mPmemDev(pmemdev)
{
}
PmemKernelAlloc::~PmemKernelAlloc()
{
}
int PmemKernelAlloc::alloc_buffer(alloc_data& data)
{
int err, offset = 0;
int openFlags = getOpenFlags(data.uncached);
int size = data.size;
int fd = open(mPmemDev, openFlags, 0);
if (fd < 0) {
err = -deps.getErrno();
END_FUNC;
LOGE("Error opening %s", device);
err = -errno;
LOGE("%s: Error opening %s", __FUNCTION__, mPmemDev);
return err;
}
// The size should already be page aligned, now round it up to a power of 2.
//size = clp2(size);
if (format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED) {
if (data.align == 8192) {
// Tile format buffers need physical alignment to 8K
err = deps.alignPmem(fd, size, 8192);
// Default page size does not need this ioctl
err = alignPmem(fd, size, 8192);
if (err < 0) {
LOGE("alignPmem failed");
}
}
void* base = deps.mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (base == MAP_FAILED) {
LOGE("%s: failed to map pmem fd: %s", device,
strerror(deps.getErrno()));
err = -deps.getErrno();
deps.close(fd);
END_FUNC;
LOGE("%s: failed to map pmem fd: %s", mPmemDev,
strerror(errno));
err = -errno;
close(fd);
return err;
}
memset(base, 0, size);
//XXX: Flush here if cached
data.base = base;
data.offset = 0;
data.fd = fd;
return 0;
}
int PmemKernelAlloc::free_buffer(void* base, size_t size, int offset, int fd)
{
int err = unmap_buffer(base, size, offset);
close(fd);
return err;
}
int PmemKernelAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
{
int err = 0;
void *base = mmap(0, size, PROT_READ| PROT_WRITE,
MAP_SHARED, fd, 0);
*pBase = base;
*pOffset = 0;
*pFd = fd;
END_FUNC;
return 0;
}
int PmemKernelAllocator::free_pmem_buffer(size_t size, void* base,
int offset, int fd)
{
BEGIN_FUNC;
// The size should already be page aligned,
// now round it up to a power of 2
// like we did when allocating.
//size = clp2(size);
int err = deps.munmap(base, size);
if (err < 0) {
err = deps.getErrno();
LOGW("error unmapping pmem fd: %s", strerror(err));
return -err;
if(base == MAP_FAILED) {
LOGD("%s: Failed to map memory in the client: %s",
__func__, strerror(errno));
err = -errno;
} else {
LOGD("%s: Mapped %d bytes", __func__, size);
}
return err;
END_FUNC;
return 0;
}
PmemKernelAllocator::Deps::~Deps()
int PmemKernelAlloc::unmap_buffer(void *base, size_t size, int offset)
{
BEGIN_FUNC;
END_FUNC;
int err = 0;
if (munmap(base, size) < 0) {
err = -errno;
LOGW("Error unmapping pmem fd: %s", strerror(errno));
}
return err;
}
int PmemKernelAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
{
return cleanPmem(base, size, offset, fd);
}
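The userspace path above spreads one fixed sequence across init_pmem_area_locked(), alloc_buffer() and the static ioctl helpers: map the whole master heap once, hand out offsets from the best-fit allocator, then give each buffer its own fd that is connected and sub-mapped into the master region. The condensed sketch below is illustrative only; it assumes a kernel exposing /dev/pmem with the android_pmem ioctls and omits the best-fit bookkeeping, zeroing and cache cleaning.

    // Illustrative per-buffer sequence for PmemUserspaceAlloc, not code from the commit.
    #include <errno.h>
    #include <fcntl.h>
    #include <unistd.h>
    #include <sys/ioctl.h>
    #include <linux/android_pmem.h>

    static int pmemSubAlloc(int masterFd, int offset, size_t size)
    {
        int fd = open("/dev/pmem", O_RDWR, 0);        // one new fd per buffer
        if (fd < 0)
            return -errno;
        if (ioctl(fd, PMEM_CONNECT, masterFd) < 0) {  // tie the fd to the master heap
            int err = -errno;
            close(fd);
            return err;
        }
        struct pmem_region sub;
        sub.offset = offset;                          // offset handed out by the best-fit allocator
        sub.len = size;
        if (ioctl(fd, PMEM_MAP, &sub) < 0) {          // expose only this sub-region
            int err = -errno;
            close(fd);
            return err;
        }
        return fd;  // the buffer lives at masterBase + offset
    }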

View File

@ -1,161 +1,106 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRALLOC_QSD8K_PMEMALLOC_H
#define GRALLOC_QSD8K_PMEMALLOC_H
#ifndef GRALLOC_PMEMALLOC_H
#define GRALLOC_PMEMALLOC_H
#include <limits.h>
#include <stdlib.h>
#include <stdint.h>
#include <unistd.h>
#include <linux/ion.h>
#include <utils/RefBase.h>
#include "memalloc.h"
namespace gralloc {
class PmemUserspaceAlloc : public IMemAlloc {
/**
* An interface to the PMEM allocators.
*/
class PmemAllocator {
public:
class Allocator: public android::RefBase {
public:
virtual ~Allocator() {};
virtual ssize_t setSize(size_t size) = 0;
virtual size_t size() const = 0;
virtual ssize_t allocate(size_t size, uint32_t flags = 0) = 0;
virtual ssize_t deallocate(size_t offset) = 0;
};
public:
virtual int alloc_buffer(alloc_data& data);
virtual ~PmemAllocator();
virtual int free_buffer(void *base, size_t size,
int offset, int fd);
// Only valid after init_pmem_area() has completed successfully.
virtual void* get_base_address() = 0;
virtual int map_buffer(void **pBase, size_t size,
int offset, int fd);
virtual int alloc_pmem_buffer(size_t size, int usage, void** pBase,
int* pOffset, int* pFd, int format) = 0;
virtual int free_pmem_buffer(size_t size, void* base, int offset, int fd) = 0;
};
virtual int unmap_buffer(void *base, size_t size,
int offset);
virtual int clean_buffer(void*base, size_t size,
int offset, int fd);
/**
* A PMEM allocator that allocates the entire pmem memory from the kernel and
* then uses a user-space allocator to suballocate from that. This requires
* that the PMEM device driver have kernel allocation disabled.
*/
class PmemUserspaceAllocator: public PmemAllocator {
PmemUserspaceAlloc();
public:
~PmemUserspaceAlloc();
class Deps {
public:
private:
int mMasterFd;
void* mMasterBase;
const char* mPmemDev;
android::sp<Allocator> mAllocator;
pthread_mutex_t mLock;
int init_pmem_area();
int init_pmem_area_locked();
class Allocator {
public:
virtual ~Allocator();
virtual ssize_t setSize(size_t size) = 0;
virtual size_t size() const = 0;
virtual ssize_t allocate(size_t size, uint32_t flags = 0) = 0;
virtual ssize_t deallocate(size_t offset) = 0;
};
virtual ~Deps();
// pmem
virtual size_t getPmemTotalSize(int fd, size_t* size) = 0;
virtual int connectPmem(int fd, int master_fd) = 0;
virtual int mapPmem(int fd, int offset, size_t size) = 0;
virtual int unmapPmem(int fd, int offset, size_t size) = 0;
virtual int cleanPmem(int fd, unsigned long base, int offset, size_t size) = 0;
// C99
virtual int getErrno() = 0;
// POSIX
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) = 0;
virtual int open(const char* pathname, int flags, int mode) = 0;
virtual int close(int fd) = 0;
};
PmemUserspaceAllocator(Deps& deps, Deps::Allocator& allocator, const char* pmemdev);
virtual ~PmemUserspaceAllocator();
class PmemKernelAlloc : public IMemAlloc {
// Only valid after init_pmem_area() has completed successfully.
virtual void* get_base_address();
public:
virtual int alloc_buffer(alloc_data& data);
virtual int init_pmem_area_locked();
virtual int init_pmem_area();
virtual int alloc_pmem_buffer(size_t size, int usage, void** pBase,
int* pOffset, int* pFd, int format);
virtual int free_pmem_buffer(size_t size, void* base, int offset, int fd);
virtual int free_buffer(void *base, size_t size,
int offset, int fd);
#ifndef ANDROID_OS
// DO NOT USE: For testing purposes only.
void set_master_values(int fd, void* base) {
master_fd = fd;
master_base = base;
}
#endif // ANDROID_OS
virtual int map_buffer(void **pBase, size_t size,
int offset, int fd);
virtual int unmap_buffer(void *base, size_t size,
int offset);
virtual int clean_buffer(void*base, size_t size,
int offset, int fd);
PmemKernelAlloc(const char* device);
~PmemKernelAlloc();
private:
const char* mPmemDev;
private:
enum {
MASTER_FD_INIT = -1,
};
Deps& deps;
Deps::Allocator& allocator;
pthread_mutex_t lock;
const char* pmemdev;
int master_fd;
void* master_base;
};
/**
* A PMEM allocator that allocates each individual allocation from the kernel
* (using the kernel's allocator). This requires the kernel driver for the
* particular PMEM device being allocated from to support kernel allocation.
*/
class PmemKernelAllocator: public PmemAllocator {
public:
class Deps {
public:
virtual ~Deps();
// C99
virtual int getErrno() = 0;
// POSIX
virtual void* mmap(void* start, size_t length, int prot, int flags, int fd,
off_t offset) = 0;
virtual int munmap(void* start, size_t length) = 0;
virtual int open(const char* pathname, int flags, int mode) = 0;
virtual int close(int fd) = 0;
virtual int alignPmem(int fd, size_t size, int align) = 0;
};
PmemKernelAllocator(Deps& deps);
virtual ~PmemKernelAllocator();
// Only valid after init_pmem_area() has completed successfully.
virtual void* get_base_address();
virtual int alloc_pmem_buffer(size_t size, int usage, void** pBase,
int* pOffset, int* pFd, int format);
virtual int free_pmem_buffer(size_t size, void* base, int offset, int fd);
private:
Deps& deps;
};
#endif // GRALLOC_QSD8K_PMEMALLOC_H
}
#endif /* GRALLOC_PMEMALLOC_H */