initial GL libraries for msm8960

Change-Id: Ic44834c421f32529da2ab73962246f6e25aa3117
Signed-off-by: Gohulan Balachandran <gohulanb@codeaurora.org>
This commit is contained in:
Gohulan Balachandran 2012-07-10 01:59:05 -06:00
parent 83718cd8a2
commit a6f47452bd
40 changed files with 13111 additions and 0 deletions

11
Android.mk Normal file
View File

@ -0,0 +1,11 @@
#Enables the listed display HAL modules
#Libs to be built for all targets (including SDK)
display-hals := libqcomui
#libs to be built for QCOM targets only
# NOTE(review): the vendor-board-platform guard below is commented out, so
# the QCOM-only HALs are currently built unconditionally — confirm intended.
#ifeq ($(call is-vendor-board-platform,QCOM),true)
display-hals += libgralloc libgenlock libcopybit
#endif
# Recurse into each listed subdirectory's Android.mk.
include $(call all-named-subdir-makefiles,$(display-hals))

15
libgenlock/Android.mk Normal file
View File

@ -0,0 +1,15 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
# Builds libgenlock: the userspace wrapper around the /dev/genlock
# kernel buffer-locking driver, used by gralloc and consumers of
# gralloc buffers to synchronize access.
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
LOCAL_SHARED_LIBRARIES := liblog libcutils
# Needs gralloc_priv.h for the private_handle_t layout.
LOCAL_C_INCLUDES :=
LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
LOCAL_ADDITIONAL_DEPENDENCIES :=
LOCAL_SRC_FILES := genlock.cpp
LOCAL_CFLAGS:= -DLOG_TAG=\"libgenlock\"
LOCAL_MODULE_TAGS := optional
LOCAL_MODULE := libgenlock
include $(BUILD_SHARED_LIBRARY)

329
libgenlock/genlock.cpp Normal file
View File

@ -0,0 +1,329 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <errno.h>
#include <fcntl.h>
#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/genlock.h>
#include <cutils/log.h>
#include <cutils/native_handle.h>
#include <gralloc_priv.h>
#include "genlock.h"
#define GENLOCK_DEVICE "/dev/genlock"
#ifndef USE_GENLOCK
#define USE_GENLOCK
#endif
namespace {
/* Internal function to map the userspace locks to the kernel lock types */
/* Map the userspace genlock lock flags onto the kernel's lock-type codes.
 * When both read and write bits are set, write access wins.
 * Returns -1 when neither bit is present. */
int get_kernel_lock_type(genlock_lock_type lockType)
{
    // Write access takes priority over read when both bits are set.
    if (lockType & GENLOCK_WRITE_LOCK)
        return GENLOCK_WRLOCK;
    if (lockType & GENLOCK_READ_LOCK)
        return GENLOCK_RDLOCK;
    ALOGE("%s: invalid lockType (lockType = %d)", __FUNCTION__, lockType);
    return -1;
}
/* Internal function to perform the actual lock/unlock operations */
/* Internal function to perform the actual lock/unlock operations.
 *
 * @param: buffer_handle  gralloc buffer whose genlock is operated on
 * @param: lockType       kernel lock op (e.g. GENLOCK_RDLOCK, GENLOCK_WRLOCK,
 *                        GENLOCK_UNLOCK)
 * @param: timeout        timeout in ms, forwarded to the kernel
 * @return GENLOCK_NO_ERROR on success, GENLOCK_TIMEDOUT if the kernel
 *         reported ETIMEDOUT, GENLOCK_FAILURE otherwise.
 */
genlock_status_t perform_lock_unlock_operation(native_handle_t *buffer_handle,
                                               int lockType, int timeout)
{
    if (private_handle_t::validate(buffer_handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return GENLOCK_FAILURE;
    }

    private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
    // Unsynchronized buffers opt out of genlock entirely; treat as success.
    if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) {
        if (hnd->genlockPrivFd < 0) {
            ALOGE("%s: the lock has not been created, or has not been attached",
                  __FUNCTION__);
            return GENLOCK_FAILURE;
        }

        genlock_lock lock;
        lock.op      = lockType;
        lock.flags   = 0;
        lock.timeout = timeout;
        lock.fd      = hnd->genlockHandle;

        if (ioctl(hnd->genlockPrivFd, GENLOCK_IOC_LOCK, &lock)) {
            // Fix: the original format string read "lockType0x%x" (missing
            // the '='), which produced a confusing log line.
            ALOGE("%s: GENLOCK_IOC_LOCK failed (lockType=0x%x, err=%s fd=%d)",
                  __FUNCTION__, lockType, strerror(errno), hnd->fd);
            if (ETIMEDOUT == errno)
                return GENLOCK_TIMEDOUT;
            return GENLOCK_FAILURE;
        }
    }
    return GENLOCK_NO_ERROR;
}
/* Internal function to close the fd and release the handle */
/* Close the genlock device fd and the exported lock handle, resetting each
 * to -1 so callers cannot accidentally reuse a stale descriptor. Negative
 * values are left untouched. */
void close_genlock_fd_and_handle(int& fd, int& handle)
{
    int* descriptors[] = { &fd, &handle };
    for (size_t i = 0; i < sizeof(descriptors) / sizeof(descriptors[0]); i++) {
        if (*descriptors[i] >= 0) {
            close(*descriptors[i]);
            *descriptors[i] = -1;
        }
    }
}
}
/*
* Create a genlock lock. The genlock lock file descriptor and the lock
* handle are stored in the buffer_handle.
*
* @param: handle of the buffer
* @return error status.
*/
genlock_status_t genlock_create_lock(native_handle_t *buffer_handle)
{
    genlock_status_t ret = GENLOCK_NO_ERROR;
    if (private_handle_t::validate(buffer_handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return GENLOCK_FAILURE;
    }

    private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
#ifdef USE_GENLOCK
    if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) {
        // Open the genlock device
        int fd = open(GENLOCK_DEVICE, O_RDWR);
        if (fd < 0) {
            ALOGE("%s: open genlock device failed (err=%s)", __FUNCTION__,
                  strerror(errno));
            return GENLOCK_FAILURE;
        }

        // Fix: initialize lock.fd. It was previously read uninitialized by
        // the cleanup path when GENLOCK_IOC_NEW failed, which could close()
        // an arbitrary file descriptor and store garbage in the handle.
        genlock_lock lock;
        lock.fd = -1;

        // Create a new lock
        if (ioctl(fd, GENLOCK_IOC_NEW, NULL)) {
            ALOGE("%s: GENLOCK_IOC_NEW failed (error=%s)", __FUNCTION__,
                  strerror(errno));
            close_genlock_fd_and_handle(fd, lock.fd);
            ret = GENLOCK_FAILURE;
        }

        // Export the lock for other processes to be able to use it.
        if (GENLOCK_FAILURE != ret) {
            if (ioctl(fd, GENLOCK_IOC_EXPORT, &lock)) {
                ALOGE("%s: GENLOCK_IOC_EXPORT failed (error=%s)", __FUNCTION__,
                      strerror(errno));
                close_genlock_fd_and_handle(fd, lock.fd);
                ret = GENLOCK_FAILURE;
            }
        }

        // Store the lock params in the handle (both are -1 on failure, since
        // close_genlock_fd_and_handle resets whatever it closed).
        hnd->genlockPrivFd = fd;
        hnd->genlockHandle = lock.fd;
    } else {
        // Unsynchronized buffers never take a genlock.
        hnd->genlockHandle = 0;
    }
#else
    hnd->genlockHandle = 0;
#endif
    return ret;
}
/*
* Release a genlock lock associated with the handle.
*
* @param: handle of the buffer
* @return error status.
*/
genlock_status_t genlock_release_lock(native_handle_t *buffer_handle)
{
#ifdef USE_GENLOCK
    if (private_handle_t::validate(buffer_handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return GENLOCK_FAILURE;
    }

    private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
    // Unsynchronized buffers never held a lock; nothing to release.
    if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) {
        if (hnd->genlockPrivFd < 0) {
            ALOGE("%s: the lock is invalid", __FUNCTION__);
            return GENLOCK_FAILURE;
        }
        // Dropping both descriptors releases our reference on the kernel
        // lock and resets the handle fields to -1.
        close_genlock_fd_and_handle(hnd->genlockPrivFd, hnd->genlockHandle);
    }
#endif
    return GENLOCK_NO_ERROR;
}
/*
* Attach a lock to the buffer handle passed via an IPC.
*
* @param: handle of the buffer
* @return error status.
*/
genlock_status_t genlock_attach_lock(native_handle_t *buffer_handle)
{
    genlock_status_t status = GENLOCK_NO_ERROR;
#ifdef USE_GENLOCK
    if (private_handle_t::validate(buffer_handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return GENLOCK_FAILURE;
    }

    private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
    if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) {
        // Get this process its own descriptor to the genlock device.
        int devFd = open(GENLOCK_DEVICE, O_RDWR);
        if (devFd < 0) {
            ALOGE("%s: open genlock device failed (err=%s)", __FUNCTION__,
                  strerror(errno));
            return GENLOCK_FAILURE;
        }

        // Attach our descriptor to the lock exported by the allocator.
        genlock_lock lock;
        lock.fd = hnd->genlockHandle;
        if (ioctl(devFd, GENLOCK_IOC_ATTACH, &lock)) {
            ALOGE("%s: GENLOCK_IOC_ATTACH failed (err=%s)", __FUNCTION__,
                  strerror(errno));
            close_genlock_fd_and_handle(devFd, lock.fd);
            status = GENLOCK_FAILURE;
        }

        // Remember the device fd so later lock/unlock calls can use it
        // (it is -1 here if the attach above failed).
        hnd->genlockPrivFd = devFd;
    }
#endif
    return status;
}
/*
* Lock the buffer specified by the buffer handle. The lock held by the buffer
* is specified by the lockType. This function will block if a write lock is
* requested on the buffer which has previously been locked for a read or write
* operation. A buffer can be locked by multiple clients for read. An optional
* timeout value can be specified. By default, there is no timeout.
*
* @param: handle of the buffer
* @param: type of lock to be acquired by the buffer.
* @param: timeout value in ms. GENLOCK_MAX_TIMEOUT is the maximum timeout value.
* @return error status.
*/
genlock_status_t genlock_lock_buffer(native_handle_t *buffer_handle,
                                     genlock_lock_type_t lockType,
                                     int timeout)
{
#ifdef USE_GENLOCK
    // Map the userspace lock flags onto the kernel's encoding.
    const int kernelLockType = get_kernel_lock_type(lockType);
    if (kernelLockType == -1) {
        ALOGE("%s: invalid lockType", __FUNCTION__);
        return GENLOCK_FAILURE;
    }
    if (timeout == 0) {
        ALOGW("%s: trying to lock a buffer with timeout = 0", __FUNCTION__);
    }
    // The shared helper validates the handle and issues the ioctl.
    return perform_lock_unlock_operation(buffer_handle, kernelLockType, timeout);
#else
    return GENLOCK_NO_ERROR;
#endif
}
/*
* Unlocks a buffer that has previously been locked by the client.
*
* @param: handle of the buffer to be unlocked.
* @return: error status.
*/
genlock_status_t genlock_unlock_buffer(native_handle_t *buffer_handle)
{
#ifdef USE_GENLOCK
    // An unlock never blocks, so the timeout argument is irrelevant (pass 0).
    return perform_lock_unlock_operation(buffer_handle, GENLOCK_UNLOCK, 0);
#else
    return GENLOCK_NO_ERROR;
#endif
}
/*
* Blocks the calling process until the lock held on the handle is unlocked.
*
* @param: handle of the buffer
* @param: timeout value for the wait.
* return: error status.
*/
genlock_status_t genlock_wait(native_handle_t *buffer_handle, int timeout) {
#ifdef USE_GENLOCK
    if (private_handle_t::validate(buffer_handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return GENLOCK_FAILURE;
    }

    private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
    // Unsynchronized buffers never hold a genlock; nothing to wait on.
    if ((hnd->flags & private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED) == 0) {
        if (hnd->genlockPrivFd < 0) {
            ALOGE("%s: the lock is invalid", __FUNCTION__);
            return GENLOCK_FAILURE;
        }

        if (0 == timeout)
            ALOGW("%s: timeout = 0", __FUNCTION__);

        // Fix: zero the whole request so the op/flags fields are not handed
        // to the kernel uninitialized — only fd and timeout are meaningful
        // for GENLOCK_IOC_WAIT.
        genlock_lock lock;
        memset(&lock, 0, sizeof(lock));
        lock.fd = hnd->genlockHandle;
        lock.timeout = timeout;
        if (ioctl(hnd->genlockPrivFd, GENLOCK_IOC_WAIT, &lock)) {
            ALOGE("%s: GENLOCK_IOC_WAIT failed (err=%s)", __FUNCTION__,
                  strerror(errno));
            return GENLOCK_FAILURE;
        }
    }
#endif
    return GENLOCK_NO_ERROR;
}

118
libgenlock/genlock.h Normal file
View File

@ -0,0 +1,118 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDE_LIBGENLOCK
#define INCLUDE_LIBGENLOCK
// libgenlock: userspace API for the genlock kernel driver. Provides
// cross-process read/write locking of gralloc buffers via a lock fd and an
// exportable lock handle stored in the buffer's private handle.
#include <cutils/native_handle.h>
#ifdef __cplusplus
extern "C" {
#endif
/* Genlock lock types (bit flags; write takes precedence when combined) */
typedef enum genlock_lock_type{
GENLOCK_READ_LOCK = 1<<0, // Read lock
GENLOCK_WRITE_LOCK = 1<<1, // Write lock
}genlock_lock_type_t;
/* Genlock return values */
typedef enum genlock_status{
GENLOCK_NO_ERROR = 0,
GENLOCK_TIMEDOUT,
GENLOCK_FAILURE,
} genlock_status_t;
/* Genlock defines */
#define GENLOCK_MAX_TIMEOUT 1000 // Max 1s timeout
/*
 * Create a genlock lock. The genlock lock file descriptor and the lock
 * handle are stored in the buffer_handle.
 *
 * @param: handle of the buffer
 * @return error status.
 */
genlock_status_t genlock_create_lock(native_handle_t *buffer_handle);
/*
 * Release a genlock lock associated with the handle.
 *
 * @param: handle of the buffer
 * @return error status.
 */
genlock_status_t genlock_release_lock(native_handle_t *buffer_handle);
/*
 * Attach a lock to the buffer handle passed via an IPC.
 *
 * @param: handle of the buffer
 * @return error status.
 */
genlock_status_t genlock_attach_lock(native_handle_t *buffer_handle);
/*
 * Lock the buffer specified by the buffer handle. The lock held by the buffer
 * is specified by the lockType. This function will block if a write lock is
 * requested on the buffer which has previously been locked for a read or write
 * operation. A buffer can be locked by multiple clients for read. An optional
 * timeout value can be specified. By default, there is no timeout.
 *
 * @param: handle of the buffer
 * @param: type of lock to be acquired by the buffer.
 * @param: timeout value in ms. GENLOCK_MAX_TIMEOUT is the maximum timeout value.
 * @return error status.
 */
genlock_status_t genlock_lock_buffer(native_handle_t *buffer_handle,
genlock_lock_type_t lockType,
int timeout);
/*
 * Unlocks a buffer that has previously been locked by the client.
 *
 * @param: handle of the buffer to be unlocked.
 * @return: error status.
 */
genlock_status_t genlock_unlock_buffer(native_handle_t *buffer_handle);
/*
 * Blocks the calling process until the lock held on the handle is unlocked.
 *
 * @param: handle of the buffer
 * @param: timeout value for the wait.
 * return: error status.
 */
genlock_status_t genlock_wait(native_handle_t *buffer_handle, int timeout);
#ifdef __cplusplus
}
#endif
#endif

75
libgralloc/Android.mk Normal file
View File

@ -0,0 +1,75 @@
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Use this flag until pmem/ashmem is implemented in the new gralloc
LOCAL_PATH := $(call my-dir)
# HAL module implementation, not prelinked and stored in
# hw/<OVERLAY_HARDWARE_MODULE_ID>.<ro.product.board>.so
include $(CLEAR_VARS)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
LOCAL_SHARED_LIBRARIES := liblog libcutils libGLESv1_CM libutils libmemalloc libQcomUI
LOCAL_SHARED_LIBRARIES += libgenlock
LOCAL_C_INCLUDES += hardware/qcom/display/libgenlock
LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
LOCAL_ADDITIONAL_DEPENDENCIES +=
LOCAL_SRC_FILES := framebuffer.cpp \
gpu.cpp \
gralloc.cpp \
mapper.cpp
LOCAL_MODULE := gralloc.$(TARGET_BOARD_PLATFORM)
LOCAL_MODULE_TAGS := optional
LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).gralloc\" -DHOST -DDEBUG_CALC_FPS
# Target-specific flags for 7x27-class boards.
ifeq ($(call is-board-platform,msm7627_surf msm7627_6x),true)
LOCAL_CFLAGS += -DTARGET_MSM7x27
endif
# Dual-display (HDMI mirroring) support pulls in the overlay library.
ifeq ($(TARGET_HAVE_HDMI_OUT),true)
LOCAL_CFLAGS += -DHDMI_DUAL_DISPLAY
LOCAL_C_INCLUDES += hardware/qcom/display/liboverlay
LOCAL_SHARED_LIBRARIES += liboverlay
endif
ifeq ($(TARGET_USES_SF_BYPASS),true)
LOCAL_CFLAGS += -DSF_BYPASS
endif
ifeq ($(TARGET_GRALLOC_USES_ASHMEM),true)
LOCAL_CFLAGS += -DUSE_ASHMEM
endif
include $(BUILD_SHARED_LIBRARY)
#MemAlloc Library
# Second module in this makefile: libmemalloc, the allocator backend
# (ION or pmem/ashmem) used by the gralloc HAL above.
include $(CLEAR_VARS)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
LOCAL_ADDITIONAL_DEPENDENCIES +=
LOCAL_SHARED_LIBRARIES := liblog libcutils libutils
LOCAL_SRC_FILES := ionalloc.cpp \
alloc_controller.cpp
LOCAL_CFLAGS:= -DLOG_TAG=\"memalloc\"
ifeq ($(TARGET_USES_ION),true)
LOCAL_CFLAGS += -DUSE_ION
endif
LOCAL_MODULE := libmemalloc
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)

View File

190
libgralloc/NOTICE Normal file
View File

@ -0,0 +1,190 @@
Copyright (c) 2008-2009, The Android Open Source Project
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS

View File

@ -0,0 +1,438 @@
/*
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutils/log.h>
#include <utils/RefBase.h>
#include <fcntl.h>
#include "gralloc_priv.h"
#include "alloc_controller.h"
#include "memalloc.h"
#include "ionalloc.h"
#include "ashmemalloc.h"
#include "gr.h"
using namespace gralloc;
using android::sp;
const int GRALLOC_HEAP_MASK = GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP |
GRALLOC_USAGE_PRIVATE_SMI_HEAP |
GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP |
GRALLOC_USAGE_PRIVATE_IOMMU_HEAP |
GRALLOC_USAGE_PRIVATE_MM_HEAP |
GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP |
GRALLOC_USAGE_PRIVATE_CAMERA_HEAP;
//Common functions
/* Decide whether a failed allocation may retry from the system heap.
 * Fallback is denied when:
 *   1. Composition type is MDP
 *   2. Alloc from system heap was already tried
 *   3. A specific heap type was requested explicitly
 *   4. The buffer must be protected
 *   5. The buffer is meant for external display only
 */
static bool canFallback(int compositionType, int usage, bool triedSystem)
{
    if (compositionType == MDP_COMPOSITION)
        return false;
    if (triedSystem)
        return false;
    if (usage & (GRALLOC_HEAP_MASK | GRALLOC_USAGE_PROTECTED))
        return false;
    // Fix: the original redundantly re-tested GRALLOC_HEAP_MASK here (already
    // rejected by the check above); only the external-only bit matters now.
    if (usage & GRALLOC_USAGE_EXTERNAL_ONLY)
        return false;
    // Return true by default
    return true;
}
/* Report whether the allocation should be uncached: system-heap buffers are
 * always cached; otherwise honor an explicit uncached request. */
static bool useUncached(int usage)
{
    const int cachedOnlyHeaps = GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP |
                                GRALLOC_USAGE_PRIVATE_IOMMU_HEAP;
    if (usage & cachedOnlyHeaps)
        return false;
    return (usage & GRALLOC_USAGE_PRIVATE_UNCACHED) != 0;
}
// Lazily-created process-wide allocator controller singleton.
sp<IAllocController> IAllocController::sController = NULL;
// Return the singleton controller. With USE_ION an IonController is always
// used; otherwise useMasterHeap selects between the pmem-based controllers.
// NOTE(review): the lazy init is not thread-safe — concurrent first calls
// could race; confirm all callers are serialized.
sp<IAllocController> IAllocController::getInstance(bool useMasterHeap)
{
if(sController == NULL) {
#ifdef USE_ION
sController = new IonController();
#else
if(useMasterHeap)
sController = new PmemAshmemController();
else
sController = new PmemKernelController();
#endif
}
return sController;
}
//-------------- IonController-----------------------//
// Creates the single ION allocator used for every allocation request.
IonController::IonController()
{
mIonAlloc = new IonAlloc();
}
// Allocate a buffer through ION. Translates gralloc usage bits into ION heap
// mask bits, attempts the allocation, and optionally falls back to the system
// heap. On success, records the allocation type flags in data.allocType.
int IonController::allocate(alloc_data& data, int usage,
int compositionType)
{
int ionFlags = 0;
int ret;
bool noncontig = false;
data.uncached = useUncached(usage);
// Map each requested private heap usage bit onto its ION heap mask bit.
if(usage & GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP)
ionFlags |= ION_HEAP(ION_SF_HEAP_ID);
if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
ionFlags |= ION_HEAP(ION_SYSTEM_HEAP_ID);
noncontig = true;
}
if(usage & GRALLOC_USAGE_PRIVATE_IOMMU_HEAP)
ionFlags |= ION_HEAP(ION_IOMMU_HEAP_ID);
if(usage & GRALLOC_USAGE_PRIVATE_MM_HEAP)
ionFlags |= ION_HEAP(ION_CP_MM_HEAP_ID);
if(usage & GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP)
ionFlags |= ION_HEAP(ION_CP_WB_HEAP_ID);
if(usage & GRALLOC_USAGE_PRIVATE_CAMERA_HEAP)
ionFlags |= ION_HEAP(ION_CAMERA_HEAP_ID);
if(usage & GRALLOC_USAGE_PROTECTED)
ionFlags |= ION_SECURE;
if(usage & GRALLOC_USAGE_PRIVATE_DO_NOT_MAP)
data.allocType = private_handle_t::PRIV_FLAGS_NOT_MAPPED;
// if no flags are set, default to
// SF + IOMMU heaps, so that bypass can work
// we can fall back to system heap if
// we run out.
if(!ionFlags)
ionFlags = ION_HEAP(ION_SF_HEAP_ID) | ION_HEAP(ION_IOMMU_HEAP_ID);
data.flags = ionFlags;
ret = mIonAlloc->alloc_buffer(data);
// Fallback
// NOTE(review): ionFlags holds ION_HEAP(...) mask bits, so masking with the
// raw ION_SYSTEM_HEAP_ID id (not ION_HEAP(ION_SYSTEM_HEAP_ID)) looks like it
// tests the wrong bit for "already tried system heap" — confirm against the
// ION header's ION_HEAP definition.
if(ret < 0 && canFallback(compositionType,
usage,
(ionFlags & ION_SYSTEM_HEAP_ID)))
{
ALOGW("Falling back to system heap");
data.flags = ION_HEAP(ION_SYSTEM_HEAP_ID);
noncontig = true;
ret = mIonAlloc->alloc_buffer(data);
}
if(ret >= 0 ) {
// Record how the buffer was allocated so the mapper/free paths can
// pick the matching allocator and mapping behavior.
data.allocType = private_handle_t::PRIV_FLAGS_USES_ION;
if(noncontig)
data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
if(ionFlags & ION_SECURE)
data.allocType |= private_handle_t::PRIV_FLAGS_SECURE_BUFFER;
}
return ret;
}
/* Return the allocator matching the buffer's allocation-type flags.
 * Only ION buffers are supported by this controller; anything else
 * yields a NULL allocator and an error log. */
sp<IMemAlloc> IonController::getAllocator(int flags)
{
    sp<IMemAlloc> allocator;
    if (flags & private_handle_t::PRIV_FLAGS_USES_ION)
        allocator = mIonAlloc;
    else
        ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
    return allocator;
}
#if 0
// Legacy PMEM/ashmem-based controllers. Compiled out on this target
// (ION is used instead); kept for reference.
//-------------- PmemKernelController-----------------------//
PmemKernelController::PmemKernelController()
{
mPmemAdspAlloc = new PmemKernelAlloc(DEVICE_PMEM_ADSP);
// XXX: Right now, there is no need to maintain an instance
// of the SMI allocator as we need it only in a few cases
}
PmemKernelController::~PmemKernelController()
{
}
// Allocate from the SMI pool when requested (or for external/protected
// buffers), falling back to the ADSP pmem heap otherwise.
int PmemKernelController::allocate(alloc_data& data, int usage,
int compositionType)
{
int ret = 0;
bool adspFallback = false;
if (!(usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP))
adspFallback = true;
// Try SMI first
if ((usage & GRALLOC_USAGE_PRIVATE_SMI_HEAP) ||
(usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
(usage & GRALLOC_USAGE_PROTECTED))
{
// Probe for the SMI pool device before creating an allocator for it.
int tempFd = open(DEVICE_PMEM_SMIPOOL, O_RDWR, 0);
if(tempFd > 0) {
close(tempFd);
sp<IMemAlloc> memalloc;
memalloc = new PmemKernelAlloc(DEVICE_PMEM_SMIPOOL);
ret = memalloc->alloc_buffer(data);
if(ret >= 0)
return ret;
else {
if(adspFallback)
ALOGW("Allocation from SMI failed, trying ADSP");
}
}
}
if ((usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) || adspFallback) {
ret = mPmemAdspAlloc->alloc_buffer(data);
}
return ret;
}
// Map handle flags back to the owning allocator (ADSP pmem only).
sp<IMemAlloc> PmemKernelController::getAllocator(int flags)
{
sp<IMemAlloc> memalloc;
if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
memalloc = mPmemAdspAlloc;
else {
ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
memalloc = NULL;
}
return memalloc;
}
//-------------- PmemAshmmemController-----------------------//
PmemAshmemController::PmemAshmemController()
{
mPmemUserspaceAlloc = new PmemUserspaceAlloc();
mAshmemAlloc = new AshmemAlloc();
mPmemKernelCtrl = new PmemKernelController();
}
PmemAshmemController::~PmemAshmemController()
{
}
// Route the allocation to kernel pmem, ashmem, or userspace pmem based
// on the usage bits, with an ashmem fallback for userspace pmem.
int PmemAshmemController::allocate(alloc_data& data, int usage,
int compositionType)
{
int ret = 0;
// Make buffers cacheable by default
data.uncached = false;
// Override if we explicitly need uncached buffers
if (usage & GRALLOC_USAGE_PRIVATE_UNCACHED)
data.uncached = true;
// If ADSP or SMI is requested use the kernel controller
if(usage & (GRALLOC_USAGE_PRIVATE_ADSP_HEAP|
GRALLOC_USAGE_PRIVATE_SMI_HEAP)) {
ret = mPmemKernelCtrl->allocate(data, usage, compositionType);
if(ret < 0)
ALOGE("%s: Failed to allocate ADSP/SMI memory", __func__);
else
data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP;
return ret;
}
if(usage & GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP) {
ret = mAshmemAlloc->alloc_buffer(data);
if(ret >= 0) {
data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
}
return ret;
}
// if no memory specific flags are set,
// default to EBI heap, so that bypass
// can work. We can fall back to system
// heap if we run out.
ret = mPmemUserspaceAlloc->alloc_buffer(data);
// Fallback
if(ret >= 0 ) {
data.allocType = private_handle_t::PRIV_FLAGS_USES_PMEM;
} else if(ret < 0 && canFallback(compositionType, usage, false)) {
ALOGW("Falling back to ashmem");
ret = mAshmemAlloc->alloc_buffer(data);
if(ret >= 0) {
data.allocType = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
data.allocType |= private_handle_t::PRIV_FLAGS_NONCONTIGUOUS_MEM;
}
}
return ret;
}
// Map handle flags back to the allocator that produced the buffer.
sp<IMemAlloc> PmemAshmemController::getAllocator(int flags)
{
sp<IMemAlloc> memalloc;
if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM)
memalloc = mPmemUserspaceAlloc;
else if (flags & private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP)
memalloc = mPmemKernelCtrl->getAllocator(flags);
else if (flags & private_handle_t::PRIV_FLAGS_USES_ASHMEM)
memalloc = mAshmemAlloc;
else {
ALOGE("%s: Invalid flags passed: 0x%x", __FUNCTION__, flags);
memalloc = NULL;
}
return memalloc;
}
#endif
// Compute the allocation size in bytes for a buffer of the given pixel
// format, and return the hardware-aligned width/height through
// |alignedw|/|alignedh|.
// NOTE(review): on unsupported formats (and odd-sized YV12) this
// returns -EINVAL through a size_t return type, which callers see as a
// huge positive size -- confirm callers handle this.
size_t getBufferSizeAndDimensions(int width, int height, int format,
int& alignedw, int &alignedh)
{
size_t size;
// Default alignment for RGB formats: 32-pixel aligned in both axes.
alignedw = ALIGN(width, 32);
alignedh = ALIGN(height, 32);
switch (format) {
case HAL_PIXEL_FORMAT_RGBA_8888:
case HAL_PIXEL_FORMAT_RGBX_8888:
case HAL_PIXEL_FORMAT_BGRA_8888:
size = alignedw * alignedh * 4;
break;
case HAL_PIXEL_FORMAT_RGB_888:
size = alignedw * alignedh * 3;
break;
case HAL_PIXEL_FORMAT_RGB_565:
case HAL_PIXEL_FORMAT_RGBA_5551:
case HAL_PIXEL_FORMAT_RGBA_4444:
size = alignedw * alignedh * 2;
break;
// adreno formats
case HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO: // NV21
// Luma plane padded to 4K, followed by the 4K-aligned interleaved
// chroma plane (2 bytes per 2x2 subsampled sample pair).
size = ALIGN(alignedw*alignedh, 4096);
size += ALIGN(2 * ALIGN(width/2, 32) * ALIGN(height/2, 32), 4096);
break;
case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED: // NV12
// The chroma plane is subsampled,
// but the pitch in bytes is unchanged
// The GPU needs 4K alignment, but the video decoder needs 8K
alignedw = ALIGN(width, 128);
size = ALIGN( alignedw * alignedh, 8192);
size += ALIGN( alignedw * ALIGN(height/2, 32), 8192);
break;
case HAL_PIXEL_FORMAT_NV12_ENCODEABLE:
case HAL_PIXEL_FORMAT_YCbCr_420_SP:
case HAL_PIXEL_FORMAT_YCrCb_420_SP:
case HAL_PIXEL_FORMAT_YV12:
if ((format == HAL_PIXEL_FORMAT_YV12) && ((width&1) || (height&1))) {
ALOGE("w or h is odd for the YV12 format");
return -EINVAL;
}
// Planar YUV: 16-pixel aligned luma pitch, unaligned height.
alignedw = ALIGN(width, 16);
alignedh = height;
if (HAL_PIXEL_FORMAT_NV12_ENCODEABLE == format) {
// The encoder requires a 2K aligned chroma offset.
size = ALIGN(alignedw*alignedh, 2048) +
(ALIGN(alignedw/2, 16) * (alignedh/2))*2;
} else {
size = alignedw*alignedh +
(ALIGN(alignedw/2, 16) * (alignedh/2))*2;
}
// Total allocation is page (4K) aligned.
size = ALIGN(size, 4096);
break;
default:
ALOGE("unrecognized pixel format: %d", format);
return -EINVAL;
}
return size;
}
// Allocate buffer from width, height and format into a
// private_handle_t. It is the responsibility of the caller
// to free the buffer using the free_buffer function
// Allocate a buffer of the given dimensions/format/usage and wrap it in
// a newly allocated private_handle_t returned through |pHnd|.
// Returns 0 on success, -ENOMEM on allocation failure. The caller owns
// the handle and must release it with free_buffer().
int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage)
{
alloc_data data;
int alignedw, alignedh;
android::sp<gralloc::IAllocController> sAlloc =
gralloc::IAllocController::getInstance(false);
data.base = 0;
data.fd = -1;
data.offset = 0;
data.size = getBufferSizeAndDimensions(w, h, format, alignedw, alignedh);
data.align = getpagesize();
data.uncached = useUncached(usage);
int allocFlags = usage;
// compositionType 0: no composition-type-specific fallback behavior.
int err = sAlloc->allocate(data, allocFlags, 0);
if (0 != err) {
ALOGE("%s: allocate failed", __FUNCTION__);
return -ENOMEM;
}
private_handle_t* hnd = new private_handle_t(data.fd, data.size,
data.allocType, 0, format, alignedw, alignedh);
// NOTE(review): storing the mapped address in an int field -- only
// valid on 32-bit targets; confirm before any 64-bit port.
hnd->base = (int) data.base;
hnd->offset = data.offset;
hnd->gpuaddr = 0;
*pHnd = hnd;
return 0;
}
// Release a buffer previously created by alloc_buffer(): return the
// backing memory to its allocator and delete the handle.
// Safe to call with a NULL handle (no-op).
void free_buffer(private_handle_t *hnd)
{
    if (hnd == NULL)
        return;
    android::sp<gralloc::IAllocController> sAlloc =
        gralloc::IAllocController::getInstance(false);
    if (hnd->fd > 0) {
        sp<IMemAlloc> memalloc = sAlloc->getAllocator(hnd->flags);
        // getAllocator() returns NULL for unrecognized flags; guard
        // against dereferencing it.
        if (memalloc != NULL)
            memalloc->free_buffer((void*)hnd->base, hnd->size,
                                  hnd->offset, hnd->fd);
    }
    delete hnd;
}

View File

@ -0,0 +1,114 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRALLOC_ALLOCCONTROLLER_H
#define GRALLOC_ALLOCCONTROLLER_H
#include <utils/RefBase.h>
namespace gralloc {
struct alloc_data;
class IMemAlloc;
class IonAlloc;
// Abstract controller that routes a gralloc allocation request to the
// appropriate memory allocator based on usage flags.
class IAllocController : public android::RefBase {
public:
/* Allocate using a suitable method
* Returns the type of buffer allocated
*/
virtual int allocate(alloc_data& data, int usage,
int compositionType) = 0;
// Return the allocator that owns buffers with the given handle flags.
virtual android::sp<IMemAlloc> getAllocator(int flags) = 0;
virtual ~IAllocController() {};
// Process-wide singleton accessor.
static android::sp<IAllocController> getInstance(bool useMasterHeap);
private:
static android::sp<IAllocController> sController;
};
// Controller backed by the ION kernel allocator.
class IonController : public IAllocController {
public:
virtual int allocate(alloc_data& data, int usage,
int compositionType);
virtual android::sp<IMemAlloc> getAllocator(int flags);
IonController();
private:
android::sp<IonAlloc> mIonAlloc;
};
// Controller backed by kernel pmem heaps (ADSP/SMI).
class PmemKernelController : public IAllocController {
public:
virtual int allocate(alloc_data& data, int usage,
int compositionType);
virtual android::sp<IMemAlloc> getAllocator(int flags);
PmemKernelController ();
~PmemKernelController ();
private:
android::sp<IMemAlloc> mPmemAdspAlloc;
};
// Main pmem controller - this should only
// be used within gralloc
class PmemAshmemController : public IAllocController {
public:
virtual int allocate(alloc_data& data, int usage,
int compositionType);
virtual android::sp<IMemAlloc> getAllocator(int flags);
PmemAshmemController();
~PmemAshmemController();
private:
android::sp<IMemAlloc> mPmemUserspaceAlloc;
android::sp<IMemAlloc> mAshmemAlloc;
android::sp<IAllocController> mPmemKernelCtrl;
};
} //end namespace gralloc
#endif // GRALLOC_ALLOCCONTROLLER_H

138
libgralloc/ashmemalloc.cpp Normal file
View File

@ -0,0 +1,138 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <cutils/log.h>
#include <cutils/ashmem.h>
#include <linux/ashmem.h>
#include "ashmemalloc.h"
using gralloc::AshmemAlloc;
// Create an ashmem region of data.size bytes, map it read/write into
// this process, zero it, and flush the cache.
// On success fills data.fd/base/offset and returns 0; on failure
// returns -errno.
int AshmemAlloc::alloc_buffer(alloc_data& data)
{
int err = 0;
int fd = -1;
void* base = 0;
int offset = 0;
char name[ASHMEM_NAME_LEN];
// NOTE(review): %x with data.pHandle -- verify pHandle is an integer
// type; a pointer here would need %p or a cast.
snprintf(name, ASHMEM_NAME_LEN, "gralloc-buffer-%x", data.pHandle);
int prot = PROT_READ | PROT_WRITE;
fd = ashmem_create_region(name, data.size);
if (fd < 0) {
ALOGE("couldn't create ashmem (%s)", strerror(errno));
err = -errno;
} else {
if (ashmem_set_prot_region(fd, prot) < 0) {
ALOGE("ashmem_set_prot_region(fd=%d, prot=%x) failed (%s)",
fd, prot, strerror(errno));
close(fd);
err = -errno;
} else {
// MAP_POPULATE|MAP_LOCKED: pre-fault and pin the pages.
base = mmap(0, data.size, prot, MAP_SHARED|MAP_POPULATE|MAP_LOCKED, fd, 0);
if (base == MAP_FAILED) {
ALOGE("alloc mmap(fd=%d, size=%d, prot=%x) failed (%s)",
fd, data.size, prot, strerror(errno));
close(fd);
err = -errno;
} else {
// Zero the buffer so clients never see stale data.
memset((char*)base + offset, 0, data.size);
}
}
}
if(err == 0) {
data.fd = fd;
data.base = base;
data.offset = offset;
clean_buffer(base, data.size, offset, fd);
ALOGD("ashmem: Allocated buffer base:%p size:%d fd:%d",
base, data.size, fd);
}
return err;
}
// Unmap an ashmem buffer and close its fd.
// Returns the unmap status, or -EINVAL if |base| is NULL.
int AshmemAlloc::free_buffer(void* base, size_t size, int offset, int fd)
{
    ALOGD("ashmem: Freeing buffer base:%p size:%d fd:%d",
          base, size, fd);
    if (base == NULL) {
        ALOGE("Invalid free");
        return -EINVAL;
    }
    const int status = unmap_buffer(base, size, offset);
    close(fd);
    return status;
}
// Map an ashmem buffer into the calling process.
// Stores the mapped address (or MAP_FAILED) in *pBase and returns 0 on
// success, -errno on failure.
int AshmemAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
{
    // MAP_POPULATE pre-faults the pages at map time.
    void *mapped = mmap(0, size, PROT_READ| PROT_WRITE,
                        MAP_SHARED|MAP_POPULATE, fd, 0);
    *pBase = mapped;
    if (mapped == MAP_FAILED) {
        ALOGE("ashmem: Failed to map memory in the client: %s",
              strerror(errno));
        return -errno;
    }
    ALOGD("ashmem: Mapped buffer base:%p size:%d fd:%d",
          mapped, size, fd);
    return 0;
}
// Unmap a previously mapped ashmem buffer.
// Returns the munmap() status (0 on success).
int AshmemAlloc::unmap_buffer(void *base, size_t size, int offset)
{
    ALOGD("ashmem: Unmapping buffer base: %p size: %d", base, size);
    const int status = munmap(base, size);
    if (status != 0) {
        ALOGE("ashmem: Failed to unmap memory at %p: %s",
              base, strerror(errno));
    }
    return status;
}
// Flush the CPU cache for an ashmem-backed buffer so other masters see
// up-to-date contents.
// NOTE(review): failures are only logged -- the function always
// returns 0. The ioctl is passed NULL rather than an ashmem_pin range,
// so the whole region is flushed; confirm that is intended.
int AshmemAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
{
int err = 0;
if (ioctl(fd, ASHMEM_CACHE_FLUSH_RANGE, NULL)) {
ALOGE("ashmem: ASHMEM_CACHE_FLUSH_RANGE failed fd = %d", fd);
}
return err;
}

56
libgralloc/ashmemalloc.h Normal file
View File

@ -0,0 +1,56 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRALLOC_ASHMEMALLOC_H
#define GRALLOC_ASHMEMALLOC_H
#include "memalloc.h"
// NOTE(review): nothing in this header uses ION types -- the
// <linux/ion.h> include looks unnecessary; confirm before removing.
#include <linux/ion.h>
namespace gralloc {
// IMemAlloc implementation backed by Android ashmem regions.
class AshmemAlloc : public IMemAlloc {
public:
virtual int alloc_buffer(alloc_data& data);
virtual int free_buffer(void *base, size_t size,
int offset, int fd);
virtual int map_buffer(void **pBase, size_t size,
int offset, int fd);
virtual int unmap_buffer(void *base, size_t size,
int offset);
virtual int clean_buffer(void*base, size_t size,
int offset, int fd);
};
}
#endif /* GRALLOC_ASHMEMALLOC_H */

937
libgralloc/framebuffer.cpp Normal file
View File

@ -0,0 +1,937 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2010-2012 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <sys/mman.h>
#include <dlfcn.h>
#include <cutils/ashmem.h>
#include <cutils/log.h>
#include <cutils/properties.h>
#include <utils/Timers.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include <utils/Timers.h>
#include <cutils/log.h>
#include <cutils/atomic.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <GLES/gl.h>
#include "gralloc_priv.h"
#include "gr.h"
#ifdef NO_SURFACEFLINGER_SWAPINTERVAL
#include <cutils/properties.h>
#endif
#include <qcom_ui.h>
#define FB_DEBUG 0
#if defined(HDMI_DUAL_DISPLAY)
#define EVEN_OUT(x) if (x & 0x0001) {x--;}
using overlay::Overlay;
/** Return the smaller of two ints. */
static inline int min(int a, int b) {
    return (b < a) ? b : a;
}
/** Return the larger of two ints. */
static inline int max(int a, int b) {
    return (b > a) ? b : a;
}
#endif
// Human-readable tags for the per-buffer state machine, indexed by the
// buffer state enum (SUB/REF/AVL -> 'S'/'R'/'A'); used in debug logs.
char framebufferStateName[] = {'S', 'R', 'A'};
/*****************************************************************************/
// Panel type reported by the fb driver (first char of the fb id string).
enum {
MDDI_PANEL = '1',
EBI2_PANEL = '2',
LCDC_PANEL = '3',
EXT_MDDI_PANEL = '4',
TV_PANEL = '5'
};
// Framebuffer capability flags.
enum {
PAGE_FLIP = 0x00000001,
LOCKED = 0x00000002
};
// Per-open-device context; currently only wraps the HAL device struct.
struct fb_context_t {
framebuffer_device_t device;
};
// Latest orientation reported via fb_orientationChanged(); consumed by
// disp_loop under the overlay lock.
static int neworientation;
/*****************************************************************************/
static void
msm_copy_buffer(buffer_handle_t handle, int fd,
int width, int height, int format,
int x, int y, int w, int h);
// Set the swap interval for posts on this device.
// The debug.gr.swapinterval property (when >= 0) overrides the
// requested value. Returns -EINVAL if the resulting interval is
// outside [minSwapInterval, maxSwapInterval].
static int fb_setSwapInterval(struct framebuffer_device_t* dev,
                              int interval)
{
    char pval[PROPERTY_VALUE_MAX];
    property_get("debug.gr.swapinterval", pval, "-1");
    int property_interval = atoi(pval);
    if (property_interval >= 0)
        interval = property_interval;
    private_module_t* m = reinterpret_cast<private_module_t*>(
        dev->common.module);
    if (interval < dev->minSwapInterval || interval > dev->maxSwapInterval)
        return -EINVAL;
    m->swapInterval = interval;
    return 0;
}
// Restrict the next post to the dirty rectangle (l, t, w, h).
// The rectangle is packed into info.reserved[] with the "UPDT" magic so
// the MDP driver performs a partial update.
// Returns -EINVAL for empty or negative rectangles.
static int fb_setUpdateRect(struct framebuffer_device_t* dev,
                            int l, int t, int w, int h)
{
    if (((w|h) <= 0) || ((l|t)<0))
        return -EINVAL;
    private_module_t* m = reinterpret_cast<private_module_t*>(
        dev->common.module);
    m->info.reserved[0] = 0x54445055; // "UPDT";
    // reserved[1] = top-left (x | y<<16), reserved[2] = bottom-right.
    m->info.reserved[1] = (uint16_t)l | ((uint32_t)t << 16);
    m->info.reserved[2] = (uint16_t)(l+w) | ((uint32_t)(t+h) << 16);
    return 0;
}
// Display thread: dequeues posted framebuffers and pans the display to
// them, then recycles the previously displayed buffer (SUB -> REF ->
// AVL state transitions, guarded by per-buffer locks).
// |ptr| is the private_module_t; never returns in practice.
static void *disp_loop(void *ptr)
{
struct qbuf_t nxtBuf;
// Index of the buffer currently on screen; -1 until the first post.
static int cur_buf=-1;
private_module_t *m = reinterpret_cast<private_module_t*>(ptr);
while (1) {
pthread_mutex_lock(&(m->qlock));
// wait (sleep) while display queue is empty;
if (m->disp.isEmpty()) {
pthread_cond_wait(&(m->qpost),&(m->qlock));
}
// dequeue next buff to display and lock it
nxtBuf = m->disp.getHeadValue();
m->disp.pop();
pthread_mutex_unlock(&(m->qlock));
// post buf out to display synchronously
private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>
(nxtBuf.buf);
// Pan offset of this buffer within the framebuffer region.
const size_t offset = hnd->base - m->framebuffer->base;
m->info.activate = FB_ACTIVATE_VBL;
m->info.yoffset = offset / m->finfo.line_length;
#if defined(HDMI_DUAL_DISPLAY)
// Publish the new front buffer to the HDMI mirroring thread.
pthread_mutex_lock(&m->overlayLock);
m->orientation = neworientation;
m->currentOffset = offset;
m->hdmiStateChanged = true;
pthread_cond_signal(&(m->overlayPost));
pthread_mutex_unlock(&m->overlayLock);
#endif
if (ioctl(m->framebuffer->fd, FBIOPUT_VSCREENINFO, &m->info) == -1) {
ALOGE("ERROR FBIOPUT_VSCREENINFO failed; frame not displayed");
}
CALC_FPS();
if (cur_buf == -1) {
// First frame: nothing to recycle yet; mark the posted buffer
// as the referenced (on-screen) one.
int nxtAvail = ((nxtBuf.idx + 1) % m->numBuffers);
pthread_mutex_lock(&(m->avail[nxtBuf.idx].lock));
m->avail[nxtBuf.idx].is_avail = true;
m->avail[nxtBuf.idx].state = REF;
pthread_cond_broadcast(&(m->avail[nxtBuf.idx].cond));
pthread_mutex_unlock(&(m->avail[nxtBuf.idx].lock));
} else {
// Posted buffer becomes REF (on screen); previous front buffer
// becomes AVL and waiters are woken.
pthread_mutex_lock(&(m->avail[nxtBuf.idx].lock));
if (m->avail[nxtBuf.idx].state != SUB) {
ALOGE_IF(m->swapInterval != 0, "[%d] state %c, expected %c", nxtBuf.idx,
framebufferStateName[m->avail[nxtBuf.idx].state],
framebufferStateName[SUB]);
}
m->avail[nxtBuf.idx].state = REF;
pthread_mutex_unlock(&(m->avail[nxtBuf.idx].lock));
pthread_mutex_lock(&(m->avail[cur_buf].lock));
m->avail[cur_buf].is_avail = true;
if (m->avail[cur_buf].state != REF) {
ALOGE_IF(m->swapInterval != 0, "[%d] state %c, expected %c", cur_buf,
framebufferStateName[m->avail[cur_buf].state],
framebufferStateName[REF]);
}
m->avail[cur_buf].state = AVL;
pthread_cond_broadcast(&(m->avail[cur_buf].cond));
pthread_mutex_unlock(&(m->avail[cur_buf].lock));
}
cur_buf = nxtBuf.idx;
}
return NULL;
}
#if defined(HDMI_DUAL_DISPLAY)
// Close the HDMI overlay channel if one is open. Always returns 0.
static int closeHDMIChannel(private_module_t* m)
{
    Overlay* overlayObj = m->pobjOverlay;
    if (overlayObj != NULL)
        overlayObj->closeChannel();
    return 0;
}
// Compute the destination rectangle and rotation for mirroring the
// primary UI onto the secondary (HDMI) display, preserving the primary
// panel's aspect ratio.
// |rect| and |orientation| are output parameters.
static void getSecondaryDisplayDestinationInfo(private_module_t* m, overlay_rect&
rect, int& orientation)
{
Overlay* pTemp = m->pobjOverlay;
// Secondary display dimensions.
int width = pTemp->getFBWidth();
int height = pTemp->getFBHeight();
// Primary framebuffer dimensions.
int fbwidth = m->info.xres, fbheight = m->info.yres;
rect.x = 0; rect.y = 0;
rect.w = width; rect.h = height;
int rot = m->orientation;
switch(rot) {
// ROT_0
case 0:
// ROT_180
// Fallthrough is intentional: 0 and 180 share the same aspect
// computation; only the output orientation differs.
case HAL_TRANSFORM_ROT_180:
pTemp->getAspectRatioPosition(fbwidth, fbheight,
&rect);
if(rot == HAL_TRANSFORM_ROT_180)
orientation = HAL_TRANSFORM_ROT_180;
else
orientation = 0;
break;
// ROT_90
case HAL_TRANSFORM_ROT_90:
// ROT_270
case HAL_TRANSFORM_ROT_270:
//Calculate the Aspectratio for the UI
//in the landscape mode
//Width and height will be swapped as there
//is rotation
pTemp->getAspectRatioPosition(fbheight, fbwidth,
&rect);
// The mirrored output must rotate opposite to the panel.
if(rot == HAL_TRANSFORM_ROT_90)
orientation = HAL_TRANSFORM_ROT_270;
else if(rot == HAL_TRANSFORM_ROT_270)
orientation = HAL_TRANSFORM_ROT_90;
break;
}
return;
}
// HDMI mirroring thread: waits for state changes signalled by
// disp_loop/fb_enableHDMIOutput and keeps the secondary display's
// overlay channel in sync with the current front buffer.
// |ptr| is the private_module_t; exits when exitHDMIUILoop is set.
static void *hdmi_ui_loop(void *ptr)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
ptr);
while (1) {
pthread_mutex_lock(&m->overlayLock);
while(!(m->hdmiStateChanged))
pthread_cond_wait(&(m->overlayPost), &(m->overlayLock));
m->hdmiStateChanged = false;
if (m->exitHDMIUILoop) {
pthread_mutex_unlock(&m->overlayLock);
return NULL;
}
bool waitForVsync = true;
int flags = WAIT_FOR_VSYNC;
if (m->pobjOverlay) {
Overlay* pTemp = m->pobjOverlay;
if (m->hdmiMirroringState == HDMI_NO_MIRRORING)
closeHDMIChannel(m);
else if(m->hdmiMirroringState == HDMI_UI_MIRRORING) {
if (!pTemp->isChannelUP()) {
// Open the mirroring channel sized to the (aligned)
// primary framebuffer.
int alignedW = ALIGN(m->info.xres, 32);
private_handle_t const* hnd =
reinterpret_cast<private_handle_t const*>(m->framebuffer);
overlay_buffer_info info;
info.width = alignedW;
info.height = hnd->height;
info.format = hnd->format;
info.size = hnd->size;
if (m->trueMirrorSupport)
flags &= ~WAIT_FOR_VSYNC;
// start the overlay Channel for mirroring
// m->enableHDMIOutput corresponds to the fbnum
if (pTemp->startChannel(info, m->enableHDMIOutput,
false, true, 0, VG0_PIPE, flags)) {
pTemp->setFd(m->framebuffer->fd);
pTemp->setCrop(0, 0, m->info.xres, m->info.yres);
} else
closeHDMIChannel(m);
}
if (pTemp->isChannelUP()) {
// Keep destination rect/orientation in sync with the
// current panel orientation.
overlay_rect destRect;
int rot = 0;
int currOrientation = 0;
getSecondaryDisplayDestinationInfo(m, destRect, rot);
pTemp->getOrientation(currOrientation);
if(rot != currOrientation) {
pTemp->setTransform(rot);
}
// MDP requires even destination coordinates.
EVEN_OUT(destRect.x);
EVEN_OUT(destRect.y);
EVEN_OUT(destRect.w);
EVEN_OUT(destRect.h);
int currentX = 0, currentY = 0;
uint32_t currentW = 0, currentH = 0;
if (pTemp->getPosition(currentX, currentY, currentW, currentH)) {
if ((currentX != destRect.x) || (currentY != destRect.y) ||
(currentW != destRect.w) || (currentH != destRect.h)) {
pTemp->setPosition(destRect.x, destRect.y, destRect.w,
destRect.h);
}
}
if (m->trueMirrorSupport) {
// if video is started the UI channel should be NO_WAIT.
flags = !m->videoOverlay ? WAIT_FOR_VSYNC : 0;
pTemp->updateOverlayFlags(flags);
}
pTemp->queueBuffer(m->currentOffset);
}
}
else
closeHDMIChannel(m);
}
pthread_mutex_unlock(&m->overlayLock);
}
return NULL;
}
// Notification that a video overlay session started/stopped. Without
// true-mirroring support, UI mirroring must be torn down while video
// plays and restored afterwards; the HDMI thread is signalled to act.
// Always returns 0.
static int fb_videoOverlayStarted(struct framebuffer_device_t* dev, int started)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->overlayLock);
Overlay* pTemp = m->pobjOverlay;
if(started != m->videoOverlay) {
m->videoOverlay = started;
if (!m->trueMirrorSupport) {
m->hdmiStateChanged = true;
if (started && pTemp) {
// Video takes over the pipe: stop UI mirroring.
m->hdmiMirroringState = HDMI_NO_MIRRORING;
closeHDMIChannel(m);
} else if (m->enableHDMIOutput)
m->hdmiMirroringState = HDMI_UI_MIRRORING;
pthread_cond_signal(&(m->overlayPost));
}
}
pthread_mutex_unlock(&m->overlayLock);
return 0;
}
// Enable or disable output to an external display.
// |externaltype| is the external fb number when non-zero, 0 to disable.
// Updates the mirroring state and wakes the HDMI thread. Returns 0.
static int fb_enableHDMIOutput(struct framebuffer_device_t* dev, int externaltype)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->overlayLock);
Overlay* pTemp = m->pobjOverlay;
//Check if true mirroring can be supported
m->trueMirrorSupport = FrameBufferInfo::getInstance()->canSupportTrueMirroring();
m->enableHDMIOutput = externaltype;
ALOGE("In fb_enableHDMIOutput: externaltype = %d", m->enableHDMIOutput);
if(externaltype) {
if (m->trueMirrorSupport) {
m->hdmiMirroringState = HDMI_UI_MIRRORING;
} else {
// Without true mirroring, UI mirroring only runs while no
// video overlay is active (see fb_videoOverlayStarted).
if(!m->videoOverlay)
m->hdmiMirroringState = HDMI_UI_MIRRORING;
}
} else if (!externaltype && pTemp) {
m->hdmiMirroringState = HDMI_NO_MIRRORING;
closeHDMIChannel(m);
}
m->hdmiStateChanged = true;
pthread_cond_signal(&(m->overlayPost));
pthread_mutex_unlock(&m->overlayLock);
return 0;
}
static int fb_setActionSafeWidthRatio(struct framebuffer_device_t* dev, float asWidthRatio)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->overlayLock);
m->actionsafeWidthRatio = asWidthRatio;
pthread_mutex_unlock(&m->overlayLock);
return 0;
}
static int fb_setActionSafeHeightRatio(struct framebuffer_device_t* dev, float asHeightRatio)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->overlayLock);
m->actionsafeHeightRatio = asHeightRatio;
pthread_mutex_unlock(&m->overlayLock);
return 0;
}
static int fb_orientationChanged(struct framebuffer_device_t* dev, int orientation)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
pthread_mutex_lock(&m->overlayLock);
neworientation = orientation;
pthread_mutex_unlock(&m->overlayLock);
return 0;
}
#endif
// Post a buffer to the display.
// Framebuffer-backed buffers are queued to disp_loop (AVL -> SUB state
// transition); with swap interval 0 the current buffer is reused when
// no buffer is free. Non-framebuffer buffers are software-copied into
// the framebuffer. Returns 0 on success, -EINVAL for a bad handle.
static int fb_post(struct framebuffer_device_t* dev, buffer_handle_t buffer)
{
if (private_handle_t::validate(buffer) < 0)
return -EINVAL;
int nxtIdx, futureIdx = -1;
bool reuse;
struct qbuf_t qb;
fb_context_t* ctx = (fb_context_t*)dev;
private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(buffer);
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
reuse = false;
nxtIdx = (m->currentIdx + 1) % m->numBuffers;
futureIdx = (nxtIdx + 1) % m->numBuffers;
if (m->swapInterval == 0) {
// if SwapInterval = 0 and no buffers available then reuse
// current buf for next rendering so don't post new buffer
if (pthread_mutex_trylock(&(m->avail[nxtIdx].lock))) {
reuse = true;
} else {
if (! m->avail[nxtIdx].is_avail)
reuse = true;
pthread_mutex_unlock(&(m->avail[nxtIdx].lock));
}
}
if(!reuse){
// unlock previous ("current") Buffer and lock the new buffer
m->base.lock(&m->base, buffer,
private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
0,0, m->info.xres, m->info.yres, NULL);
// post/queue the new buffer
pthread_mutex_lock(&(m->avail[nxtIdx].lock));
if (m->avail[nxtIdx].is_avail != true) {
ALOGE_IF(m->swapInterval != 0, "Found %d buf to be not avail", nxtIdx);
}
m->avail[nxtIdx].is_avail = false;
if (m->avail[nxtIdx].state != AVL) {
ALOGD("[%d] state %c, expected %c", nxtIdx,
framebufferStateName[m->avail[nxtIdx].state],
framebufferStateName[AVL]);
}
// Mark submitted; disp_loop moves it to REF once on screen.
m->avail[nxtIdx].state = SUB;
pthread_mutex_unlock(&(m->avail[nxtIdx].lock));
qb.idx = nxtIdx;
qb.buf = buffer;
pthread_mutex_lock(&(m->qlock));
m->disp.push(qb);
pthread_cond_signal(&(m->qpost));
pthread_mutex_unlock(&(m->qlock));
if (m->currentBuffer)
m->base.unlock(&m->base, m->currentBuffer);
m->currentBuffer = buffer;
m->currentIdx = nxtIdx;
} else {
// Reuse path: swap the lock over to the same buffer without
// queueing a new post.
if (m->currentBuffer)
m->base.unlock(&m->base, m->currentBuffer);
m->base.lock(&m->base, buffer,
private_module_t::PRIV_USAGE_LOCKED_FOR_POST,
0,0, m->info.xres, m->info.yres, NULL);
m->currentBuffer = buffer;
}
} else {
// Non-framebuffer buffer: copy its pixels into the framebuffer.
void* fb_vaddr;
void* buffer_vaddr;
m->base.lock(&m->base, m->framebuffer,
GRALLOC_USAGE_SW_WRITE_RARELY,
0, 0, m->info.xres, m->info.yres,
&fb_vaddr);
m->base.lock(&m->base, buffer,
GRALLOC_USAGE_SW_READ_RARELY,
0, 0, m->info.xres, m->info.yres,
&buffer_vaddr);
//memcpy(fb_vaddr, buffer_vaddr, m->finfo.line_length * m->info.yres);
msm_copy_buffer(
m->framebuffer, m->framebuffer->fd,
m->info.xres, m->info.yres, m->fbFormat,
m->info.xoffset, m->info.yoffset,
m->info.width, m->info.height);
m->base.unlock(&m->base, buffer);
m->base.unlock(&m->base, m->framebuffer);
}
ALOGD_IF(FB_DEBUG, "Framebuffer state: [0] = %c [1] = %c [2] = %c",
framebufferStateName[m->avail[0].state],
framebufferStateName[m->avail[1].state],
framebufferStateName[m->avail[2].state]);
return 0;
}
// Called when GL composition for the frame is finished.
// glFinish() blocks until the GPU has completed all issued commands.
// Always returns 0.
static int fb_compositionComplete(struct framebuffer_device_t* dev)
{
// TODO: Properly implement composition complete callback
glFinish();
return 0;
}
// Block until framebuffer |index| is available (state AVL) for
// rendering; returns immediately when swap interval is 0.
// NOTE(review): the fast-path state read happens without holding
// avail[index].lock -- presumably an accepted benign race; confirm.
// Always returns 0.
static int fb_lockBuffer(struct framebuffer_device_t* dev, int index)
{
private_module_t* m = reinterpret_cast<private_module_t*>(
dev->common.module);
// Return immediately if the buffer is available
if ((m->avail[index].state == AVL) || (m->swapInterval == 0))
return 0;
pthread_mutex_lock(&(m->avail[index].lock));
while (m->avail[index].state != AVL) {
pthread_cond_wait(&(m->avail[index].cond),
&(m->avail[index].lock));
}
pthread_mutex_unlock(&(m->avail[index].lock));
return 0;
}
/*****************************************************************************/
int mapFrameBufferLocked(struct private_module_t* module)
{
// already initialized...
if (module->framebuffer) {
return 0;
}
char const * const device_template[] = {
"/dev/graphics/fb%u",
"/dev/fb%u",
0 };
int fd = -1;
int i=0;
char name[64];
char property[PROPERTY_VALUE_MAX];
while ((fd==-1) && device_template[i]) {
snprintf(name, 64, device_template[i], 0);
fd = open(name, O_RDWR, 0);
i++;
}
if (fd < 0)
return -errno;
struct fb_fix_screeninfo finfo;
if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1)
return -errno;
struct fb_var_screeninfo info;
if (ioctl(fd, FBIOGET_VSCREENINFO, &info) == -1)
return -errno;
info.reserved[0] = 0;
info.reserved[1] = 0;
info.reserved[2] = 0;
info.xoffset = 0;
info.yoffset = 0;
info.activate = FB_ACTIVATE_NOW;
/* Interpretation of offset for color fields: All offsets are from the right,
* inside a "pixel" value, which is exactly 'bits_per_pixel' wide (means: you
* can use the offset as right argument to <<). A pixel afterwards is a bit
* stream and is written to video memory as that unmodified. This implies
* big-endian byte order if bits_per_pixel is greater than 8.
*/
if(info.bits_per_pixel == 32) {
/*
* Explicitly request RGBA_8888
*/
info.bits_per_pixel = 32;
info.red.offset = 24;
info.red.length = 8;
info.green.offset = 16;
info.green.length = 8;
info.blue.offset = 8;
info.blue.length = 8;
info.transp.offset = 0;
info.transp.length = 8;
/* Note: the GL driver does not have a r=8 g=8 b=8 a=0 config, so if we do
* not use the MDP for composition (i.e. hw composition == 0), ask for
* RGBA instead of RGBX. */
if (property_get("debug.sf.hw", property, NULL) > 0 && atoi(property) == 0)
module->fbFormat = HAL_PIXEL_FORMAT_RGBX_8888;
else if(property_get("debug.composition.type", property, NULL) > 0 && (strncmp(property, "mdp", 3) == 0))
module->fbFormat = HAL_PIXEL_FORMAT_RGBX_8888;
else
module->fbFormat = HAL_PIXEL_FORMAT_RGBA_8888;
} else {
/*
* Explicitly request 5/6/5
*/
info.bits_per_pixel = 16;
info.red.offset = 11;
info.red.length = 5;
info.green.offset = 5;
info.green.length = 6;
info.blue.offset = 0;
info.blue.length = 5;
info.transp.offset = 0;
info.transp.length = 0;
module->fbFormat = HAL_PIXEL_FORMAT_RGB_565;
}
//adreno needs 4k aligned offsets. Max hole size is 4096-1
int size = roundUpToPageSize(info.yres * info.xres * (info.bits_per_pixel/8));
/*
* Request NUM_BUFFERS screens (at lest 2 for page flipping)
*/
int numberOfBuffers = (int)(finfo.smem_len/size);
ALOGV("num supported framebuffers in kernel = %d", numberOfBuffers);
if (property_get("debug.gr.numframebuffers", property, NULL) > 0) {
int num = atoi(property);
if ((num >= NUM_FRAMEBUFFERS_MIN) && (num <= NUM_FRAMEBUFFERS_MAX)) {
numberOfBuffers = num;
}
}
if (numberOfBuffers > NUM_FRAMEBUFFERS_MAX)
numberOfBuffers = NUM_FRAMEBUFFERS_MAX;
ALOGV("We support %d buffers", numberOfBuffers);
//consider the included hole by 4k alignment
uint32_t line_length = (info.xres * info.bits_per_pixel / 8);
info.yres_virtual = (size * numberOfBuffers) / line_length;
uint32_t flags = PAGE_FLIP;
if (ioctl(fd, FBIOPUT_VSCREENINFO, &info) == -1) {
info.yres_virtual = size / line_length;
flags &= ~PAGE_FLIP;
ALOGW("FBIOPUT_VSCREENINFO failed, page flipping not supported");
}
if (info.yres_virtual < ((size * 2) / line_length) ) {
// we need at least 2 for page-flipping
info.yres_virtual = size / line_length;
flags &= ~PAGE_FLIP;
ALOGW("page flipping not supported (yres_virtual=%d, requested=%d)",
info.yres_virtual, info.yres*2);
}
if (ioctl(fd, FBIOGET_VSCREENINFO, &info) == -1)
return -errno;
if (int(info.width) <= 0 || int(info.height) <= 0) {
// the driver doesn't return that information
// default to 160 dpi
info.width = ((info.xres * 25.4f)/160.0f + 0.5f);
info.height = ((info.yres * 25.4f)/160.0f + 0.5f);
}
float xdpi = (info.xres * 25.4f) / info.width;
float ydpi = (info.yres * 25.4f) / info.height;
//The reserved[4] field is used to store FPS by the driver.
float fps = info.reserved[4];
ALOGI( "using (fd=%d)\n"
"id = %s\n"
"xres = %d px\n"
"yres = %d px\n"
"xres_virtual = %d px\n"
"yres_virtual = %d px\n"
"bpp = %d\n"
"r = %2u:%u\n"
"g = %2u:%u\n"
"b = %2u:%u\n",
fd,
finfo.id,
info.xres,
info.yres,
info.xres_virtual,
info.yres_virtual,
info.bits_per_pixel,
info.red.offset, info.red.length,
info.green.offset, info.green.length,
info.blue.offset, info.blue.length
);
ALOGI( "width = %d mm (%f dpi)\n"
"height = %d mm (%f dpi)\n"
"refresh rate = %.2f Hz\n",
info.width, xdpi,
info.height, ydpi,
fps
);
if (ioctl(fd, FBIOGET_FSCREENINFO, &finfo) == -1)
return -errno;
if (finfo.smem_len <= 0)
return -errno;
module->flags = flags;
module->info = info;
module->finfo = finfo;
module->xdpi = xdpi;
module->ydpi = ydpi;
module->fps = fps;
#ifdef NO_SURFACEFLINGER_SWAPINTERVAL
char pval[PROPERTY_VALUE_MAX];
property_get("debug.gr.swapinterval", pval, "1");
module->swapInterval = atoi(pval);
if (module->swapInterval < private_module_t::PRIV_MIN_SWAP_INTERVAL ||
module->swapInterval > private_module_t::PRIV_MAX_SWAP_INTERVAL) {
module->swapInterval = 1;
ALOGW("Out of range (%d to %d) value for debug.gr.swapinterval, using 1",
private_module_t::PRIV_MIN_SWAP_INTERVAL,
private_module_t::PRIV_MAX_SWAP_INTERVAL);
}
#else
/* when surfaceflinger supports swapInterval then can just do this */
module->swapInterval = 1;
#endif
CALC_INIT();
module->currentIdx = -1;
pthread_cond_init(&(module->qpost), NULL);
pthread_mutex_init(&(module->qlock), NULL);
for (i = 0; i < NUM_FRAMEBUFFERS_MAX; i++) {
pthread_mutex_init(&(module->avail[i].lock), NULL);
pthread_cond_init(&(module->avail[i].cond), NULL);
module->avail[i].is_avail = true;
module->avail[i].state = AVL;
}
/* create display update thread */
pthread_t thread1;
if (pthread_create(&thread1, NULL, &disp_loop, (void *) module)) {
return -errno;
}
/*
* map the framebuffer
*/
int err;
module->numBuffers = info.yres_virtual / info.yres;
module->bufferMask = 0;
//adreno needs page aligned offsets. Align the fbsize to pagesize.
size_t fbSize = roundUpToPageSize(finfo.line_length * info.yres) * module->numBuffers;
module->framebuffer = new private_handle_t(fd, fbSize,
private_handle_t::PRIV_FLAGS_USES_PMEM, BUFFER_TYPE_UI,
module->fbFormat, info.xres, info.yres);
void* vaddr = mmap(0, fbSize, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
if (vaddr == MAP_FAILED) {
ALOGE("Error mapping the framebuffer (%s)", strerror(errno));
return -errno;
}
module->framebuffer->base = intptr_t(vaddr);
memset(vaddr, 0, fbSize);
#if defined(HDMI_DUAL_DISPLAY)
/* Overlay for HDMI*/
pthread_mutex_init(&(module->overlayLock), NULL);
pthread_cond_init(&(module->overlayPost), NULL);
module->pobjOverlay = new Overlay();
module->currentOffset = 0;
module->exitHDMIUILoop = false;
module->hdmiStateChanged = false;
pthread_t hdmiUIThread;
pthread_create(&hdmiUIThread, NULL, &hdmi_ui_loop, (void *) module);
module->hdmiMirroringState = HDMI_NO_MIRRORING;
module->trueMirrorSupport = false;
#endif
return 0;
}
/* Thread-safe wrapper: serializes mapFrameBufferLocked() behind the
 * module mutex and passes its result straight through. */
static int mapFrameBuffer(struct private_module_t* module)
{
    int result;
    pthread_mutex_lock(&module->lock);
    result = mapFrameBufferLocked(module);
    pthread_mutex_unlock(&module->lock);
    return result;
}
/*****************************************************************************/
/*
 * fb_close - hw_device_t close hook for the framebuffer device.
 *
 * Signals the HDMI UI thread to exit (when built with HDMI_DUAL_DISPLAY),
 * then frees the device context. Always returns 0.
 *
 * Fix: the original dereferenced ctx (via ctx->device.common.module in the
 * HDMI path) before its NULL check; all use is now guarded.
 */
static int fb_close(struct hw_device_t *dev)
{
    fb_context_t* ctx = (fb_context_t*)dev;
    if (ctx) {
#if defined(HDMI_DUAL_DISPLAY)
        /* Ask the HDMI UI loop to exit before tearing down the context. */
        private_module_t* m = reinterpret_cast<private_module_t*>(
            ctx->device.common.module);
        pthread_mutex_lock(&m->overlayLock);
        m->exitHDMIUILoop = true;
        pthread_cond_signal(&(m->overlayPost));
        pthread_mutex_unlock(&m->overlayLock);
#endif
        free(ctx);
    }
    return 0;
}
/*
 * fb_device_open - factory for the framebuffer HAL device (fb0).
 *
 * Opens the gralloc device, allocates and initializes an fb_context_t,
 * maps the framebuffer and publishes the device's immutable properties.
 * Returns 0 on success or a negative errno on failure. The temporary
 * gralloc device is always closed again before returning.
 *
 * Fixes: checks the malloc() result, and frees the context instead of
 * leaking it when mapFrameBuffer() fails.
 */
int fb_device_open(hw_module_t const* module, const char* name,
        hw_device_t** device)
{
    int status = -EINVAL;
    if (!strcmp(name, GRALLOC_HARDWARE_FB0)) {
        alloc_device_t* gralloc_device;
        status = gralloc_open(module, &gralloc_device);
        if (status < 0)
            return status;

        /* initialize our state here */
        fb_context_t *dev = (fb_context_t*)malloc(sizeof(*dev));
        if (dev == NULL) {
            gralloc_close(gralloc_device);
            return -ENOMEM;
        }
        memset(dev, 0, sizeof(*dev));

        /* initialize the procs */
        dev->device.common.tag      = HARDWARE_DEVICE_TAG;
        dev->device.common.version  = 0;
        dev->device.common.module   = const_cast<hw_module_t*>(module);
        dev->device.common.close    = fb_close;
        dev->device.setSwapInterval = fb_setSwapInterval;
        dev->device.post            = fb_post;
        dev->device.setUpdateRect   = 0;
        dev->device.compositionComplete = fb_compositionComplete;
        //dev->device.lockBuffer = fb_lockBuffer;
#if defined(HDMI_DUAL_DISPLAY)
        dev->device.orientationChanged       = fb_orientationChanged;
        dev->device.videoOverlayStarted      = fb_videoOverlayStarted;
        dev->device.enableHDMIOutput         = fb_enableHDMIOutput;
        dev->device.setActionSafeWidthRatio  = fb_setActionSafeWidthRatio;
        dev->device.setActionSafeHeightRatio = fb_setActionSafeHeightRatio;
#endif
        private_module_t* m = (private_module_t*)module;
        status = mapFrameBuffer(m);
        if (status >= 0) {
            int stride = m->finfo.line_length / (m->info.bits_per_pixel >> 3);
            const_cast<uint32_t&>(dev->device.flags)  = 0;
            const_cast<uint32_t&>(dev->device.width)  = m->info.xres;
            const_cast<uint32_t&>(dev->device.height) = m->info.yres;
            const_cast<int&>(dev->device.stride)      = stride;
            const_cast<int&>(dev->device.format)      = m->fbFormat;
            const_cast<float&>(dev->device.xdpi)      = m->xdpi;
            const_cast<float&>(dev->device.ydpi)      = m->ydpi;
            const_cast<float&>(dev->device.fps)       = m->fps;
            const_cast<int&>(dev->device.minSwapInterval) =
                private_module_t::PRIV_MIN_SWAP_INTERVAL;
            const_cast<int&>(dev->device.maxSwapInterval) =
                private_module_t::PRIV_MAX_SWAP_INTERVAL;
            //const_cast<int&>(dev->device.numFramebuffers) = m->numBuffers;
            // Driver advertises "update on demand" via a magic signature
            // in the fix-info reserved words.
            if (m->finfo.reserved[0] == 0x5444 &&
                m->finfo.reserved[1] == 0x5055) {
                dev->device.setUpdateRect = fb_setUpdateRect;
                ALOGD("UPDATE_ON_DEMAND supported");
            }
            *device = &dev->device.common;
        } else {
            // mapFrameBuffer failed: don't leak the device context.
            free(dev);
        }
        // Close the gralloc module
        gralloc_close(gralloc_device);
    }
    return status;
}
/* Copy a pmem buffer to the framebuffer */
/*
 * msm_copy_buffer - blit the pmem buffer behind `handle` onto the
 * framebuffer open on `fd`, using the kernel MDP blit ioctl.
 *
 * (x, y, w, h) select the same rectangle in both source and destination;
 * width/height/format describe the surfaces. Errors are logged, not
 * returned.
 */
static void
msm_copy_buffer(buffer_handle_t handle, int fd,
                int width, int height, int format,
                int x, int y, int w, int h)
{
    // Kernel blit request: a count followed by `count` mdp_blit_req entries.
    struct {
        unsigned int count;
        mdp_blit_req req;
    } blit;
    private_handle_t *priv = (private_handle_t*) handle;
    memset(&blit, 0, sizeof(blit));   // zero every field not set explicitly
    blit.count = 1;
    blit.req.flags = 0;
    blit.req.alpha = 0xff;             // fully opaque
    blit.req.transp_mask = 0xffffffff; // no transparency color key
    // Source: the pmem buffer behind the handle.
    blit.req.src.width = width;
    blit.req.src.height = height;
    blit.req.src.offset = 0;
    blit.req.src.memory_id = priv->fd;
    // Destination: the framebuffer fd passed in.
    // NOTE(review): src.format is left 0 from the memset while dst.format
    // is set explicitly -- confirm the MDP driver treats 0 as "same as dst".
    blit.req.dst.width = width;
    blit.req.dst.height = height;
    blit.req.dst.offset = 0;
    blit.req.dst.memory_id = fd;
    blit.req.dst.format = format;
    // Same rectangle on both sides: a straight copy, no scaling.
    blit.req.src_rect.x = blit.req.dst_rect.x = x;
    blit.req.src_rect.y = blit.req.dst_rect.y = y;
    blit.req.src_rect.w = blit.req.dst_rect.w = w;
    blit.req.src_rect.h = blit.req.dst_rect.h = h;
    if (ioctl(fd, MSMFB_BLIT, &blit))
        ALOGE("MSMFB_BLIT failed = %d", -errno);
}

333
libgralloc/gpu.cpp Executable file
View File

@ -0,0 +1,333 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* Copyright (c) 2011-2012 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include <unistd.h>
#include <fcntl.h>
#include <cutils/properties.h>
#include <sys/mman.h>
#include <genlock.h>
#include "gr.h"
#include "gpu.h"
#include "memalloc.h"
#include "alloc_controller.h"
using namespace gralloc;
using android::sp;
/*
 * gpu_context_t - alloc_device_t implementation.
 *
 * Decides the composition type from the debug.sf.hw and
 * debug.composition.type system properties, then wires up the allocator
 * entry points. `alloc_ctrl` supplies the backing memory allocators.
 *
 * Fix: the original tested `property == NULL` after property_get() --
 * `property` is a stack array, so the comparison was always false and the
 * branch dead. The property_get() return value now detects an unset
 * property (behavior is unchanged: both paths selected GPU_COMPOSITION).
 */
gpu_context_t::gpu_context_t(const private_module_t* module,
        sp<IAllocController> alloc_ctrl ) :
    mAllocCtrl(alloc_ctrl)
{
    // Zero out the alloc_device_t
    memset(static_cast<alloc_device_t*>(this), 0, sizeof(alloc_device_t));

    char property[PROPERTY_VALUE_MAX];
    if (property_get("debug.sf.hw", property, NULL) > 0) {
        if(atoi(property) == 0) {
            //debug.sf.hw = 0
            compositionType = CPU_COMPOSITION;
        } else { //debug.sf.hw = 1
            // Get the composition type; default to GPU when unset.
            if (property_get("debug.composition.type", property, NULL) <= 0) {
                compositionType = GPU_COMPOSITION;
            } else if ((strncmp(property, "mdp", 3)) == 0) {
                compositionType = MDP_COMPOSITION;
            } else if ((strncmp(property, "c2d", 3)) == 0) {
                compositionType = C2D_COMPOSITION;
            } else {
                compositionType = GPU_COMPOSITION;
            }
        }
    } else { //debug.sf.hw is not set. Use cpu composition
        compositionType = CPU_COMPOSITION;
    }

    // Initialize the procs
    common.tag     = HARDWARE_DEVICE_TAG;
    common.version = 0;
    common.module  = const_cast<hw_module_t*>(&module->base.common);
    common.close   = gralloc_close;
    alloc          = gralloc_alloc;
#if 0
    allocSize      = gralloc_alloc_size;
#endif
    free           = gralloc_free;
}
/*
 * Allocate one page of the physical framebuffer as a gralloc buffer.
 *
 * Caller must hold private_module_t::lock (see gralloc_alloc_framebuffer).
 * Returns 0 with *pHandle set, -EINVAL on bad usage/missing fb, -ENOMEM
 * when every fb slot is taken.
 *
 * NOTE(review): the `size` parameter is unused -- the buffer size is
 * recomputed from the fb fix/var info below.
 */
int gpu_context_t::gralloc_alloc_framebuffer_locked(size_t size, int usage,
        buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);

    // we don't support allocations with both the FB and PMEM_ADSP flags
    if (usage & GRALLOC_USAGE_PRIVATE_ADSP_HEAP) {
        return -EINVAL;
    }

    if (m->framebuffer == NULL) {
        ALOGE("%s: Invalid framebuffer", __FUNCTION__);
        return -EINVAL;
    }

    const uint32_t bufferMask = m->bufferMask;
    const uint32_t numBuffers = m->numBuffers;
    size_t bufferSize = m->finfo.line_length * m->info.yres;
    //adreno needs FB size to be page aligned
    bufferSize = roundUpToPageSize(bufferSize);

    if (numBuffers == 1) {
        // If we have only one buffer, we never use page-flipping. Instead,
        // we return a regular buffer which will be memcpy'ed to the main
        // screen when post is called.
        int newUsage = (usage & ~GRALLOC_USAGE_HW_FB) | GRALLOC_USAGE_HW_2D;
        return gralloc_alloc_buffer(bufferSize, newUsage, pHandle, BUFFER_TYPE_UI,
                                    m->fbFormat, m->info.xres, m->info.yres);
    }

    if (bufferMask >= ((1LU<<numBuffers)-1)) {
        // We ran out of buffers.
        return -ENOMEM;
    }

    // create a "fake" handles for it
    // Set the PMEM flag as well, since adreno
    // treats the FB memory as pmem
    // (dup() so the handle owns its own copy of the fb descriptor)
    intptr_t vaddr = intptr_t(m->framebuffer->base);
    private_handle_t* hnd = new private_handle_t(dup(m->framebuffer->fd), bufferSize,
                                                 private_handle_t::PRIV_FLAGS_USES_PMEM |
                                                 private_handle_t::PRIV_FLAGS_FRAMEBUFFER,
                                                 BUFFER_TYPE_UI, m->fbFormat, m->info.xres,
                                                 m->info.yres);

    // find a free slot: claim the first clear bit in bufferMask; vaddr
    // advances one page-aligned buffer per occupied slot scanned.
    for (uint32_t i=0 ; i<numBuffers ; i++) {
        if ((bufferMask & (1LU<<i)) == 0) {
            m->bufferMask |= (1LU<<i);
            break;
        }
        vaddr += bufferSize;
    }

    // Record where inside the mapped fb this slot lives.
    hnd->base = vaddr;
    hnd->offset = vaddr - intptr_t(m->framebuffer->base);
    *pHandle = hnd;
    return 0;
}
/* Mutex-guarded entry point for framebuffer allocation: takes the module
 * lock around gralloc_alloc_framebuffer_locked() and returns its result. */
int gpu_context_t::gralloc_alloc_framebuffer(size_t size, int usage,
        buffer_handle_t* pHandle)
{
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    pthread_mutex_lock(&m->lock);
    const int ret = gralloc_alloc_framebuffer_locked(size, usage, pHandle);
    pthread_mutex_unlock(&m->lock);
    return ret;
}
/*
 * Allocate a regular (non-framebuffer) buffer through the allocation
 * controller and wrap the result in a private_handle_t.
 *
 * Returns 0 on success (with *pHandle set) or the controller's negative
 * error code; failures are also logged.
 */
int gpu_context_t::gralloc_alloc_buffer(size_t size, int usage,
        buffer_handle_t* pHandle, int bufferType,
        int format, int width, int height)
{
    int err = 0;
    int flags = 0;
    size = roundUpToPageSize(size);
    alloc_data data;
    data.offset = 0;
    data.fd = -1;
    data.base = 0;
    data.size = size;
    if(format == HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED)
        data.align = 8192;          // tiled YUV needs 8K alignment
    else
        data.align = getpagesize();
    // NOTE(review): casting a pointer to unsigned int truncates on LP64
    // targets; acceptable for the 32-bit platforms this targets, but
    // verify alloc_data::pHandle's type before reusing elsewhere.
    data.pHandle = (unsigned int) pHandle;
    err = mAllocCtrl->allocate(data, usage, compositionType);

    // Translate usage bits into private-handle flags (only consumed on
    // the success path below).
    if (usage & GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED) {
        flags |= private_handle_t::PRIV_FLAGS_UNSYNCHRONIZED;
    }
    if (usage & GRALLOC_USAGE_EXTERNAL_ONLY) {
        flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY;
        //The EXTERNAL_BLOCK flag is always an add-on
        if (usage & GRALLOC_USAGE_EXTERNAL_BLOCK) {
            flags |= private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK;
        }
    }

    if (err == 0) {
        flags |= data.allocType;  // allocator type reported by the controller
        private_handle_t* hnd = new private_handle_t(data.fd, size, flags,
                                                     bufferType, format, width, height);
        hnd->offset = data.offset;
        hnd->base = int(data.base) + data.offset;
        *pHandle = hnd;
    }

    ALOGE_IF(err, "gralloc failed err=%s", strerror(-err));
    return err;
}
/*
 * Classify a HAL pixel format: derive the base color format (stripping
 * S3D / interlace marker bits) and whether the buffer is UI or video.
 */
void gpu_context_t::getGrallocInformationFromFormat(int inputFormat,
                                                    int *colorFormat,
                                                    int *bufferType)
{
    // Defaults: video buffer, format passed straight through.
    *bufferType = BUFFER_TYPE_VIDEO;
    *colorFormat = inputFormat;

    if (inputFormat == HAL_PIXEL_FORMAT_YV12) {
        // Planar YUV stays a video buffer with its format untouched.
        return;
    }
    if (inputFormat & S3D_FORMAT_MASK) {
        // S3D content: keep only the base color format bits.
        *colorFormat = COLOR_FORMAT(inputFormat);
        return;
    }
    if (inputFormat & INTERLACE_MASK) {
        // Interlaced content: clear the interlace marker bit.
        *colorFormat = inputFormat ^ HAL_PIXEL_FORMAT_INTERLACE;
        return;
    }
    if (inputFormat < 0x7 ||
        inputFormat == HAL_PIXEL_FORMAT_R_8 ||
        inputFormat == HAL_PIXEL_FORMAT_RG_88) {
        // RGB and single/dual-channel formats are UI buffers.
        *bufferType = BUFFER_TYPE_UI;
    }
}
int gpu_context_t::alloc_impl(int w, int h, int format, int usage,
buffer_handle_t* pHandle, int* pStride, size_t bufferSize) {
if (!pHandle || !pStride)
return -EINVAL;
size_t size;
int alignedw, alignedh;
int colorFormat, bufferType;
getGrallocInformationFromFormat(format, &colorFormat, &bufferType);
size = getBufferSizeAndDimensions(w, h, colorFormat, alignedw, alignedh);
if ((ssize_t)size <= 0)
return -EINVAL;
size = (bufferSize >= size)? bufferSize : size;
// All buffers marked as protected or for external
// display need to go to overlay
if ((usage & GRALLOC_USAGE_EXTERNAL_DISP) ||
(usage & GRALLOC_USAGE_PROTECTED)) {
bufferType = BUFFER_TYPE_VIDEO;
}
int err;
if (usage & GRALLOC_USAGE_HW_FB) {
err = gralloc_alloc_framebuffer(size, usage, pHandle);
} else {
err = gralloc_alloc_buffer(size, usage, pHandle, bufferType,
format, alignedw, alignedh);
}
if (err < 0) {
return err;
}
// Create a genlock lock for this buffer handle.
err = genlock_create_lock((native_handle_t*)(*pHandle));
if (err) {
ALOGE("%s: genlock_create_lock failed", __FUNCTION__);
free_impl(reinterpret_cast<private_handle_t*>(pHandle));
return err;
}
*pStride = alignedw;
return 0;
}
/*
 * Release a buffer produced by alloc_impl.
 *
 * Framebuffer-backed handles return their page to the module's
 * bufferMask; other handles are returned to their memory allocator and
 * unmapped via terminateBuffer. The handle's genlock is then released
 * and the handle object deleted. Returns 0 on success or the
 * allocator's error code.
 *
 * NOTE(review): when free_buffer fails this returns early without
 * releasing the genlock or deleting hnd, leaking the handle. Also, the
 * alloc path page-aligns each fb slot's size but the index division
 * below does not -- confirm the math when line_length*yres isn't
 * page-aligned.
 */
int gpu_context_t::free_impl(private_handle_t const* hnd) {
    private_module_t* m = reinterpret_cast<private_module_t*>(common.module);
    if (hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) {
        // free this buffer: compute which fb page the handle mapped and
        // clear its bit so the slot can be handed out again.
        const size_t bufferSize = m->finfo.line_length * m->info.yres;
        int index = (hnd->base - m->framebuffer->base) / bufferSize;
        m->bufferMask &= ~(1<<index);
    } else {
        sp<IMemAlloc> memalloc = mAllocCtrl->getAllocator(hnd->flags);
        int err = memalloc->free_buffer((void*)hnd->base, (size_t) hnd->size,
                                        hnd->offset, hnd->fd);
        if(err)
            return err;
        terminateBuffer(&m->base, const_cast<private_handle_t*>(hnd));
    }

    // Release the genlock
    int err = genlock_release_lock((native_handle_t*)hnd);
    if (err) {
        ALOGE("%s: genlock_release_lock failed", __FUNCTION__);
    }
    delete hnd;
    return 0;
}
/* alloc_device_t::alloc hook -- forwards to alloc_impl with no explicit
 * minimum buffer size. Returns -EINVAL for a NULL device. */
int gpu_context_t::gralloc_alloc(alloc_device_t* dev, int w, int h, int format,
        int usage, buffer_handle_t* pHandle, int* pStride)
{
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    if (gpu == NULL)
        return -EINVAL;
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, 0);
}
/* alloc_device_t::allocSize hook -- like alloc but with an explicit
 * minimum buffer size in bytes. Returns -EINVAL for a NULL device. */
int gpu_context_t::gralloc_alloc_size(alloc_device_t* dev, int w, int h, int format,
        int usage, buffer_handle_t* pHandle, int* pStride, int bufferSize)
{
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    if (gpu == NULL)
        return -EINVAL;
    return gpu->alloc_impl(w, h, format, usage, pHandle, pStride, bufferSize);
}
/*
 * alloc_device_t::free hook -- validates the handle and releases it via
 * free_impl. Returns -EINVAL for a NULL device or invalid handle.
 *
 * Fix: adds the NULL-device check present in the sibling entry points
 * (gralloc_alloc / gralloc_alloc_size); the original dereferenced a
 * NULL gpu context.
 */
int gpu_context_t::gralloc_free(alloc_device_t* dev,
                                buffer_handle_t handle)
{
    if (!dev)
        return -EINVAL;
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;
    private_handle_t const* hnd = reinterpret_cast<private_handle_t const*>(handle);
    gpu_context_t* gpu = reinterpret_cast<gpu_context_t*>(dev);
    return gpu->free_impl(hnd);
}
/*****************************************************************************/
/* hw_device_t close hook: destroys the gpu context. Always returns 0. */
int gpu_context_t::gralloc_close(struct hw_device_t *dev)
{
    /* TODO: keep a list of all buffer_handle_t created, and free them
     * all here.
     */
    gpu_context_t* ctx = reinterpret_cast<gpu_context_t*>(dev);
    delete ctx;   // deleting NULL is a no-op, so no explicit check needed
    return 0;
}

81
libgralloc/gpu.h Normal file
View File

@ -0,0 +1,81 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRALLOC_GPU_H_
#define GRALLOC_GPU_H_
#include <errno.h>
#include <pthread.h>
#include <stdlib.h>
#include <string.h>
#include <cutils/log.h>
#include <cutils/ashmem.h>
#include <utils/RefBase.h>
#include "gralloc_priv.h"
namespace gralloc {
class IAllocController;
/*
 * gpu_context_t - gralloc's alloc_device_t implementation.
 *
 * Publicly derives from alloc_device_t so a pointer to this object can
 * be handed back through the HAL as the device handle (see
 * gralloc_device_open). Buffer memory comes from the IAllocController
 * supplied at construction.
 */
class gpu_context_t : public alloc_device_t {
public:
    gpu_context_t(const private_module_t* module,
                  android::sp<IAllocController>alloc_ctrl);

    // Allocate one of the framebuffer pages; the *_locked flavor
    // requires the caller to hold the module lock.
    int gralloc_alloc_framebuffer_locked(size_t size, int usage,
                                         buffer_handle_t* pHandle);
    int gralloc_alloc_framebuffer(size_t size, int usage,
                                  buffer_handle_t* pHandle);

    // Allocate a regular (non-framebuffer) buffer via the controller.
    int gralloc_alloc_buffer(size_t size, int usage,
                             buffer_handle_t* pHandle,
                             int bufferType, int format,
                             int width, int height);

    // Release a buffer previously produced by alloc_impl.
    int free_impl(private_handle_t const* hnd);

    // Common allocation path; bufferSize (when non-zero) is a minimum
    // byte size that overrides the computed one.
    int alloc_impl(int w, int h, int format, int usage,
                   buffer_handle_t* pHandle, int* pStride,
                   size_t bufferSize = 0);

    // alloc_device_t entry points (static trampolines into *this).
    static int gralloc_alloc(alloc_device_t* dev, int w, int h,
                             int format, int usage,
                             buffer_handle_t* pHandle,
                             int* pStride);
    static int gralloc_free(alloc_device_t* dev, buffer_handle_t handle);
    static int gralloc_alloc_size(alloc_device_t* dev,
                                  int w, int h, int format,
                                  int usage, buffer_handle_t* pHandle,
                                  int* pStride, int bufferSize);
    static int gralloc_close(struct hw_device_t *dev);

    int get_composition_type() const { return compositionType; }

private:
    android::sp<IAllocController> mAllocCtrl; // backing memory allocators
    int compositionType;                      // one of *_COMPOSITION
    void getGrallocInformationFromFormat(int inputFormat,
                                         int *colorFormat,
                                         int *bufferType);
};
}
#endif // GRALLOC_GPU_H_

82
libgralloc/gr.h Normal file
View File

@ -0,0 +1,82 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GR_H_
#define GR_H_
#include <stdint.h>
#ifdef HAVE_ANDROID_OS // just want PAGE_SIZE define
# include <asm/page.h>
#else
# include <sys/user.h>
#endif
#include <limits.h>
#include <sys/cdefs.h>
#include <hardware/gralloc.h>
#include <pthread.h>
#include <errno.h>
#include <cutils/native_handle.h>
/*****************************************************************************/
struct private_module_t;
struct private_handle_t;
// Round x up to the next multiple of the system page size.
inline size_t roundUpToPageSize(size_t x) {
    const size_t pageMask = PAGE_SIZE - 1;
    return (x + pageMask) & ~pageMask;
}
// Round x up to the next multiple of `align` (align must be a power of two).
inline size_t ALIGN(size_t x, size_t align) {
    const size_t mask = align - 1;
    return (x + mask) & ~mask;
}
#define FALSE 0
#define TRUE 1
int mapFrameBufferLocked(struct private_module_t* module);
int terminateBuffer(gralloc_module_t const* module, private_handle_t* hnd);
size_t getBufferSizeAndDimensions(int width, int height, int format,
int& alignedw, int &alignedh);
int decideBufferHandlingMechanism(int format, const char *compositionUsed,
int hasBlitEngine, int *needConversion,
int *useBufferDirectly);
// Allocate buffer from width, height, format into a private_handle_t
// It is the responsibility of the caller to free the buffer
int alloc_buffer(private_handle_t **pHnd, int w, int h, int format, int usage);
void free_buffer(private_handle_t *hnd);
/*****************************************************************************/
/*
 * Locker: thin wrapper around a pthread mutex with an RAII scoped guard.
 * Not recursive; callers must pair lock()/unlock() or use Autolock.
 */
class Locker {
    pthread_mutex_t mMutex;

public:
    /* Scoped guard: locks on construction, unlocks on destruction. */
    class Autolock {
        Locker& mLocker;
    public:
        inline Autolock(Locker& locker) : mLocker(locker) { mLocker.lock(); }
        inline ~Autolock() { mLocker.unlock(); }
    };

    inline Locker()      { pthread_mutex_init(&mMutex, 0); }
    inline ~Locker()     { pthread_mutex_destroy(&mMutex); }
    inline void lock()   { pthread_mutex_lock(&mMutex); }
    inline void unlock() { pthread_mutex_unlock(&mMutex); }
};
#endif /* GR_H_ */

113
libgralloc/gralloc.cpp Normal file
View File

@ -0,0 +1,113 @@
/*
* Copyright (C) 2008, The Android Open Source Project
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <cutils/properties.h>
#include <utils/RefBase.h>
#include <linux/android_pmem.h>
#include "gr.h"
#include "gpu.h"
#include "memalloc.h"
#include "alloc_controller.h"
using namespace gralloc;
using android::sp;
int fb_device_open(const hw_module_t* module, const char* name,
hw_device_t** device);
static int gralloc_device_open(const hw_module_t* module, const char* name,
hw_device_t** device);
extern int gralloc_lock(gralloc_module_t const* module,
buffer_handle_t handle, int usage,
int l, int t, int w, int h,
void** vaddr);
extern int gralloc_unlock(gralloc_module_t const* module,
buffer_handle_t handle);
extern int gralloc_register_buffer(gralloc_module_t const* module,
buffer_handle_t handle);
extern int gralloc_unregister_buffer(gralloc_module_t const* module,
buffer_handle_t handle);
extern int gralloc_perform(struct gralloc_module_t const* module,
int operation, ... );
// HAL module methods
// Entry-point table consumed by the Android hardware module loader; its
// `open` hook (gralloc_device_open) instantiates the gpu and fb devices.
static struct hw_module_methods_t gralloc_module_methods = {
    open: gralloc_device_open
};
// HAL module initialize
// Module descriptor exported under the well-known HAL_MODULE_INFO_SYM
// name. The gralloc_module_t part carries the buffer map/lock entry
// points; the trailing fields hold framebuffer state filled in later by
// mapFrameBufferLocked().
struct private_module_t HAL_MODULE_INFO_SYM = {
    base: {
        common: {
            tag: HARDWARE_MODULE_TAG,
            version_major: 1,
            version_minor: 0,
            id: GRALLOC_HARDWARE_MODULE_ID,
            name: "Graphics Memory Allocator Module",
            author: "The Android Open Source Project",
            methods: &gralloc_module_methods,
            dso: 0,
            reserved: {0},
        },
        // Buffer access hooks implemented in mapper.cpp.
        registerBuffer: gralloc_register_buffer,
        unregisterBuffer: gralloc_unregister_buffer,
        lock: gralloc_lock,
        unlock: gralloc_unlock,
        perform: gralloc_perform,
        reserved_proc: {0},
    },
    // Framebuffer state, populated by mapFrameBufferLocked().
    framebuffer: 0,
    fbFormat: 0,
    flags: 0,
    numBuffers: 0,
    bufferMask: 0,
    lock: PTHREAD_MUTEX_INITIALIZER,
    currentBuffer: 0,
};
// Open Gralloc device
/*
 * Device factory for this HAL module. GPU0 requests build a gpu_context_t
 * bound to the global allocation controller; any other name is delegated
 * to the framebuffer device factory (which rejects unknown names).
 */
int gralloc_device_open(const hw_module_t* module, const char* name,
        hw_device_t** device)
{
    if (strcmp(name, GRALLOC_HARDWARE_GPU0) != 0) {
        // Not the GPU device: let the framebuffer path handle (or reject) it.
        return fb_device_open(module, name, device);
    }

    const private_module_t* m =
        reinterpret_cast<const private_module_t*>(module);
    sp<IAllocController> alloc_ctrl = IAllocController::getInstance(true);
    gpu_context_t* dev = new gpu_context_t(m, alloc_ctrl);
    *device = &dev->common;
    return 0;
}

400
libgralloc/gralloc_priv.h Normal file
View File

@ -0,0 +1,400 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRALLOC_PRIV_H_
#define GRALLOC_PRIV_H_
#include <stdint.h>
#include <limits.h>
#include <sys/cdefs.h>
#include <hardware/gralloc.h>
#include <pthread.h>
#include <errno.h>
#include <unistd.h>
#include <cutils/native_handle.h>
#include <linux/fb.h>
#if defined(__cplusplus) && defined(HDMI_DUAL_DISPLAY)
#include "overlayLib.h"
using namespace overlay;
#endif
#include <cutils/log.h>
enum {
/* gralloc usage bits indicating the type
* of allocation that should be used */
/* ADSP heap is deprecated, use only if using pmem */
GRALLOC_USAGE_PRIVATE_ADSP_HEAP = GRALLOC_USAGE_PRIVATE_0,
/* SF heap is used for application buffers, is not secured */
GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP = GRALLOC_USAGE_PRIVATE_1,
/* SMI heap is deprecated, use only if using pmem */
GRALLOC_USAGE_PRIVATE_SMI_HEAP = GRALLOC_USAGE_PRIVATE_2,
/* SYSTEM heap comes from kernel vmalloc,
* can never be uncached, is not secured*/
GRALLOC_USAGE_PRIVATE_SYSTEM_HEAP = GRALLOC_USAGE_PRIVATE_3,
/* IOMMU heap comes from manually allocated pages,
* can be cached/uncached, is not secured */
GRALLOC_USAGE_PRIVATE_IOMMU_HEAP = 0x01000000,
/* MM heap is a carveout heap for video, can be secured*/
GRALLOC_USAGE_PRIVATE_MM_HEAP = 0x02000000,
/* WRITEBACK heap is a carveout heap for writeback, can be secured*/
GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP = 0x04000000,
/* CAMERA heap is a carveout heap for camera, is not secured*/
GRALLOC_USAGE_PRIVATE_CAMERA_HEAP = 0x08000000,
/* Set this for allocating uncached memory (using O_DSYNC)
* cannot be used with noncontiguous heaps */
GRALLOC_USAGE_PRIVATE_UNCACHED = 0x00100000,
/* This flag needs to be set when using a non-contiguous heap from ION.
* If not set, the system heap is assumed to be coming from ashmem
*/
GRALLOC_USAGE_PRIVATE_ION = 0x00200000,
/* This flag can be set to disable genlock synchronization
* for the gralloc buffer. If this flag is set the caller
* is required to perform explicit synchronization.
* WARNING - flag is outside the standard PRIVATE region
* and may need to be moved if the gralloc API changes
*/
GRALLOC_USAGE_PRIVATE_UNSYNCHRONIZED = 0X00400000,
/* Set this flag when you need to avoid mapping the memory in userspace */
GRALLOC_USAGE_PRIVATE_DO_NOT_MAP = 0X00800000,
/* Buffer content should be displayed on an external display only */
GRALLOC_USAGE_EXTERNAL_ONLY = 0x00010000,
/* Only this buffer content should be displayed on external, even if
* other EXTERNAL_ONLY buffers are available. Used during suspend.
*/
GRALLOC_USAGE_EXTERNAL_BLOCK = 0x00020000,
};
enum {
/* Gralloc perform enums
*/
GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER = 0x080000001,
};
enum {
GPU_COMPOSITION,
C2D_COMPOSITION,
MDP_COMPOSITION,
CPU_COMPOSITION,
};
/* numbers of max buffers for page flipping */
#define NUM_FRAMEBUFFERS_MIN 2
#define NUM_FRAMEBUFFERS_MAX 3
/* number of default bufers for page flipping */
#define NUM_DEF_FRAME_BUFFERS 2
#define NO_SURFACEFLINGER_SWAPINTERVAL
#define INTERLACE_MASK 0x80
#define S3D_FORMAT_MASK 0xFF000
#define COLOR_FORMAT(x) (x & 0xFFF) // Max range for colorFormats is 0 - FFF
#define DEVICE_PMEM "/dev/pmem"
#define DEVICE_PMEM_ADSP "/dev/pmem_adsp"
#define DEVICE_PMEM_SMIPOOL "/dev/pmem_smipool"
/*****************************************************************************/
#ifdef __cplusplus
//XXX: Remove framebuffer specific classes and defines to a different header
// Singly-linked list node used by Queue.
template <class T>
struct Node
{
    T data;        // payload held by this link
    Node<T> *next; // next link towards the back; NULL at the tail
};

/*
 * Queue - minimal singly-linked FIFO.
 *
 * push() appends at the back, pop() removes from the front (no-op when
 * empty). getHeadValue() returns a reference to the front element; on an
 * empty queue it logs an error and returns a reference to an internal
 * dummy element instead of crashing. Not thread-safe; callers provide
 * their own locking. Do not copy instances: the raw node pointers would
 * be freed twice.
 *
 * Fix: pop() no longer makes a dead copy of the front element
 * (`T item = front->data;` was never used).
 */
template <class T>
class Queue
{
public:
    Queue(): front(NULL), back(NULL), len(0) {dummy = new T;}
    ~Queue()
    {
        clear();
        delete dummy;
    }
    // Append an item at the back of the queue.
    void push(const T& item)
    {
        if(len != 0) { //if the queue is not empty
            back->next = new Node<T>; //create a new node
            back = back->next;        //set the new node as the back node
            back->data = item;
            back->next = NULL;
        } else {
            // First element: front and back refer to the same node.
            back = new Node<T>;
            back->data = item;
            back->next = NULL;
            front = back;
        }
        len++;
    }
    // Remove the first item from the queue; no-op when empty.
    void pop()
    {
        if (isEmpty())
            return;
        Node<T> *tmp = front;
        front = front->next;
        delete tmp;
        if(front == NULL) // queue drained: keep the back pointer consistent
            back = NULL;
        len--;
    }
    // Value of the first item without removing it; returns the internal
    // dummy element (and logs) when the queue is empty.
    T& getHeadValue() const
    {
        if (isEmpty()) {
            ALOGE("Error can't get head of empty queue");
            return *dummy;
        }
        return front->data;
    }
    // True when no elements are queued.
    bool isEmpty() const
    {
        return (front == NULL);
    }
    // Number of queued elements.
    size_t size() const
    {
        return len;
    }
private:
    Node<T> *front;
    Node<T> *back;
    size_t len;
    // Delete every node (used by the destructor).
    void clear()
    {
        while (!isEmpty())
            pop();
    }
    T *dummy; // fallback referent for getHeadValue() on an empty queue
};
#endif
enum {
/* OEM specific HAL formats */
HAL_PIXEL_FORMAT_NV12_ENCODEABLE = 0x102,
HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED = 0x108,
HAL_PIXEL_FORMAT_YCbCr_420_SP = 0x109,
HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO = 0x10A,
HAL_PIXEL_FORMAT_YCrCb_422_SP = 0x10B,
HAL_PIXEL_FORMAT_R_8 = 0x10D,
HAL_PIXEL_FORMAT_RG_88 = 0x10E,
HAL_PIXEL_FORMAT_INTERLACE = 0x180,
};
/* possible formats for 3D content*/
enum {
HAL_NO_3D = 0x0000,
HAL_3D_IN_SIDE_BY_SIDE_L_R = 0x10000,
HAL_3D_IN_TOP_BOTTOM = 0x20000,
HAL_3D_IN_INTERLEAVE = 0x40000,
HAL_3D_IN_SIDE_BY_SIDE_R_L = 0x80000,
HAL_3D_OUT_SIDE_BY_SIDE = 0x1000,
HAL_3D_OUT_TOP_BOTTOM = 0x2000,
HAL_3D_OUT_INTERLEAVE = 0x4000,
HAL_3D_OUT_MONOSCOPIC = 0x8000
};
enum {
BUFFER_TYPE_UI = 0,
BUFFER_TYPE_VIDEO
};
#if defined(HDMI_DUAL_DISPLAY)
enum hdmi_mirroring_state {
HDMI_NO_MIRRORING,
HDMI_UI_MIRRORING,
HDMI_ORIGINAL_RESOLUTION_MIRRORING
};
#endif
/*****************************************************************************/
struct private_module_t;
struct private_handle_t;
struct PmemAllocator;
/* A buffer queued for display (see private_module_t::disp), paired
 * with its index. NOTE(review): idx presumably indexes
 * private_module_t::avail[] / the framebuffer slots - confirm against
 * the fb HAL before relying on it. */
struct qbuf_t {
    buffer_handle_t buf;
    int idx;
};
/* Lifecycle state of a framebuffer slot (see avail_t::state).
 * NOTE(review): names suggest SUBmitted / REFerenced / AVaiLable -
 * verify against the fb HAL code that drives these transitions. */
enum buf_state {
    SUB,
    REF,
    AVL
};
/* Per-framebuffer-slot availability record. 'lock' guards the fields
 * below; 'cond' is the condition variable associated with them (the
 * C++-only members are hidden from C translation units, which only
 * need the synchronization primitives). */
struct avail_t {
    pthread_mutex_t lock;
    pthread_cond_t cond;
#ifdef __cplusplus
    bool is_avail;
    buf_state state;
#endif
};
/* Gralloc module instance state. The gralloc_module_t member must
 * remain first so a private_module_t* can be used wherever the HAL
 * expects a gralloc_module_t* / hw_module_t*. */
struct private_module_t {
    gralloc_module_t base;
    struct private_handle_t* framebuffer; // handle covering the whole fb memory
    uint32_t fbFormat;    // pixel format of the framebuffer
    uint32_t flags;
    uint32_t numBuffers;  // number of fb buffers (page-flip slots)
    uint32_t bufferMask;  // bitmask of fb slots already handed out
    pthread_mutex_t lock; // guards the allocation state above
    buffer_handle_t currentBuffer;
    struct fb_var_screeninfo info;   // cached LINUX fb_var data
    struct fb_fix_screeninfo finfo;  // cached LINUX fb_fix data
    float xdpi;
    float ydpi;
    float fps;
    int swapInterval;
#ifdef __cplusplus
    Queue<struct qbuf_t> disp; // non-empty when buffer is ready for display
#endif
    int currentIdx;
    struct avail_t avail[NUM_FRAMEBUFFERS_MAX]; // per-slot availability records
    pthread_mutex_t qlock; // guards 'disp' and related display-queue state
    pthread_cond_t qpost;  // signaled when a buffer is queued for post
    enum {
        // flag to indicate we'll post this buffer
        PRIV_USAGE_LOCKED_FOR_POST = 0x80000000,
        PRIV_MIN_SWAP_INTERVAL = 0,
        PRIV_MAX_SWAP_INTERVAL = 1,
    };
#if defined(__cplusplus) && defined(HDMI_DUAL_DISPLAY)
    /* External (HDMI) display mirroring state - C++ only. */
    Overlay* pobjOverlay;
    int orientation;
    bool videoOverlay;
    uint32_t currentOffset;
    int enableHDMIOutput; // holds the type of external display
    bool trueMirrorSupport;
    bool exitHDMIUILoop;
    float actionsafeWidthRatio;
    float actionsafeHeightRatio;
    bool hdmiStateChanged;
    hdmi_mirroring_state hdmiMirroringState;
    pthread_mutex_t overlayLock; // guards the HDMI/overlay state above
    pthread_cond_t overlayPost;
#endif
};
/*****************************************************************************/
#ifdef __cplusplus
/* Gralloc buffer handle, shared across processes via binder.
 * The C and C++ declarations below must describe the identical memory
 * layout: in C++ the native_handle is the base class, in C it is the
 * first member. */
struct private_handle_t : public native_handle {
#else
struct private_handle_t {
    native_handle_t nativeHandle;
#endif
    enum {
        PRIV_FLAGS_FRAMEBUFFER = 0x00000001,
        PRIV_FLAGS_USES_PMEM = 0x00000002,
        PRIV_FLAGS_USES_PMEM_ADSP = 0x00000004,
        PRIV_FLAGS_USES_ION = 0x00000008,
        PRIV_FLAGS_USES_ASHMEM = 0x00000010,
        PRIV_FLAGS_NEEDS_FLUSH = 0x00000020,
        PRIV_FLAGS_DO_NOT_FLUSH = 0x00000040,
        PRIV_FLAGS_SW_LOCK = 0x00000080,
        PRIV_FLAGS_NONCONTIGUOUS_MEM = 0x00000100,
        PRIV_FLAGS_HWC_LOCK = 0x00000200, // Set by HWC when storing the handle
        PRIV_FLAGS_SECURE_BUFFER = 0x00000400,
        PRIV_FLAGS_UNSYNCHRONIZED = 0x00000800, // For explicit synchronization
        PRIV_FLAGS_NOT_MAPPED = 0x00001000, // Not mapped in userspace
        PRIV_FLAGS_EXTERNAL_ONLY = 0x00002000, // Display on external only
        PRIV_FLAGS_EXTERNAL_BLOCK = 0x00004000, // Display only this buffer on external
    };
    // file-descriptors (counted by sNumFds - keep in sync!)
    int fd;
    int genlockHandle; // genlock handle to be dup'd by the binder
    // ints (counted by sNumInts - keep in sync!)
    int magic;
    int flags;
    int size;
    int offset;
    int bufferType;
    // FIXME: the attributes below should be out-of-line
    int base;
    int gpuaddr; // The gpu address mapped into the mmu. If using ashmem, set to 0 They don't care
    int pid;
    int format;
    int width;
    int height;
    int genlockPrivFd; // local fd of the genlock device.
#ifdef __cplusplus
    // sNumInts/sNumFds MUST match the int and fd members above
    // (12 ints: magic..genlockPrivFd, 2 fds: fd, genlockHandle);
    // the binder transports exactly these counts.
    static const int sNumInts = 12;
    static const int sNumFds = 2;
    static const int sMagic = 'gmsm';
    private_handle_t(int fd, int size, int flags, int bufferType, int format, int width, int height) :
        fd(fd), genlockHandle(-1), magic(sMagic), flags(flags), size(size), offset(0),
        bufferType(bufferType), base(0), gpuaddr(0), pid(getpid()), format(format),
        width(width), height(height), genlockPrivFd(-1)
    {
        version = sizeof(native_handle);
        numInts = sNumInts;
        numFds = sNumFds;
    }
    ~private_handle_t() {
        // Poison the magic so a stale/dangling handle fails validate().
        magic = 0;
    }
    bool usesPhysicallyContiguousMemory() {
        return (flags & PRIV_FLAGS_USES_PMEM) != 0;
    }
    // Check that a native_handle really is one of ours (size, fd/int
    // counts, magic). Returns 0 when valid, -EINVAL otherwise.
    static int validate(const native_handle* h) {
        const private_handle_t* hnd = (const private_handle_t*)h;
        if (!h || h->version != sizeof(native_handle) ||
                h->numInts != sNumInts || h->numFds != sNumFds ||
                hnd->magic != sMagic)
        {
            ALOGE("invalid gralloc handle (at %p)", h);
            return -EINVAL;
        }
        return 0;
    }
    // Checked downcast: NULL when 'in' is not a valid private_handle_t.
    static private_handle_t* dynamicCast(const native_handle* in) {
        if (validate(in) == 0) {
            return (private_handle_t*) in;
        }
        return NULL;
    }
#endif
};
#endif /* GRALLOC_PRIV_H_ */

836
libgralloc/ion_msm.h Normal file
View File

@ -0,0 +1,836 @@
/*
* include/linux/ion.h
*
* Copyright (C) 2011 Google, Inc.
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _LINUX_ION_H
#define _LINUX_ION_H
#include <linux/ioctl.h>
#include <linux/types.h>
struct ion_handle;
/**
* enum ion_heap_types - list of all possible types of heaps
* @ION_HEAP_TYPE_SYSTEM: memory allocated via vmalloc
* @ION_HEAP_TYPE_SYSTEM_CONTIG: memory allocated via kmalloc
* @ION_HEAP_TYPE_CARVEOUT: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous
* @ION_HEAP_TYPE_IOMMU: IOMMU memory
* @ION_HEAP_TYPE_CP: memory allocated from a prereserved
* carveout heap, allocations are physically
* contiguous. Used for content protection.
* @ION_HEAP_END: helper for iterating over heaps
*/
enum ion_heap_type {
ION_HEAP_TYPE_SYSTEM,
ION_HEAP_TYPE_SYSTEM_CONTIG,
ION_HEAP_TYPE_CARVEOUT,
ION_HEAP_TYPE_IOMMU,
ION_HEAP_TYPE_CP,
ION_HEAP_TYPE_CUSTOM, /* must be last so device specific heaps always
are at the end of this enum */
ION_NUM_HEAPS,
};
#define ION_HEAP_SYSTEM_MASK (1 << ION_HEAP_TYPE_SYSTEM)
#define ION_HEAP_SYSTEM_CONTIG_MASK (1 << ION_HEAP_TYPE_SYSTEM_CONTIG)
#define ION_HEAP_CARVEOUT_MASK (1 << ION_HEAP_TYPE_CARVEOUT)
#define ION_HEAP_CP_MASK (1 << ION_HEAP_TYPE_CP)
/**
* These are the only ids that should be used for Ion heap ids.
* The ids listed are the order in which allocation will be attempted
* if specified. Don't swap the order of heap ids unless you know what
* you are doing!
* Id's are spaced by purpose to allow new Id's to be inserted in-between (for
* possible fallbacks)
*/
enum ion_heap_ids {
INVALID_HEAP_ID = -1,
ION_CP_MM_HEAP_ID = 8,
ION_CP_MFC_HEAP_ID = 12,
ION_CP_WB_HEAP_ID = 16, /* 8660 only */
ION_CAMERA_HEAP_ID = 20, /* 8660 only */
ION_SF_HEAP_ID = 24,
ION_IOMMU_HEAP_ID = 25,
ION_QSECOM_HEAP_ID = 27,
ION_AUDIO_HEAP_ID = 28,
ION_MM_FIRMWARE_HEAP_ID = 29,
ION_SYSTEM_HEAP_ID = 30,
ION_HEAP_ID_RESERVED = 31 /** Bit reserved for ION_SECURE flag */
};
enum ion_fixed_position {
NOT_FIXED,
FIXED_LOW,
FIXED_MIDDLE,
FIXED_HIGH,
};
/**
* Flag to use when allocating to indicate that a heap is secure.
*/
#define ION_SECURE (1 << ION_HEAP_ID_RESERVED)
/**
* Macro should be used with ion_heap_ids defined above.
*/
#define ION_HEAP(bit) (1 << (bit))
#define ION_VMALLOC_HEAP_NAME "vmalloc"
#define ION_AUDIO_HEAP_NAME "audio"
#define ION_SF_HEAP_NAME "sf"
#define ION_MM_HEAP_NAME "mm"
#define ION_CAMERA_HEAP_NAME "camera_preview"
#define ION_IOMMU_HEAP_NAME "iommu"
#define ION_MFC_HEAP_NAME "mfc"
#define ION_WB_HEAP_NAME "wb"
#define ION_MM_FIRMWARE_HEAP_NAME "mm_fw"
#define ION_QSECOM_HEAP_NAME "qsecom"
#define ION_FMEM_HEAP_NAME "fmem"
#define CACHED 1
#define UNCACHED 0
#define ION_CACHE_SHIFT 0
#define ION_SET_CACHE(__cache) ((__cache) << ION_CACHE_SHIFT)
#define ION_IS_CACHED(__flags) ((__flags) & (1 << ION_CACHE_SHIFT))
/*
* This flag allows clients when mapping into the IOMMU to specify to
* defer un-mapping from the IOMMU until the buffer memory is freed.
*/
#define ION_IOMMU_UNMAP_DELAYED 1
#ifdef __KERNEL__
#include <linux/err.h>
#include <mach/ion.h>
struct ion_device;
struct ion_heap;
struct ion_mapper;
struct ion_client;
struct ion_buffer;
/* This should be removed some day when phys_addr_t's are fully
plumbed in the kernel, and all instances of ion_phys_addr_t should
be converted to phys_addr_t. For the time being many kernel interfaces
do not accept phys_addr_t's that would have to */
#define ion_phys_addr_t unsigned long
#define ion_virt_addr_t unsigned long
/**
* struct ion_platform_heap - defines a heap in the given platform
* @type: type of the heap from ion_heap_type enum
* @id: unique identifier for heap. When allocating (lower numbers
* will be allocated from first)
* @name: used for debug purposes
* @base: base address of heap in physical memory if applicable
* @size: size of the heap in bytes if applicable
* @memory_type:Memory type used for the heap
* @extra_data: Extra data specific to each heap type
*/
struct ion_platform_heap {
enum ion_heap_type type;
unsigned int id;
const char *name;
ion_phys_addr_t base;
size_t size;
enum ion_memory_types memory_type;
void *extra_data;
};
/**
* struct ion_cp_heap_pdata - defines a content protection heap in the given
* platform
* @permission_type: Memory ID used to identify the memory to TZ
* @align: Alignment requirement for the memory
* @secure_base: Base address for securing the heap.
* Note: This might be different from actual base address
* of this heap in the case of a shared heap.
* @secure_size: Memory size for securing the heap.
* Note: This might be different from actual size
* of this heap in the case of a shared heap.
* @reusable Flag indicating whether this heap is reusable of not.
* (see FMEM)
* @mem_is_fmem Flag indicating whether this memory is coming from fmem
* or not.
* @fixed_position If nonzero, position in the fixed area.
* @virt_addr: Virtual address used when using fmem.
* @request_region: function to be called when the number of allocations
* goes from 0 -> 1
* @release_region: function to be called when the number of allocations
* goes from 1 -> 0
* @setup_region: function to be called upon ion registration
*
*/
struct ion_cp_heap_pdata {
enum ion_permission_type permission_type;
unsigned int align;
ion_phys_addr_t secure_base; /* Base addr used when heap is shared */
size_t secure_size; /* Size used for securing heap when heap is shared*/
int reusable;
int mem_is_fmem;
enum ion_fixed_position fixed_position;
ion_virt_addr_t *virt_addr;
int (*request_region)(void *);
int (*release_region)(void *);
void *(*setup_region)(void);
};
/**
* struct ion_co_heap_pdata - defines a carveout heap in the given platform
* @adjacent_mem_id: Id of heap that this heap must be adjacent to.
* @align: Alignment requirement for the memory
* @mem_is_fmem Flag indicating whether this memory is coming from fmem
* or not.
* @fixed_position If nonzero, position in the fixed area.
* @request_region: function to be called when the number of allocations
* goes from 0 -> 1
* @release_region: function to be called when the number of allocations
* goes from 1 -> 0
* @setup_region: function to be called upon ion registration
*
*/
struct ion_co_heap_pdata {
int adjacent_mem_id;
unsigned int align;
int mem_is_fmem;
enum ion_fixed_position fixed_position;
int (*request_region)(void *);
int (*release_region)(void *);
void *(*setup_region)(void);
};
/**
* struct ion_platform_data - array of platform heaps passed from board file
* @nr: number of structures in the array
* @request_region: function to be called when the number of allocations goes
* from 0 -> 1
* @release_region: function to be called when the number of allocations goes
* from 1 -> 0
* @setup_region: function to be called upon ion registration
* @heaps: array of platform_heap structions
*
* Provided by the board file in the form of platform data to a platform device.
*/
struct ion_platform_data {
int nr;
int (*request_region)(void *);
int (*release_region)(void *);
void *(*setup_region)(void);
struct ion_platform_heap heaps[];
};
#ifdef CONFIG_ION
/**
* ion_client_create() - allocate a client and returns it
* @dev: the global ion device
* @heap_mask: mask of heaps this client can allocate from
* @name: used for debugging
*/
struct ion_client *ion_client_create(struct ion_device *dev,
unsigned int heap_mask, const char *name);
/**
* msm_ion_client_create - allocate a client using the ion_device specified in
* drivers/gpu/ion/msm/msm_ion.c
*
* heap_mask and name are the same as ion_client_create, return values
* are the same as ion_client_create.
*/
struct ion_client *msm_ion_client_create(unsigned int heap_mask,
const char *name);
/**
* ion_client_destroy() - free's a client and all it's handles
* @client: the client
*
* Free the provided client and all it's resources including
* any handles it is holding.
*/
void ion_client_destroy(struct ion_client *client);
/**
* ion_alloc - allocate ion memory
* @client: the client
* @len: size of the allocation
* @align: requested allocation alignment, lots of hardware blocks have
* alignment requirements of some kind
* @flags: mask of heaps to allocate from, if multiple bits are set
* heaps will be tried in order from lowest to highest order bit
*
* Allocate memory in one of the heaps provided in heap mask and return
* an opaque handle to it.
*/
struct ion_handle *ion_alloc(struct ion_client *client, size_t len,
size_t align, unsigned int flags);
/**
* ion_free - free a handle
* @client: the client
* @handle: the handle to free
*
* Free the provided handle.
*/
void ion_free(struct ion_client *client, struct ion_handle *handle);
/**
* ion_phys - returns the physical address and len of a handle
* @client: the client
* @handle: the handle
* @addr: a pointer to put the address in
* @len: a pointer to put the length in
*
* This function queries the heap for a particular handle to get the
 * handle's physical address. Its output is only correct if
* a heap returns physically contiguous memory -- in other cases
* this api should not be implemented -- ion_map_dma should be used
* instead. Returns -EINVAL if the handle is invalid. This has
* no implications on the reference counting of the handle --
* the returned value may not be valid if the caller is not
* holding a reference.
*/
int ion_phys(struct ion_client *client, struct ion_handle *handle,
ion_phys_addr_t *addr, size_t *len);
/**
* ion_map_kernel - create mapping for the given handle
* @client: the client
* @handle: handle to map
* @flags: flags for this mapping
*
* Map the given handle into the kernel and return a kernel address that
* can be used to access this address. If no flags are specified, this
* will return a non-secure uncached mapping.
*/
void *ion_map_kernel(struct ion_client *client, struct ion_handle *handle,
unsigned long flags);
/**
* ion_unmap_kernel() - destroy a kernel mapping for a handle
* @client: the client
* @handle: handle to unmap
*/
void ion_unmap_kernel(struct ion_client *client, struct ion_handle *handle);
/**
* ion_map_dma - create a dma mapping for a given handle
* @client: the client
* @handle: handle to map
*
* Return an sglist describing the given handle
*/
struct scatterlist *ion_map_dma(struct ion_client *client,
struct ion_handle *handle,
unsigned long flags);
/**
* ion_unmap_dma() - destroy a dma mapping for a handle
* @client: the client
* @handle: handle to unmap
*/
void ion_unmap_dma(struct ion_client *client, struct ion_handle *handle);
/**
* ion_share() - given a handle, obtain a buffer to pass to other clients
* @client: the client
* @handle: the handle to share
*
* Given a handle, return a buffer, which exists in a global name
* space, and can be passed to other clients. Should be passed into ion_import
* to obtain a new handle for this buffer.
*
* NOTE: This function does do not an extra reference. The burden is on the
* caller to make sure the buffer doesn't go away while it's being passed to
* another client. That is, ion_free should not be called on this handle until
* the buffer has been imported into the other client.
*/
struct ion_buffer *ion_share(struct ion_client *client,
struct ion_handle *handle);
/**
* ion_import() - given an buffer in another client, import it
* @client: this blocks client
* @buffer: the buffer to import (as obtained from ion_share)
*
* Given a buffer, add it to the client and return the handle to use to refer
* to it further. This is called to share a handle from one kernel client to
* another.
*/
struct ion_handle *ion_import(struct ion_client *client,
struct ion_buffer *buffer);
/**
* ion_import_fd() - given an fd obtained via ION_IOC_SHARE ioctl, import it
* @client: this blocks client
* @fd: the fd
*
 * A helper function for drivers that will be receiving ion buffers shared
 * with them from userspace. These buffers are represented by a file
 * descriptor obtained as the return from the ION_IOC_SHARE ioctl.
 * This function converts that fd into the underlying buffer, and returns
* the handle to use to refer to it further.
*/
struct ion_handle *ion_import_fd(struct ion_client *client, int fd);
/**
* ion_handle_get_flags - get the flags for a given handle
*
* @client - client who allocated the handle
* @handle - handle to get the flags
* @flags - pointer to store the flags
*
* Gets the current flags for a handle. These flags indicate various options
* of the buffer (caching, security, etc.)
*/
int ion_handle_get_flags(struct ion_client *client, struct ion_handle *handle,
unsigned long *flags);
/**
* ion_map_iommu - map the given handle into an iommu
*
* @client - client who allocated the handle
* @handle - handle to map
* @domain_num - domain number to map to
* @partition_num - partition number to allocate iova from
* @align - alignment for the iova
* @iova_length - length of iova to map. If the iova length is
* greater than the handle length, the remaining
* address space will be mapped to a dummy buffer.
* @iova - pointer to store the iova address
* @buffer_size - pointer to store the size of the buffer
* @flags - flags for options to map
* @iommu_flags - flags specific to the iommu.
*
* Maps the handle into the iova space specified via domain number. Iova
* will be allocated from the partition specified via partition_num.
* Returns 0 on success, negative value on error.
*/
int ion_map_iommu(struct ion_client *client, struct ion_handle *handle,
int domain_num, int partition_num, unsigned long align,
unsigned long iova_length, unsigned long *iova,
unsigned long *buffer_size,
unsigned long flags, unsigned long iommu_flags);
/**
* ion_handle_get_size - get the allocated size of a given handle
*
* @client - client who allocated the handle
* @handle - handle to get the size
* @size - pointer to store the size
*
* gives the allocated size of a handle. returns 0 on success, negative
* value on error
*
* NOTE: This is intended to be used only to get a size to pass to map_iommu.
* You should *NOT* rely on this for any other usage.
*/
int ion_handle_get_size(struct ion_client *client, struct ion_handle *handle,
unsigned long *size);
/**
* ion_unmap_iommu - unmap the handle from an iommu
*
* @client - client who allocated the handle
* @handle - handle to unmap
* @domain_num - domain to unmap from
* @partition_num - partition to unmap from
*
* Decrement the reference count on the iommu mapping. If the count is
* 0, the mapping will be removed from the iommu.
*/
void ion_unmap_iommu(struct ion_client *client, struct ion_handle *handle,
int domain_num, int partition_num);
/**
* ion_secure_heap - secure a heap
*
* @client - a client that has allocated from the heap heap_id
* @heap_id - heap id to secure.
*
* Secure a heap
* Returns 0 on success
*/
int ion_secure_heap(struct ion_device *dev, int heap_id);
/**
* ion_unsecure_heap - un-secure a heap
*
* @client - a client that has allocated from the heap heap_id
* @heap_id - heap id to un-secure.
*
* Un-secure a heap
* Returns 0 on success
*/
int ion_unsecure_heap(struct ion_device *dev, int heap_id);
/**
* msm_ion_secure_heap - secure a heap. Wrapper around ion_secure_heap.
*
* @heap_id - heap id to secure.
*
* Secure a heap
* Returns 0 on success
*/
int msm_ion_secure_heap(int heap_id);
/**
* msm_ion_unsecure_heap - unsecure a heap. Wrapper around ion_unsecure_heap.
*
* @heap_id - heap id to secure.
*
* Un-secure a heap
* Returns 0 on success
*/
int msm_ion_unsecure_heap(int heap_id);
/**
* msm_ion_do_cache_op - do cache operations.
*
* @client - pointer to ION client.
* @handle - pointer to buffer handle.
* @vaddr - virtual address to operate on.
* @len - Length of data to do cache operation on.
* @cmd - Cache operation to perform:
* ION_IOC_CLEAN_CACHES
* ION_IOC_INV_CACHES
* ION_IOC_CLEAN_INV_CACHES
*
* Returns 0 on success
*/
int msm_ion_do_cache_op(struct ion_client *client, struct ion_handle *handle,
void *vaddr, unsigned long len, unsigned int cmd);
#else
/*
 * CONFIG_ION disabled: stub implementations of the kernel-side ION API.
 * Pointer-returning functions yield ERR_PTR(-ENODEV), int-returning
 * functions return -ENODEV, and void functions are no-ops, so callers
 * compile and fail gracefully at runtime.
 */
static inline struct ion_client *ion_client_create(struct ion_device *dev,
        unsigned int heap_mask, const char *name)
{
    return ERR_PTR(-ENODEV);
}
static inline struct ion_client *msm_ion_client_create(unsigned int heap_mask,
        const char *name)
{
    return ERR_PTR(-ENODEV);
}
static inline void ion_client_destroy(struct ion_client *client) { }
static inline struct ion_handle *ion_alloc(struct ion_client *client,
        size_t len, size_t align, unsigned int flags)
{
    return ERR_PTR(-ENODEV);
}
static inline void ion_free(struct ion_client *client,
        struct ion_handle *handle) { }
static inline int ion_phys(struct ion_client *client,
        struct ion_handle *handle, ion_phys_addr_t *addr, size_t *len)
{
    return -ENODEV;
}
static inline void *ion_map_kernel(struct ion_client *client,
        struct ion_handle *handle, unsigned long flags)
{
    return ERR_PTR(-ENODEV);
}
static inline void ion_unmap_kernel(struct ion_client *client,
        struct ion_handle *handle) { }
static inline struct scatterlist *ion_map_dma(struct ion_client *client,
        struct ion_handle *handle, unsigned long flags)
{
    return ERR_PTR(-ENODEV);
}
static inline void ion_unmap_dma(struct ion_client *client,
        struct ion_handle *handle) { }
static inline struct ion_buffer *ion_share(struct ion_client *client,
        struct ion_handle *handle)
{
    return ERR_PTR(-ENODEV);
}
static inline struct ion_handle *ion_import(struct ion_client *client,
        struct ion_buffer *buffer)
{
    return ERR_PTR(-ENODEV);
}
static inline struct ion_handle *ion_import_fd(struct ion_client *client,
        int fd)
{
    return ERR_PTR(-ENODEV);
}
static inline int ion_handle_get_flags(struct ion_client *client,
        struct ion_handle *handle, unsigned long *flags)
{
    return -ENODEV;
}
static inline int ion_map_iommu(struct ion_client *client,
        struct ion_handle *handle, int domain_num,
        int partition_num, unsigned long align,
        unsigned long iova_length, unsigned long *iova,
        unsigned long *buffer_size,
        unsigned long flags,
        unsigned long iommu_flags)
{
    return -ENODEV;
}
static inline void ion_unmap_iommu(struct ion_client *client,
        struct ion_handle *handle, int domain_num,
        int partition_num)
{
    return;
}
static inline int ion_secure_heap(struct ion_device *dev, int heap_id)
{
    return -ENODEV;
}
static inline int ion_unsecure_heap(struct ion_device *dev, int heap_id)
{
    return -ENODEV;
}
static inline int msm_ion_secure_heap(int heap_id)
{
    return -ENODEV;
}
static inline int msm_ion_unsecure_heap(int heap_id)
{
    return -ENODEV;
}
static inline int msm_ion_do_cache_op(struct ion_client *client,
        struct ion_handle *handle, void *vaddr,
        unsigned long len, unsigned int cmd)
{
    return -ENODEV;
}
#endif /* CONFIG_ION */
#endif /* __KERNEL__ */
/**
* DOC: Ion Userspace API
*
* create a client by opening /dev/ion
* most operations handled via following ioctls
*
*/
/**
* struct ion_allocation_data - metadata passed from userspace for allocations
* @len: size of the allocation
* @align: required alignment of the allocation
* @flags: flags passed to heap
* @handle: pointer that will be populated with a cookie to use to refer
* to this allocation
*
* Provided by userspace as an argument to the ioctl
*/
struct ion_allocation_data {
size_t len;
size_t align;
unsigned int flags;
struct ion_handle *handle;
};
/**
* struct ion_fd_data - metadata passed to/from userspace for a handle/fd pair
* @handle: a handle
* @fd: a file descriptor representing that handle
*
* For ION_IOC_SHARE or ION_IOC_MAP userspace populates the handle field with
* the handle returned from ion alloc, and the kernel returns the file
* descriptor to share or map in the fd field. For ION_IOC_IMPORT, userspace
* provides the file descriptor and the kernel returns the handle.
*/
struct ion_fd_data {
struct ion_handle *handle;
int fd;
};
/**
* struct ion_handle_data - a handle passed to/from the kernel
* @handle: a handle
*/
struct ion_handle_data {
struct ion_handle *handle;
};
/**
* struct ion_custom_data - metadata passed to/from userspace for a custom ioctl
* @cmd: the custom ioctl function to call
* @arg: additional data to pass to the custom ioctl, typically a user
* pointer to a predefined structure
*
* This works just like the regular cmd and arg fields of an ioctl.
*/
struct ion_custom_data {
unsigned int cmd;
unsigned long arg;
};
/* struct ion_flush_data - data passed to ion for flushing caches
*
* @handle: handle with data to flush
* @fd: fd to flush
* @vaddr: userspace virtual address mapped with mmap
* @offset: offset into the handle to flush
* @length: length of handle to flush
*
* Performs cache operations on the handle. If p is the start address
* of the handle, p + offset through p + offset + length will have
* the cache operations performed
*/
struct ion_flush_data {
struct ion_handle *handle;
int fd;
void *vaddr;
unsigned int offset;
unsigned int length;
};
/* struct ion_flag_data - information about flags for this buffer
*
* @handle: handle to get flags from
* @flags: flags of this handle
*
* Takes handle as an input and outputs the flags from the handle
* in the flag field.
*/
struct ion_flag_data {
struct ion_handle *handle;
unsigned long flags;
};
#define ION_IOC_MAGIC 'I'
/**
* DOC: ION_IOC_ALLOC - allocate memory
*
* Takes an ion_allocation_data struct and returns it with the handle field
* populated with the opaque handle for the allocation.
*/
#define ION_IOC_ALLOC _IOWR(ION_IOC_MAGIC, 0, \
struct ion_allocation_data)
/**
* DOC: ION_IOC_FREE - free memory
*
* Takes an ion_handle_data struct and frees the handle.
*/
#define ION_IOC_FREE _IOWR(ION_IOC_MAGIC, 1, struct ion_handle_data)
/**
* DOC: ION_IOC_MAP - get a file descriptor to mmap
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be used as an argument to mmap.
*/
#define ION_IOC_MAP _IOWR(ION_IOC_MAGIC, 2, struct ion_fd_data)
/**
* DOC: ION_IOC_SHARE - creates a file descriptor to use to share an allocation
*
* Takes an ion_fd_data struct with the handle field populated with a valid
* opaque handle. Returns the struct with the fd field set to a file
* descriptor open in the current address space. This file descriptor
* can then be passed to another process. The corresponding opaque handle can
* be retrieved via ION_IOC_IMPORT.
*/
#define ION_IOC_SHARE _IOWR(ION_IOC_MAGIC, 4, struct ion_fd_data)
/**
* DOC: ION_IOC_IMPORT - imports a shared file descriptor
*
* Takes an ion_fd_data struct with the fd field populated with a valid file
* descriptor obtained from ION_IOC_SHARE and returns the struct with the handle
 * field set to the corresponding opaque handle.
*/
#define ION_IOC_IMPORT _IOWR(ION_IOC_MAGIC, 5, int)
/**
* DOC: ION_IOC_CUSTOM - call architecture specific ion ioctl
*
* Takes the argument of the architecture specific ioctl to call and
* passes appropriate userdata for that ioctl
*/
#define ION_IOC_CUSTOM _IOWR(ION_IOC_MAGIC, 6, struct ion_custom_data)
/**
* DOC: ION_IOC_CLEAN_CACHES - clean the caches
*
* Clean the caches of the handle specified.
*/
#define ION_IOC_CLEAN_CACHES _IOWR(ION_IOC_MAGIC, 7, \
struct ion_flush_data)
/**
* DOC: ION_MSM_IOC_INV_CACHES - invalidate the caches
*
* Invalidate the caches of the handle specified.
*/
#define ION_IOC_INV_CACHES _IOWR(ION_IOC_MAGIC, 8, \
struct ion_flush_data)
/**
* DOC: ION_MSM_IOC_CLEAN_CACHES - clean and invalidate the caches
*
* Clean and invalidate the caches of the handle specified.
*/
#define ION_IOC_CLEAN_INV_CACHES _IOWR(ION_IOC_MAGIC, 9, \
struct ion_flush_data)
/**
* DOC: ION_IOC_GET_FLAGS - get the flags of the handle
*
* Gets the flags of the current handle which indicate cachability,
* secure state etc.
*/
#define ION_IOC_GET_FLAGS _IOWR(ION_IOC_MAGIC, 10, \
struct ion_flag_data)
#endif /* _LINUX_ION_H */

240
libgralloc/ionalloc.cpp Normal file
View File

@ -0,0 +1,240 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <linux/ioctl.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <fcntl.h>
#include <cutils/log.h>
#include <errno.h>
#include "gralloc_priv.h"
#include "ionalloc.h"
using gralloc::IonAlloc;
#define ION_DEVICE "/dev/ion"
/*
 * Lazily open the ION device node and cache the fd in mIonFd.
 * The cached fd is reused across calls; on open failure mIonFd is
 * reset to the FD_INIT sentinel so the next call retries.
 * Returns 0 on success, -errno otherwise.
 */
int IonAlloc::open_device()
{
    if(mIonFd == FD_INIT)
        mIonFd = open(ION_DEVICE, O_RDONLY);
    if(mIonFd < 0 ) {
        ALOGE("%s: Failed to open ion device - %s",
                __FUNCTION__, strerror(errno));
        mIonFd = FD_INIT;
        return -errno;
    }
    return 0;
}
/*
 * Close the cached ION device fd (if open) and reset the cache to
 * its sentinel so open_device() will reopen on next use.
 */
void IonAlloc::close_device()
{
    const int cachedFd = mIonFd;
    mIonFd = FD_INIT;
    if (cachedFd >= 0)
        close(cachedFd);
}
/*
 * Allocate an ION buffer described by 'data' and, unless the caller set
 * PRIV_FLAGS_NOT_MAPPED in data.allocType, map and zero it in this
 * process (cleaning the cache so the zero-fill reaches memory).
 *
 * On success fills data.base (NULL when unmapped) and data.fd (the
 * shareable buffer fd) and returns 0. On failure returns -errno and
 * releases everything acquired so far.
 *
 * Fixes vs. the original: the mmap-failure path leaked both the mapped
 * buffer fd (fd_data.fd) and the uncached sync fd; ION_IOC_FREE is now
 * issued on the client fd (iFd) the handle was actually allocated on,
 * since ION handles are per-client; dead stores to the local ionSyncFd
 * before returns were dropped; mojibake in the O_DSYNC comment fixed.
 */
int IonAlloc::alloc_buffer(alloc_data& data)
{
    int err = 0;
    int ionSyncFd = FD_INIT;
    int iFd = FD_INIT;
    struct ion_handle_data handle_data;
    struct ion_fd_data fd_data;
    struct ion_allocation_data ionAllocData;
    void *base = 0;

    ionAllocData.len = data.size;
    ionAllocData.align = data.align;
    ionAllocData.flags = data.flags;

    err = open_device();
    if (err)
        return err;

    if(data.uncached) {
        // Use the sync FD to alloc and map when we need uncached memory.
        // FIX: O_DSYNC defined to open uncached - add that in kernel
        //ionSyncFd = open(ION_DEVICE, O_RDONLY|O_DSYNC);
        ionSyncFd = open(ION_DEVICE, O_RDONLY);
        if(ionSyncFd < 0) {
            ALOGE("%s: Failed to open ion device - %s",
                    __FUNCTION__, strerror(errno));
            return -errno;
        }
        iFd = ionSyncFd;
    } else {
        iFd = mIonFd;
    }

    if(ioctl(iFd, ION_IOC_ALLOC, &ionAllocData)) {
        err = -errno;
        ALOGE("ION_IOC_ALLOC failed with error - %s", strerror(errno));
        if(ionSyncFd >= 0)
            close(ionSyncFd);
        return err;
    }
    fd_data.handle = ionAllocData.handle;
    handle_data.handle = ionAllocData.handle;
    if(ioctl(iFd, ION_IOC_MAP, &fd_data)) {
        err = -errno;
        ALOGE("%s: ION_IOC_MAP failed with error - %s",
                __FUNCTION__, strerror(errno));
        // Free on the client (fd) that owns the handle.
        ioctl(iFd, ION_IOC_FREE, &handle_data);
        if(ionSyncFd >= 0)
            close(ionSyncFd);
        return err;
    }
    //if(!(data.flags & ION_SECURE) &&
    if(!(data.allocType & private_handle_t::PRIV_FLAGS_NOT_MAPPED)) {
        base = mmap(0, ionAllocData.len, PROT_READ|PROT_WRITE,
                MAP_SHARED, fd_data.fd, 0);
        if(base == MAP_FAILED) {
            err = -errno;
            ALOGE("%s: Failed to map the allocated memory: %s",
                    __FUNCTION__, strerror(errno));
            ioctl(iFd, ION_IOC_FREE, &handle_data);
            // BUGFIX: release the buffer fd and the sync fd, both of
            // which the original error path leaked.
            close(fd_data.fd);
            if(ionSyncFd >= 0)
                close(ionSyncFd);
            return err;
        }
        memset(base, 0, ionAllocData.len);
        // Clean cache after memset so the zeros reach main memory.
        clean_buffer(base, data.size, data.offset, fd_data.fd);
    }

    data.base = base;
    data.fd = fd_data.fd;
    // Drop our handle reference; the buffer stays alive via data.fd.
    ioctl(iFd, ION_IOC_FREE, &handle_data);
    // Close the uncached FD since we no longer need it.
    if(ionSyncFd >= 0)
        close(ionSyncFd);
    ALOGD("ion: Allocated buffer base:%p size:%d fd:%d",
            data.base, ionAllocData.len, data.fd);
    return 0;
}
// Release an ION buffer: unmap it (when a CPU mapping exists) and
// close its fd. The ION kernel refcount drops when the fd closes.
int IonAlloc::free_buffer(void* base, size_t size, int offset, int fd)
{
    ALOGD("ion: Freeing buffer base:%p size:%d fd:%d",
          base, size, fd);
    int status = open_device();
    if (status)
        return status;
    if (base)
        status = unmap_buffer(base, size, offset);
    close(fd);
    return status;
}
// Map an existing ION buffer fd into this process's address space.
// Returns 0 on success, -errno on failure; *pBase receives the mmap
// result either way (MAP_FAILED on error, matching the original).
int IonAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
{
    // It is a (quirky) requirement of ION to have opened the
    // ion fd in the process that is doing the mapping
    int status = open_device();
    if (status)
        return status;
    void *addr = mmap(0, size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);
    *pBase = addr;
    if (addr == MAP_FAILED) {
        status = -errno;
        ALOGD("ion: Failed to map memory in the client: %s",
              strerror(errno));
        return status;
    }
    ALOGD("ion: Mapped buffer base:%p size:%d offset:%d fd:%d",
          addr, size, offset, fd);
    return 0;
}
// Remove a CPU mapping created by map_buffer/alloc_buffer.
// The offset parameter is unused for ION mappings.
int IonAlloc::unmap_buffer(void *base, size_t size, int offset)
{
    ALOGD("ion: Unmapping buffer base:%p size:%d", base, size);
    if (munmap(base, size) == 0)
        return 0;
    const int status = -errno;
    ALOGE("ion: Failed to unmap memory at %p : %s",
          base, strerror(errno));
    return status;
}
// Write back and invalidate the CPU cache lines covering an ION
// buffer. The fd is temporarily imported to obtain an ION handle for
// the flush ioctl; that handle reference is released before returning.
int IonAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
{
    int result = open_device();
    if (result)
        return result;
    struct ion_fd_data fd_data;
    fd_data.fd = fd;
    if (ioctl(mIonFd, ION_IOC_IMPORT, &fd_data)) {
        result = -errno;
        ALOGE("%s: ION_IOC_IMPORT failed with error - %s",
              __FUNCTION__, strerror(errno));
        return result;
    }
    struct ion_handle_data handle_data;
    handle_data.handle = fd_data.handle;
    struct ion_flush_data flush_data;
    flush_data.handle = fd_data.handle;
    flush_data.vaddr = base;
    flush_data.offset = offset;
    flush_data.length = size;
    if (ioctl(mIonFd, ION_IOC_CLEAN_INV_CACHES, &flush_data)) {
        result = -errno;
        ALOGE("%s: ION_IOC_CLEAN_INV_CACHES failed with error - %s",
              __FUNCTION__, strerror(errno));
        ioctl(mIonFd, ION_IOC_FREE, &handle_data);
        return result;
    }
    ioctl(mIonFd, ION_IOC_FREE, &handle_data);
    return 0;
}

71
libgralloc/ionalloc.h Normal file
View File

@ -0,0 +1,71 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRALLOC_IONALLOC_H
#define GRALLOC_IONALLOC_H
#include "memalloc.h"
#include "ion_msm.h"
namespace gralloc {
// IMemAlloc back-end that allocates gralloc buffers from the ION
// kernel allocator. The device fd is opened lazily (open_device)
// and closed when this object is destroyed.
class IonAlloc : public IMemAlloc {
public:
    // Allocate (and, unless flagged otherwise, map and zero) a buffer
    // described by |data|; fills data.base and data.fd on success.
    virtual int alloc_buffer(alloc_data& data);
    // Unmap (when base != 0) and close the buffer fd.
    virtual int free_buffer(void *base, size_t size,
                            int offset, int fd);
    // Map an existing ION buffer fd into this process.
    virtual int map_buffer(void **pBase, size_t size,
                           int offset, int fd);
    // Undo map_buffer / the mapping made by alloc_buffer.
    virtual int unmap_buffer(void *base, size_t size,
                             int offset);
    // Clean + invalidate the CPU cache range backing the buffer.
    virtual int clean_buffer(void*base, size_t size,
                             int offset, int fd);
    IonAlloc() { mIonFd = FD_INIT; }
    ~IonAlloc() { close_device(); }
private:
    int mIonFd;   // ION device fd; FD_INIT until first use
    int open_device();
    void close_device();
};
}
#endif /* GRALLOC_IONALLOC_H */

366
libgralloc/mapper.cpp Executable file
View File

@ -0,0 +1,366 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <limits.h>
#include <errno.h>
#include <pthread.h>
#include <unistd.h>
#include <string.h>
#include <stdarg.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/ioctl.h>
#include <linux/ashmem.h>
#include <cutils/log.h>
#include <cutils/atomic.h>
#include <cutils/ashmem.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <genlock.h>
#include <linux/android_pmem.h>
#include "gralloc_priv.h"
#include "gr.h"
#include "alloc_controller.h"
#include "memalloc.h"
using namespace gralloc;
using android::sp;
/*****************************************************************************/
// Return the allocator (ION / pmem / ashmem back-end) that matches the
// buffer's private flags; used for mapping/unmapping on the client side.
static sp<IMemAlloc> getAllocator(int flags)
{
    sp<IAllocController> controller = IAllocController::getInstance(true);
    return controller->getAllocator(flags);
}
// Map a buffer's backing memory into this process and publish the CPU
// address in *vaddr (and hnd->base). Framebuffer and secure buffers
// are never CPU-mapped here.
static int gralloc_map(gralloc_module_t const* module,
                       buffer_handle_t handle,
                       void** vaddr)
{
    private_handle_t* hnd = (private_handle_t*)handle;
    void *mappedAddress;
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER) &&
        !(hnd->flags & private_handle_t::PRIV_FLAGS_SECURE_BUFFER)) {
        size_t size = hnd->size;
        sp<IMemAlloc> memalloc = getAllocator(hnd->flags) ;
        int err = memalloc->map_buffer(&mappedAddress, size,
                                       hnd->offset, hnd->fd);
        if(err) {
            ALOGE("Could not mmap handle %p, fd=%d (%s)",
                  handle, hnd->fd, strerror(errno));
            hnd->base = 0;
            // FIX: return the error captured at the failure point;
            // errno may have been clobbered by the logging above.
            return err;
        }
        // Defensive double-check: map_buffer implementations report
        // failure via err, but guard against a MAP_FAILED leak too.
        if (mappedAddress == MAP_FAILED) {
            ALOGE("Could not mmap handle %p, fd=%d (%s)",
                  handle, hnd->fd, strerror(errno));
            hnd->base = 0;
            return -errno;
        }
        hnd->base = intptr_t(mappedAddress) + hnd->offset;
        //ALOGD("gralloc_map() succeeded fd=%d, off=%d, size=%d, vaddr=%p",
        //        hnd->fd, hnd->offset, hnd->size, mappedAddress);
    }
    *vaddr = (void*)hnd->base;
    return 0;
}
// Drop this process's CPU mapping of a buffer and clear hnd->base.
// The framebuffer is mapped once for the process lifetime and is
// deliberately skipped here.
static int gralloc_unmap(gralloc_module_t const* module,
                         buffer_handle_t handle)
{
    private_handle_t* hnd = (private_handle_t*)handle;
    if (!(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
        void* base = (void*)hnd->base;
        size_t size = hnd->size;
        sp<IMemAlloc> allocator = getAllocator(hnd->flags);
        int rc = -EINVAL;
        if (allocator != NULL)
            rc = allocator->unmap_buffer(base, size, hnd->offset);
        if (rc) {
            ALOGE("Could not unmap memory at address %p", base);
        }
    }
    hnd->base = 0;
    return 0;
}
/*****************************************************************************/
static pthread_mutex_t sMapLock = PTHREAD_MUTEX_INITIALIZER;
/*****************************************************************************/
// Register a buffer handle received from another process: map its
// memory into this process and attach to its genlock. Handles created
// by this process are left as-is.
int gralloc_register_buffer(gralloc_module_t const* module,
                            buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;
    // In this implementation, we don't need to do anything here
    /* NOTE: we need to initialize the buffer as not mapped/not locked
     * because it shouldn't when this function is called the first time
     * in a new process. Ideally these flags shouldn't be part of the
     * handle, but instead maintained in the kernel or at least
     * out-of-line
     */
    // if this handle was created in this process, then we keep it as is.
    private_handle_t* hnd = (private_handle_t*)handle;
    if (hnd->pid != getpid()) {
        hnd->base = 0;
        void *vaddr;
        int err = gralloc_map(module, handle, &vaddr);
        if (err) {
            ALOGE("%s: gralloc_map failed", __FUNCTION__);
            return err;
        }
        // Reset the genlock private fd flag in the handle
        hnd->genlockPrivFd = -1;
        // Check if there is a valid lock attached to the handle.
        if (-1 == hnd->genlockHandle) {
            // No lock to attach to — undo the mapping before failing.
            ALOGE("%s: the lock is invalid.", __FUNCTION__);
            gralloc_unmap(module, handle);
            hnd->base = 0;
            return -EINVAL;
        }
        // Attach the genlock handle
        if (GENLOCK_NO_ERROR != genlock_attach_lock((native_handle_t *)handle)) {
            ALOGE("%s: genlock_attach_lock failed", __FUNCTION__);
            gralloc_unmap(module, handle);
            hnd->base = 0;
            return -EINVAL;
        }
    }
    return 0;
}
// Tear down a registration made by gralloc_register_buffer: unmap the
// buffer (if mapped) and release this process's genlock reference.
int gralloc_unregister_buffer(gralloc_module_t const* module,
                              buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;
    /*
     * If the buffer has been mapped during a lock operation, it's time
     * to un-map it. It's an error to be here with a locked buffer.
     * NOTE: the framebuffer is handled differently and is never unmapped.
     */
    private_handle_t* hnd = (private_handle_t*)handle;
    // never unmap buffers that were created in this process
    if (hnd->pid == getpid())
        return 0;
    if (hnd->base != 0)
        gralloc_unmap(module, handle);
    hnd->base = 0;
    if (hnd->genlockHandle == -1) {
        ALOGE("%s: there was no genlock attached to this buffer", __FUNCTION__);
        return -EINVAL;
    }
    // Release the genlock
    return genlock_release_lock((native_handle_t *)handle);
}
// Final cleanup for a handle that is about to be destroyed: unmap any
// CPU mapping this process still holds for it.
int terminateBuffer(gralloc_module_t const* module,
                    private_handle_t* hnd)
{
    /*
     * If the buffer has been mapped during a lock operation, it's time
     * to un-map it. It's an error to be here with a locked buffer.
     */
    if (hnd->base != 0) {
        // this buffer was mapped, unmap it now
        if (hnd->flags & (private_handle_t::PRIV_FLAGS_USES_PMEM |
                          private_handle_t::PRIV_FLAGS_USES_PMEM_ADSP |
                          private_handle_t::PRIV_FLAGS_USES_ASHMEM |
                          private_handle_t::PRIV_FLAGS_USES_ION)) {
            if (hnd->pid != getpid()) {
                // ... unless it's a "master" pmem buffer, that is a buffer
                // mapped in the process it's been allocated.
                // (see gralloc_alloc_buffer())
                gralloc_unmap(module, hnd);
            }
        } else {
            // Unknown backing store — log it, but still drop the mapping.
            ALOGE("terminateBuffer: unmapping a non pmem/ashmem buffer flags = 0x%x", hnd->flags);
            gralloc_unmap(module, hnd);
        }
    }
    return 0;
}
// Lock a buffer for CPU access: lazily map it, take the genlock in the
// requested mode (write wins over read) and return the CPU address in
// *vaddr. Buffers written by SW are flagged for a cache flush on unlock.
int gralloc_lock(gralloc_module_t const* module,
                 buffer_handle_t handle, int usage,
                 int l, int t, int w, int h,
                 void** vaddr)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;
    int err = 0;
    private_handle_t* hnd = (private_handle_t*)handle;
    if (usage & (GRALLOC_USAGE_SW_READ_MASK | GRALLOC_USAGE_SW_WRITE_MASK)) {
        if (hnd->base == 0) {
            // we need to map for real
            pthread_mutex_t* const lock = &sMapLock;
            pthread_mutex_lock(lock);
            err = gralloc_map(module, handle, vaddr);
            pthread_mutex_unlock(lock);
            // FIX: bail out on mapping failure instead of taking the
            // genlock on a buffer with no valid CPU address.
            if (err)
                return err;
        }
        *vaddr = (void*)hnd->base;
        // Lock the buffer for read/write operation as specified. Write lock
        // has a higher priority over read lock.
        int lockType = 0;
        if (usage & GRALLOC_USAGE_SW_WRITE_MASK) {
            lockType = GENLOCK_WRITE_LOCK;
        } else if (usage & GRALLOC_USAGE_SW_READ_MASK) {
            lockType = GENLOCK_READ_LOCK;
        }
        int timeout = GENLOCK_MAX_TIMEOUT;
        if (GENLOCK_NO_ERROR != genlock_lock_buffer((native_handle_t *)handle,
                                                    (genlock_lock_type)lockType,
                                                    timeout)) {
            ALOGE("%s: genlock_lock_buffer (lockType=0x%x) failed", __FUNCTION__,
                  lockType);
            return -EINVAL;
        } else {
            // Mark this buffer as locked for SW read/write operation.
            hnd->flags |= private_handle_t::PRIV_FLAGS_SW_LOCK;
        }
        if ((usage & GRALLOC_USAGE_SW_WRITE_MASK) &&
            !(hnd->flags & private_handle_t::PRIV_FLAGS_FRAMEBUFFER)) {
            // Mark the buffer to be flushed after cpu read/write
            hnd->flags |= private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
        }
    }
    return err;
}
// Unlock a buffer after CPU access: flush the cache if the buffer was
// written by SW, then release the genlock taken by gralloc_lock.
int gralloc_unlock(gralloc_module_t const* module,
                   buffer_handle_t handle)
{
    if (private_handle_t::validate(handle) < 0)
        return -EINVAL;
    private_handle_t* hnd = (private_handle_t*)handle;
    if (hnd->flags & private_handle_t::PRIV_FLAGS_NEEDS_FLUSH) {
        sp<IMemAlloc> allocator = getAllocator(hnd->flags);
        const int rc = allocator->clean_buffer((void*)hnd->base,
                                               hnd->size, hnd->offset, hnd->fd);
        ALOGE_IF(rc < 0, "cannot flush handle %p (offs=%x len=%x, flags = 0x%x) err=%s\n",
                 hnd, hnd->offset, hnd->size, hnd->flags, strerror(errno));
        hnd->flags &= ~private_handle_t::PRIV_FLAGS_NEEDS_FLUSH;
    }
    if (hnd->flags & private_handle_t::PRIV_FLAGS_SW_LOCK) {
        // Unlock the buffer.
        if (GENLOCK_NO_ERROR != genlock_unlock_buffer((native_handle_t *)handle)) {
            ALOGE("%s: genlock_unlock_buffer failed", __FUNCTION__);
            return -EINVAL;
        }
        hnd->flags &= ~private_handle_t::PRIV_FLAGS_SW_LOCK;
    }
    return 0;
}
/*****************************************************************************/
int gralloc_perform(struct gralloc_module_t const* module,
int operation, ... )
{
int res = -EINVAL;
va_list args;
va_start(args, operation);
switch (operation) {
case GRALLOC_MODULE_PERFORM_CREATE_HANDLE_FROM_BUFFER:
{
int fd = va_arg(args, int);
size_t size = va_arg(args, size_t);
size_t offset = va_arg(args, size_t);
void* base = va_arg(args, void*);
int width = va_arg(args, int);
int height = va_arg(args, int);
int format = va_arg(args, int);
native_handle_t** handle = va_arg(args, native_handle_t**);
int memoryFlags = va_arg(args, int);
private_handle_t* hnd = (private_handle_t*)native_handle_create(
private_handle_t::sNumFds, private_handle_t::sNumInts);
hnd->magic = private_handle_t::sMagic;
hnd->fd = fd;
unsigned int contigFlags = GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
GRALLOC_USAGE_PRIVATE_UI_CONTIG_HEAP |
GRALLOC_USAGE_PRIVATE_SMI_HEAP;
if (memoryFlags & contigFlags) {
// check if the buffer is a pmem buffer
pmem_region region;
if (ioctl(fd, PMEM_GET_SIZE, &region) < 0)
hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
else
hnd->flags = private_handle_t::PRIV_FLAGS_USES_PMEM |
private_handle_t::PRIV_FLAGS_DO_NOT_FLUSH;
} else {
if (memoryFlags & GRALLOC_USAGE_PRIVATE_ION)
hnd->flags = private_handle_t::PRIV_FLAGS_USES_ION;
else
hnd->flags = private_handle_t::PRIV_FLAGS_USES_ASHMEM;
}
hnd->size = size;
hnd->offset = offset;
hnd->base = intptr_t(base) + offset;
hnd->gpuaddr = 0;
hnd->width = width;
hnd->height = height;
hnd->format = format;
*handle = (native_handle_t *)hnd;
res = 0;
break;
}
default:
break;
}
va_end(args);
return res;
}

84
libgralloc/memalloc.h Normal file
View File

@ -0,0 +1,84 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRALLOC_MEMALLOC_H
#define GRALLOC_MEMALLOC_H
#include <stdlib.h>
#include <utils/RefBase.h>
namespace gralloc {
// Request/result descriptor shared by all allocator back-ends.
struct alloc_data {
    void *base;           // out: mapped CPU address (0 if not mapped)
    int fd;               // out: fd backing the allocation
    int offset;           // out: offset of the buffer within the fd
    size_t size;          // in:  requested size in bytes
    size_t align;         // in:  requested alignment in bytes
    unsigned int pHandle; // NOTE(review): not used by the allocators
                          // visible in this file -- confirm its role
    bool uncached;        // in:  request an uncached mapping
    unsigned int flags;   // in:  heap/usage flags (allocator specific)
    int allocType;        // in:  private_handle_t allocation-type flags
};
// Abstract interface implemented by each memory back-end
// (ION, pmem userspace/kernel, ashmem).
class IMemAlloc : public android::RefBase {
public:
    // Allocate buffer - fill in the alloc_data
    // structure and pass it in. Mapped address
    // and fd are returned in the alloc_data struct
    virtual int alloc_buffer(alloc_data& data) = 0;
    // Free buffer
    virtual int free_buffer(void *base, size_t size,
                            int offset, int fd) = 0;
    // Map buffer
    virtual int map_buffer(void **pBase, size_t size,
                           int offset, int fd) = 0;
    // Unmap buffer
    virtual int unmap_buffer(void *base, size_t size,
                             int offset) = 0;
    // Clean and invalidate
    virtual int clean_buffer(void *base, size_t size,
                             int offset, int fd) = 0;
    // Destructor
    virtual ~IMemAlloc() {};
    enum {
        FD_INIT = -1,  // sentinel for "fd not opened yet"
    };
};
} // end gralloc namespace
#endif // GRALLOC_MEMALLOC_H

View File

@ -0,0 +1,195 @@
/*
* Copyright (C) 2009 The Android Open Source Project
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <cutils/log.h>
#include "pmem_bestfit_alloc.h"
// align all the memory blocks on a cache-line boundary
// (all chunk bookkeeping below is kept in units of this many bytes)
const int SimpleBestFitAllocator::kMemoryAlign = 32;
// Construct an empty allocator; the heap must be sized later via setSize().
SimpleBestFitAllocator::SimpleBestFitAllocator()
    : mHeapSize(0)
{
}
// Construct and immediately size the heap (rounded up to a page).
SimpleBestFitAllocator::SimpleBestFitAllocator(size_t size)
    : mHeapSize(0)
{
    setSize(size);
}
SimpleBestFitAllocator::~SimpleBestFitAllocator()
{
    // Delete every chunk descriptor still on the list (free or not);
    // the backing memory region itself is owned by the caller.
    while(!mList.isEmpty()) {
        delete mList.remove(mList.head());
    }
}
// One-shot heap initialization: round the size up to a whole page and
// seed the free list with a single chunk covering everything.
// Fails with -EINVAL if the heap was already sized.
ssize_t SimpleBestFitAllocator::setSize(size_t size)
{
    Locker::Autolock _l(mLock);
    if (mHeapSize != 0)
        return -EINVAL;
    const size_t pagesize = getpagesize();
    mHeapSize = (size + pagesize - 1) & ~(pagesize - 1);
    mList.insertHead(new chunk_t(0, mHeapSize / kMemoryAlign));
    return size;
}
// Total managed heap size in bytes (page-aligned); 0 until setSize().
size_t SimpleBestFitAllocator::size() const
{
    return mHeapSize;
}
// Thread-safe entry point for alloc(): returns a byte offset into the
// heap, or a negative error (-EINVAL if uninitialized, -ENOMEM if full).
ssize_t SimpleBestFitAllocator::allocate(size_t size, uint32_t flags)
{
    Locker::Autolock _l(mLock);
    if (mHeapSize == 0)
        return -EINVAL;
    return alloc(size, flags);
}
// Thread-safe entry point for dealloc(): frees the chunk that starts
// at |offset|. Returns 0 on success, -ENOENT if no chunk starts there,
// -EINVAL if the heap was never initialized.
ssize_t SimpleBestFitAllocator::deallocate(size_t offset)
{
    Locker::Autolock _l(mLock);
    if (mHeapSize == 0)
        return -EINVAL;
    return dealloc(offset) ? 0 : -ENOENT;
}
// Best-fit carve of |size| bytes out of the heap; the chosen offset is
// always page-aligned. Returns the byte offset, or -ENOMEM (after
// dumping heap stats) when no chunk is large enough.
ssize_t SimpleBestFitAllocator::alloc(size_t size, uint32_t flags)
{
    if (size == 0) {
        return 0;
    }
    // All bookkeeping is in kMemoryAlign units: round the size up.
    size = (size + kMemoryAlign-1) / kMemoryAlign;
    chunk_t* free_chunk = 0;
    chunk_t* cur = mList.head();
    size_t pagesize = getpagesize();
    while (cur) {
        // Extra units needed to bring this chunk's start to a page boundary.
        int extra = ( -cur->start & ((pagesize/kMemoryAlign)-1) ) ;
        // best fit
        if (cur->free && (cur->size >= (size+extra))) {
            if ((!free_chunk) || (cur->size < free_chunk->size)) {
                free_chunk = cur;
            }
            if (cur->size == size) {
                break;
            }
        }
        cur = cur->next;
    }
    if (free_chunk) {
        const size_t free_size = free_chunk->size;
        free_chunk->free = 0;
        free_chunk->size = size;
        if (free_size > size) {
            int extra = ( -free_chunk->start & ((pagesize/kMemoryAlign)-1) ) ;
            if (extra) {
                // Split off the leading gap so the allocation starts
                // page-aligned; the gap stays on the list as free space.
                chunk_t* split = new chunk_t(free_chunk->start, extra);
                free_chunk->start += extra;
                mList.insertBefore(free_chunk, split);
            }
            LOGE_IF(((free_chunk->start*kMemoryAlign)&(pagesize-1)),
                    "page is not aligned!!!");
            const ssize_t tail_free = free_size - (size+extra);
            if (tail_free > 0) {
                // Return the unused tail to the free list.
                chunk_t* split = new chunk_t(
                    free_chunk->start + free_chunk->size, tail_free);
                mList.insertAfter(free_chunk, split);
            }
        }
        return (free_chunk->start)*kMemoryAlign;
    }
    // we are out of PMEM. Print pmem stats
    // check if there is any leak or fragmentation
    LOGD (" Out of PMEM. Dumping PMEM stats for debugging");
    LOGD (" ------------- PRINT PMEM STATS --------------");
    cur = mList.head();
    static uint32_t node_count;
    static uint64_t allocated, free_space;
    while (cur) {
        LOGD (" Node %d -> Start Address : %u Size %u Free info %d",\
              node_count++, cur->start, cur->size, cur->free);
        // if cur-> free is 1 , the node is free
        // calculate the total allocated and total free stats also
        if (cur->free)
            free_space += cur->size;
        else
            allocated += cur->size;
        // read next node
        cur = cur->next;
    }
    // FIX: "%l" is not a valid printf conversion, so the totals printed
    // as garbage; use %llu with explicit casts for the 64-bit counters.
    // (Totals are in kMemoryAlign units, matching cur->size above.)
    LOGD (" Total Allocated: %llu Total Free: %llu",
          (unsigned long long)allocated, (unsigned long long)free_space);
    node_count = 0;
    allocated = 0;
    free_space = 0;
    LOGD ("----------------------------------------------");
    return -ENOMEM;
}
// Free the chunk that starts at byte offset |start| and coalesce it
// with adjacent free chunks. Returns the (possibly merged) free chunk,
// or 0 if no chunk starts at that offset.
SimpleBestFitAllocator::chunk_t* SimpleBestFitAllocator::dealloc(size_t start)
{
    // Internal bookkeeping is in kMemoryAlign units.
    start = start / kMemoryAlign;
    chunk_t* cur = mList.head();
    while (cur) {
        if (cur->start == start) {
            LOG_FATAL_IF(cur->free,
                         "block at offset 0x%08lX of size 0x%08lX already freed",
                         cur->start*kMemoryAlign, cur->size*kMemoryAlign);
            // merge freed blocks together
            chunk_t* freed = cur;
            cur->free = 1;
            do {
                chunk_t* const p = cur->prev;
                chunk_t* const n = cur->next;
                // Fold this chunk into its predecessor when the
                // predecessor is free (or this chunk is empty);
                // 'freed' tracks the head of the merged region.
                if (p && (p->free || !cur->size)) {
                    freed = p;
                    p->size += cur->size;
                    mList.remove(cur);
                    delete cur;
                }
                cur = n;
            } while (cur && cur->free);
            LOG_FATAL_IF(!freed->free,
                         "freed block at offset 0x%08lX of size 0x%08lX is not free!",
                         freed->start * kMemoryAlign, freed->size * kMemoryAlign);
            return freed;
        }
        cur = cur->next;
    }
    // No chunk starts at this offset.
    return 0;
}

View File

@ -0,0 +1,129 @@
/*
* Copyright (C) 2009 The Android Open Source Project
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef GRALLOC_ALLOCATOR_H_
#define GRALLOC_ALLOCATOR_H_
#include <stdint.h>
#include <sys/types.h>
#include "gr.h"
#include "pmemalloc.h"
// ----------------------------------------------------------------------------
/*
* A simple templatized doubly linked-list implementation
*/
// Minimal intrusive doubly linked list. NODE must expose public
// 'prev' and 'next' pointers; the list never owns its nodes.
template <typename NODE>
class LinkedList
{
    NODE* mFirst;
    NODE* mLast;
public:
    LinkedList() : mFirst(0), mLast(0) { }
    bool isEmpty() const { return mFirst == 0; }
    NODE const* head() const { return mFirst; }
    NODE* head() { return mFirst; }
    NODE const* tail() const { return mLast; }
    NODE* tail() { return mLast; }
    // Link newNode immediately after node.
    void insertAfter(NODE* node, NODE* newNode) {
        newNode->prev = node;
        newNode->next = node->next;
        if (node->next != 0)
            node->next->prev = newNode;
        else
            mLast = newNode;
        node->next = newNode;
    }
    // Link newNode immediately before node.
    void insertBefore(NODE* node, NODE* newNode) {
        newNode->prev = node->prev;
        newNode->next = node;
        if (node->prev != 0)
            node->prev->next = newNode;
        else
            mFirst = newNode;
        node->prev = newNode;
    }
    // Make newNode the first element.
    void insertHead(NODE* newNode) {
        if (mFirst != 0) {
            newNode->prev = 0;
            newNode->next = mFirst;
            mFirst->prev = newNode;
            mFirst = newNode;
        } else {
            mFirst = mLast = newNode;
            newNode->prev = newNode->next = 0;
        }
    }
    // Make newNode the last element.
    void insertTail(NODE* newNode) {
        if (mLast == 0) {
            insertHead(newNode);
            return;
        }
        newNode->prev = mLast;
        newNode->next = 0;
        mLast->next = newNode;
        mLast = newNode;
    }
    // Unlink node and return it (caller keeps ownership).
    NODE* remove(NODE* node) {
        if (node->prev != 0)
            node->prev->next = node->next;
        else
            mFirst = node->next;
        if (node->next != 0)
            node->next->prev = node->prev;
        else
            mLast = node->prev;
        return node;
    }
};
// Best-fit sub-allocator for the userspace pmem master heap. Chunks
// are tracked on an intrusive doubly linked list; all offsets/sizes
// inside chunk_t are stored in kMemoryAlign units, while the public
// allocate/deallocate API works in bytes.
class SimpleBestFitAllocator : public gralloc::PmemUserspaceAlloc::Allocator
{
public:
    SimpleBestFitAllocator();
    SimpleBestFitAllocator(size_t size);
    virtual ~SimpleBestFitAllocator();
    // One-shot: set the heap size (rounded up to a page); -EINVAL if set.
    virtual ssize_t setSize(size_t size);
    // Returns a page-aligned byte offset, or a negative error.
    virtual ssize_t allocate(size_t size, uint32_t flags = 0);
    // Frees the allocation starting at the given byte offset.
    virtual ssize_t deallocate(size_t offset);
    // Page-aligned heap size in bytes.
    virtual size_t size() const;
private:
    struct chunk_t {
        chunk_t(size_t start, size_t size)
            : start(start), size(size), free(1), prev(0), next(0) {
        }
        size_t start;     // offset in kMemoryAlign units
        size_t size : 28; // length in kMemoryAlign units
        int free : 4;     // 1 = available, 0 = allocated
        mutable chunk_t* prev;
        mutable chunk_t* next;
    };
    ssize_t alloc(size_t size, uint32_t flags);
    chunk_t* dealloc(size_t start);
    static const int kMemoryAlign;
    mutable Locker mLock;  // guards mList/mHeapSize
    LinkedList<chunk_t> mList;
    size_t mHeapSize;
};

388
libgralloc/pmemalloc.cpp Normal file
View File

@ -0,0 +1,388 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <unistd.h>
#include <fcntl.h>
#include <sys/mman.h>
#include <stdlib.h>
#include <cutils/log.h>
#include <errno.h>
#include <linux/android_pmem.h>
#include "gralloc_priv.h"
#include "pmemalloc.h"
#include "pmem_bestfit_alloc.h"
using namespace gralloc;
using android::sp;
// Common functions between userspace
// and kernel allocators
// Query the pmem driver for the total size of the region behind fd.
// Returns 0 and fills *size, or -errno on ioctl failure.
static int getPmemTotalSize(int fd, size_t* size)
{
    //XXX: 7x27
    pmem_region region;
    if (ioctl(fd, PMEM_GET_TOTAL_SIZE, &region))
        return -errno;
    *size = region.len;
    return 0;
}
// Translate the "uncached" request into pmem open(2) flags:
// uncached mappings are opened O_SYNC.
static int getOpenFlags(bool uncached)
{
    return uncached ? (O_RDWR | O_SYNC) : O_RDWR;
}
// Attach a sub-heap fd to the master pmem fd; 0 or -errno.
static int connectPmem(int fd, int master_fd) {
    return ioctl(fd, PMEM_CONNECT, master_fd) ? -errno : 0;
}
// Expose [offset, offset+size) of the master heap through this fd; 0 or -errno.
static int mapSubRegion(int fd, int offset, size_t size) {
    struct pmem_region sub = { offset, size };
    return ioctl(fd, PMEM_MAP, &sub) ? -errno : 0;
}
// Revoke the sub-region previously exposed via PMEM_MAP; 0 or -errno.
static int unmapSubRegion(int fd, int offset, size_t size) {
    struct pmem_region sub = { offset, size };
    return ioctl(fd, PMEM_UNMAP, &sub) ? -errno : 0;
}
// Ask the kernel pmem driver for an allocation with a specific
// physical alignment; 0 or -errno.
static int alignPmem(int fd, size_t size, int align) {
    struct pmem_allocation allocation;
    allocation.size = size;
    allocation.align = align;
    return ioctl(fd, PMEM_ALLOCATE_ALIGNED, &allocation) ? -errno : 0;
}
// Clean + invalidate the CPU cache over a pmem buffer range; 0 or -errno.
static int cleanPmem(void *base, size_t size, int offset, int fd) {
    struct pmem_addr pmem_addr;
    pmem_addr.vaddr = (unsigned long) base;
    pmem_addr.offset = offset;
    pmem_addr.length = size;
    return ioctl(fd, PMEM_CLEAN_INV_CACHES, &pmem_addr) ? -errno : 0;
}
//-------------- PmemUserspaceAlloc-----------------------//
PmemUserspaceAlloc::PmemUserspaceAlloc()
{
    // The master heap is opened lazily on first allocation
    // (see init_pmem_area()); mMasterFd == FD_INIT marks "not yet tried".
    mPmemDev = DEVICE_PMEM;
    mMasterFd = FD_INIT;
    mAllocator = new SimpleBestFitAllocator();
    pthread_mutex_init(&mLock, NULL);
}
PmemUserspaceAlloc::~PmemUserspaceAlloc()
{
    // NOTE(review): mMasterFd/mMasterBase, mAllocator and mLock are not
    // released here -- fine only if this object lives for the process
    // lifetime; confirm before giving it a shorter one.
}
// Open the master pmem device, size the best-fit allocator from the
// driver-reported total, and map the whole region. Caller must hold
// mLock. Returns 0 on success (mMasterFd/mMasterBase set) or -errno.
int PmemUserspaceAlloc::init_pmem_area_locked()
{
    ALOGD("%s: Opening master pmem FD", __FUNCTION__);
    int err = 0;
    int fd = open(mPmemDev, O_RDWR, 0);
    if (fd >= 0) {
        size_t size = 0;
        err = getPmemTotalSize(fd, &size);
        ALOGD("%s: Total pmem size: %d", __FUNCTION__, size);
        if (err < 0) {
            // Driver couldn't report a size: fall back to a small
            // fixed heap rather than failing outright ("limp mode").
            ALOGE("%s: PMEM_GET_TOTAL_SIZE failed (%d), limp mode", mPmemDev,
                  err);
            size = 8<<20; // 8 MiB
        }
        mAllocator->setSize(size);
        void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd,
                          0);
        if (base == MAP_FAILED) {
            err = -errno;
            ALOGE("%s: Failed to map pmem master fd: %s", mPmemDev,
                  strerror(errno));
            base = 0;
            close(fd);
            fd = -1;
        } else {
            mMasterFd = fd;
            mMasterBase = base;
        }
    } else {
        err = -errno;
        ALOGE("%s: Failed to open pmem device: %s", mPmemDev,
              strerror(errno));
    }
    return err;
}
// Lazy, thread-safe initialization of the master pmem heap.
// mMasterFd doubles as state: FD_INIT = not tried yet, other negative
// value = a previous attempt failed (sticky), >= 0 = ready.
int PmemUserspaceAlloc::init_pmem_area()
{
    pthread_mutex_lock(&mLock);
    int err = mMasterFd;
    if (err == FD_INIT) {
        // first time, try to initialize pmem
        ALOGD("%s: Initializing pmem area", __FUNCTION__);
        err = init_pmem_area_locked();
        if (err) {
            // Remember the failure so we never retry.
            ALOGE("%s: failed to initialize pmem area", mPmemDev);
            mMasterFd = err;
        }
    } else if (err < 0) {
        // pmem couldn't be initialized, never use it
    } else {
        // pmem OK
        err = 0;
    }
    pthread_mutex_unlock(&mLock);
    return err;
}
// Allocate a buffer out of the userspace-managed master pmem heap:
// carve an offset with the best-fit allocator, open a per-buffer
// "sub-heap" fd, connect it to the master fd and map the sub-region,
// then zero and flush the memory. Fills data.base/offset/fd on success.
int PmemUserspaceAlloc::alloc_buffer(alloc_data& data)
{
    int err = init_pmem_area();
    if (err == 0) {
        void* base = mMasterBase;
        size_t size = data.size;
        int offset = mAllocator->allocate(size);
        if (offset < 0) {
            // no more pmem memory
            ALOGE("%s: No more pmem available", mPmemDev);
            err = -ENOMEM;
        } else {
            int openFlags = getOpenFlags(data.uncached);
            // now create the "sub-heap"
            int fd = open(mPmemDev, openFlags, 0);
            err = fd < 0 ? fd : 0;
            // and connect to it
            if (err == 0)
                err = connectPmem(fd, mMasterFd);
            // and make it available to the client process
            if (err == 0)
                err = mapSubRegion(fd, offset, size);
            if (err < 0) {
                // Roll back: close the sub-heap fd and return the
                // offset to the free list.
                ALOGE("%s: Failed to initialize pmem sub-heap: %d", mPmemDev,
                      err);
                close(fd);
                mAllocator->deallocate(offset);
                fd = -1;
            } else {
                ALOGD("%s: Allocated buffer base:%p size:%d offset:%d fd:%d",
                      mPmemDev, base, size, offset, fd);
                memset((char*)base + offset, 0, size);
                //Clean cache before flushing to ensure pmem is properly flushed
                err = clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
                if (err < 0) {
                    ALOGE("cleanPmem failed: (%s)", strerror(errno));
                }
                cacheflush(intptr_t(base) + offset, intptr_t(base) + offset + size, 0);
                data.base = base;
                data.offset = offset;
                data.fd = fd;
            }
        }
    }
    return err;
}
// Return a sub-heap allocation to the master pmem heap: unmap the
// sub-region, give the offset back to the best-fit allocator (only on
// successful unmap -- see comment below) and close the sub-heap fd.
int PmemUserspaceAlloc::free_buffer(void* base, size_t size, int offset, int fd)
{
    ALOGD("%s: Freeing buffer base:%p size:%d offset:%d fd:%d",
          mPmemDev, base, size, offset, fd);
    int err = 0;
    if (fd >= 0) {
        // FIX: the original declared a second 'int err' here, shadowing
        // the outer one, so unmap failures were silently reported as 0.
        err = unmapSubRegion(fd, offset, size);
        ALOGE_IF(err<0, "PMEM_UNMAP failed (%s), fd=%d, sub.offset=%u, "
                 "sub.size=%u", strerror(errno), fd, offset, size);
        if (err == 0) {
            // we can't deallocate the memory in case of UNMAP failure
            // because it would give that process access to someone else's
            // surfaces, which would be a security breach.
            mAllocator->deallocate(offset);
        }
        close(fd);
    }
    return err;
}
// Map a pmem sub-heap buffer in a client process. Sub-heap fds are
// mapped from 0, so the length is extended to cover the offset too.
int PmemUserspaceAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
{
    size += offset;
    void *addr = mmap(0, size, PROT_READ | PROT_WRITE,
                      MAP_SHARED, fd, 0);
    *pBase = addr;
    if (addr == MAP_FAILED) {
        const int status = -errno;
        ALOGE("%s: Failed to map buffer size:%d offset:%d fd:%d Error: %s",
              mPmemDev, size, offset, fd, strerror(errno));
        return status;
    }
    ALOGD("%s: Mapped buffer base:%p size:%d offset:%d fd:%d",
          mPmemDev, addr, size, offset, fd);
    return 0;
}
// Undo map_buffer: rewind the address/length to cover the offset that
// map_buffer folded in, then munmap the whole sub-region.
int PmemUserspaceAlloc::unmap_buffer(void *base, size_t size, int offset)
{
    //pmem hack
    base = (void*)(intptr_t(base) - offset);
    size += offset;
    ALOGD("%s: Unmapping buffer base:%p size:%d offset:%d",
          mPmemDev , base, size, offset);
    if (munmap(base, size) >= 0)
        return 0;
    const int status = -errno;
    ALOGE("%s: Failed to unmap memory at %p :%s",
          mPmemDev, base, strerror(errno));
    return status;
}
/* Flushes CPU caches for the given pmem region so other hardware clients
 * observe coherent data. Thin wrapper over the shared cleanPmem() helper. */
int PmemUserspaceAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
{
    return cleanPmem(base, size, offset, fd);
}
//-------------- PmemKernelAlloc-----------------------//
// Allocator that takes buffers directly from a kernel pmem device:
// one fd and one private mapping per buffer, no userspace sub-allocation.
// 'pmemdev' is the device node path; the string must outlive this object.
PmemKernelAlloc::PmemKernelAlloc(const char* pmemdev) :
    mPmemDev(pmemdev)
{
}
PmemKernelAlloc::~PmemKernelAlloc()
{
}
/* Allocates one buffer from the kernel pmem device.
 * Opens a fresh fd, optionally requests 8K physical alignment (needed by
 * tiled formats), maps the buffer, zeroes it and flushes caches.
 * On success fills data.base/offset/fd (offset is always 0 for kernel pmem)
 * and returns 0; returns -errno on open/mmap failure. */
int PmemKernelAlloc::alloc_buffer(alloc_data& data)
{
    int err, offset = 0;
    int openFlags = getOpenFlags(data.uncached);
    int size = data.size;
    int fd = open(mPmemDev, openFlags, 0);
    if (fd < 0) {
        err = -errno;
        ALOGE("%s: Error opening %s", __FUNCTION__, mPmemDev);
        return err;
    }
    if (data.align == 8192) {
        // Tile format buffers need physical alignment to 8K
        // Default page size does not need this ioctl
        err = alignPmem(fd, size, 8192);
        if (err < 0) {
            // NOTE(review): alignment failure is logged but the allocation
            // still proceeds — presumably the caller can cope; confirm.
            ALOGE("alignPmem failed");
        }
    }
    void* base = mmap(0, size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
    if (base == MAP_FAILED) {
        err = -errno;
        ALOGE("%s: failed to map pmem fd: %s", mPmemDev,
                strerror(errno));
        close(fd);
        return err;
    }
    // Zero the buffer, then flush caches so the cleared contents are
    // visible to non-CPU clients. clean_buffer's status is not checked here.
    memset(base, 0, size);
    clean_buffer((void*)((intptr_t) base + offset), size, offset, fd);
    data.base = base;
    data.offset = 0;
    data.fd = fd;
    ALOGD("%s: Allocated buffer base:%p size:%d fd:%d",
            mPmemDev, base, size, fd);
    return 0;
}
/* Releases a kernel-pmem buffer: drops the mapping, then closes the
 * per-buffer fd. Returns the unmap status (0 or negative error code). */
int PmemKernelAlloc::free_buffer(void* base, size_t size, int offset, int fd)
{
    ALOGD("%s: Freeing buffer base:%p size:%d fd:%d",
            mPmemDev, base, size, fd);
    const int status = unmap_buffer(base, size, offset);
    close(fd);
    return status;
}
/* Maps a kernel-pmem buffer into the calling process.
 * 'offset' is unused: kernel-pmem buffers always start at fd offset 0.
 * Returns 0 on success, -errno on failure (*pBase is MAP_FAILED then). */
int PmemKernelAlloc::map_buffer(void **pBase, size_t size, int offset, int fd)
{
    void *mapped = mmap(0, size, PROT_READ| PROT_WRITE,
            MAP_SHARED, fd, 0);
    *pBase = mapped;
    if (MAP_FAILED == mapped) {
        // Capture errno before logging so the log call cannot clobber it.
        const int status = -errno;
        ALOGE("%s: Failed to map memory in the client: %s",
                mPmemDev, strerror(errno));
        return status;
    }
    ALOGD("%s: Mapped buffer base:%p size:%d, fd:%d",
            mPmemDev, mapped, size, fd);
    return 0;
}
/* Unmaps a buffer mapped by map_buffer(). 'offset' is unused here since
 * kernel-pmem buffers are mapped from fd offset 0.
 * Returns 0 on success, -errno if munmap fails. */
int PmemKernelAlloc::unmap_buffer(void *base, size_t size, int offset)
{
    int err = 0;
    if (munmap(base, size)) {
        err = -errno;
        // BUGFIX: strerror() expects the positive errno value; the original
        // passed the negated code ('err'), producing a bogus error string.
        // The sibling PmemUserspaceAlloc::unmap_buffer already does this.
        ALOGW("%s: Error unmapping memory at %p: %s",
                mPmemDev, base, strerror(errno));
    }
    return err;
}
/* Flushes CPU caches for the given pmem region so other hardware clients
 * observe coherent data. Thin wrapper over the shared cleanPmem() helper. */
int PmemKernelAlloc::clean_buffer(void *base, size_t size, int offset, int fd)
{
    return cleanPmem(base, size, offset, fd);
}

106
libgralloc/pmemalloc.h Normal file
View File

@ -0,0 +1,106 @@
/*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef GRALLOC_PMEMALLOC_H
#define GRALLOC_PMEMALLOC_H
#include <linux/ion.h>
#include <utils/RefBase.h>
#include "memalloc.h"
namespace gralloc {
// Allocator that sub-allocates buffers from one large userspace mapping of a
// pmem device: the master fd/base is mapped once (init_pmem_area) and the
// pluggable Allocator hands out offsets inside that region.
class PmemUserspaceAlloc : public IMemAlloc {
    public:
    // Strategy interface for the offset sub-allocator; ref-counted
    // (android::RefBase) so it can be held via android::sp.
    class Allocator: public android::RefBase {
        public:
        virtual ~Allocator() {};
        // Sets the total size of the managed region; negative on failure.
        virtual ssize_t setSize(size_t size) = 0;
        virtual size_t size() const = 0;
        // Returns the offset of a new chunk, or negative on failure.
        virtual ssize_t allocate(size_t size, uint32_t flags = 0) = 0;
        virtual ssize_t deallocate(size_t offset) = 0;
    };

    // IMemAlloc interface — all return 0 on success, negative on failure.
    virtual int alloc_buffer(alloc_data& data);
    virtual int free_buffer(void *base, size_t size,
            int offset, int fd);
    virtual int map_buffer(void **pBase, size_t size,
            int offset, int fd);
    virtual int unmap_buffer(void *base, size_t size,
            int offset);
    virtual int clean_buffer(void*base, size_t size,
            int offset, int fd);
    PmemUserspaceAlloc();
    ~PmemUserspaceAlloc();

    private:
    int mMasterFd;                      // fd of the pmem device, opened once
    void* mMasterBase;                  // base of the master mapping
    const char* mPmemDev;               // device node path (used in logs)
    android::sp<Allocator> mAllocator;  // offset sub-allocator
    pthread_mutex_t mLock;              // guards lazy master-area init
    int init_pmem_area();
    int init_pmem_area_locked();
};
// Allocator that takes buffers directly from a kernel pmem device: one fd
// and one mapping per buffer, no userspace sub-allocation (offset is 0).
class PmemKernelAlloc : public IMemAlloc {
    public:
    // IMemAlloc interface — all return 0 on success, negative on failure.
    virtual int alloc_buffer(alloc_data& data);
    virtual int free_buffer(void *base, size_t size,
            int offset, int fd);
    virtual int map_buffer(void **pBase, size_t size,
            int offset, int fd);
    virtual int unmap_buffer(void *base, size_t size,
            int offset);
    virtual int clean_buffer(void*base, size_t size,
            int offset, int fd);
    // 'device' is the pmem node path; the string must outlive this object.
    PmemKernelAlloc(const char* device);
    ~PmemKernelAlloc();
    private:
    const char* mPmemDev;  // device node path (used in logs)
};
}
#endif /* GRALLOC_PMEMALLOC_H */

39
libhwcomposer/Android.mk Normal file
View File

@ -0,0 +1,39 @@
LOCAL_PATH := $(call my-dir)
# HAL module implemenation, not prelinked and stored in
# hw/<OVERLAY_HARDWARE_MODULE_ID>.<ro.product.board>.so
include $(CLEAR_VARS)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)/hw
LOCAL_SHARED_LIBRARIES := liblog libcutils libEGL libhardware libutils liboverlay
LOCAL_SHARED_LIBRARIES += libgenlock libQcomUI libmemalloc
LOCAL_SRC_FILES := \
    hwcomposer.cpp \
    external_display_only.h
LOCAL_MODULE := hwcomposer.$(TARGET_BOARD_PLATFORM)
LOCAL_CFLAGS:= -DLOG_TAG=\"$(TARGET_BOARD_PLATFORM).hwcomposer\" -DDEBUG_CALC_FPS
# Sibling display HALs whose headers this module consumes.
LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
LOCAL_C_INCLUDES += hardware/qcom/display/liboverlay
LOCAL_C_INCLUDES += hardware/qcom/display/libcopybit
LOCAL_C_INCLUDES += hardware/qcom/display/libgenlock
LOCAL_C_INCLUDES += hardware/qcom/display/libqcomui
# Kernel UAPI headers (msm_mdp etc.) exported by the kernel build.
LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
# Board-specific feature flags gate optional code paths in hwcomposer.cpp.
ifeq ($(TARGET_HAVE_HDMI_OUT),true)
LOCAL_CFLAGS += -DHDMI_DUAL_DISPLAY
endif
ifeq ($(TARGET_USES_OVERLAY),true)
LOCAL_CFLAGS += -DUSE_OVERLAY
endif
ifeq ($(TARGET_HAVE_BYPASS),true)
LOCAL_CFLAGS += -DCOMPOSITION_BYPASS
endif
ifeq ($(TARGET_USE_HDMI_AS_PRIMARY),true)
LOCAL_CFLAGS += -DHDMI_AS_PRIMARY
endif
LOCAL_MODULE_TAGS := optional eng
include $(BUILD_SHARED_LIBRARY)

View File

@ -0,0 +1,498 @@
/*
* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
// Set to 1 for verbose per-call logging in this file.
#define EXTDEBUG 0

// Static helper class that drives layers flagged "external only" straight to
// the external display (HDMI/WFD) through dedicated overlay pipes, bypassing
// GPU composition to fb0. All state is static; there are no instances.
// Every method compiles to a no-op unless HDMI_DUAL_DISPLAY and USE_OVERLAY
// are both defined.
class ExtDispOnly {
    enum ExternalOnlyMode {
        EXT_ONLY_MODE_OFF = 0,
        EXT_ONLY_MODE_ON = 1,
    };
    enum {
        // At most this many external-only layers are handled per frame.
        MAX_EXT_ONLY_LAYERS = 2,
    };
    public:
    /* Initialize, allocate data members */
    static void init();
    /* Deallocate data members */
    static void destroy();
    /* Closes all the overlay channels */
    static void close();
    /* Prepare overlay and configures mdp pipes */
    static int prepare(hwc_context_t *ctx, hwc_layer_t *layer, int index,
            bool waitForVsync);
    /* Returns status of external-only mode */
    static bool isModeOn();
    /* Updates stats and pipe config related to external_only and external_block layers
     * If we are staring or stopping this mode, update default mirroring.
     */
    static int update(hwc_context_t* ctx, hwc_layer_list_t* list);
    /* Stores the locked handle for the buffer that was successfully queued */
    static void storeLockedHandles(hwc_layer_list_t* list);
    /* Queue buffers to mdp for display */
    static int draw(hwc_context_t *ctx, hwc_layer_list_t *list);
    private:
    /* Locks a buffer and marks it as locked */
    static void lockBuffer(native_handle_t *hnd);
    /* Unlocks a buffer and clears the locked flag */
    static void unlockBuffer(native_handle_t *hnd);
    /* Unlocks buffers queued in previous round (and displayed by now)
     * Clears the handle cache.
     */
    static void unlockPreviousBuffers();
    /* Closes the a range of overlay channels */
    static void closeRange(int start);
    /* Start default external mirroring */
    static void startDefaultMirror(hwc_context_t* ctx);
    /* Stop default external mirroring */
    static void stopDefaultMirror(hwc_context_t* ctx);
    /* Checks if external-only mode is starting */
    static bool isExtModeStarting(hwc_context_t* ctx, const int&
            numExtLayers);
    /* Checks if external-only mode is stopping */
    static bool isExtModeStopping(hwc_context_t* ctx, const int&
            numExtLayers);

    //Data members
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    static overlay::OverlayUI* sOvExtUI[MAX_EXT_ONLY_LAYERS]; // one pipe per layer
    static native_handle_t* sPreviousExtHandle[MAX_EXT_ONLY_LAYERS]; // handles still locked from last frame
    static ExternalOnlyMode sExtOnlyMode; // cached mode from the last update()
    static int sNumExtOnlyLayers;
    static bool sSkipLayerPresent;
    static bool sBlockLayerPresent;
    static int sBlockLayerIndex;
#endif
}; //class ExtDispOnly
// Acquires a genlock READ lock on 'hnd' and tags the handle with the HWC
// lock flag so the same buffer is not locked twice by this module.
void ExtDispOnly::lockBuffer(native_handle_t *hnd) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    private_handle_t* phnd = (private_handle_t*)hnd;
    //Genlock is reference counted and recursive.
    //Do not accidently lock a locked buffer.
    if(phnd && (phnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK)) {
        LOGE_IF(EXTDEBUG, "%s: handle %p already locked", __func__, phnd);
        return;
    }
    // On failure the flag is left clear, so a later unlock is a no-op.
    if (GENLOCK_FAILURE == genlock_lock_buffer(hnd, GENLOCK_READ_LOCK,
            GENLOCK_MAX_TIMEOUT)) {
        LOGE("%s: genlock_lock_buffer(READ) failed", __func__);
        return;
    }
    phnd->flags |= private_handle_t::PRIV_FLAGS_HWC_LOCK;
    LOGE_IF(EXTDEBUG, "%s: locked handle = %p", __func__, hnd);
#endif
}
// Releases the genlock READ lock taken by lockBuffer() and clears the HWC
// lock flag. Safe to call on already-unlocked or deallocated handles: every
// failure path bails out with a log instead of touching the handle.
void ExtDispOnly::unlockBuffer(native_handle_t *hnd) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    //Check if buffer is still around
    if(private_handle_t::validate(hnd) != 0) {
        LOGE("%s Handle already deallocated", __func__);
        return;
    }
    private_handle_t* phnd = (private_handle_t*)hnd;
    //Check if buffer was locked in the first place
    if((phnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK) == 0) {
        LOGE("%s Handle not locked, cannot unlock", __func__);
        return;
    }
    //Actually try to unlock
    if (GENLOCK_FAILURE == genlock_unlock_buffer(hnd)) {
        LOGE("%s: genlock_unlock_buffer failed", __func__);
        return;
    }
    //Clear the locked flag
    phnd->flags &= ~private_handle_t::PRIV_FLAGS_HWC_LOCK;
    LOGE_IF(EXTDEBUG, "%s: unlocked handle = %p", __func__, hnd);
#endif
}
// Unlocks the buffers cached from the previous frame (displayed by now)
// and clears the handle cache. The cache is packed from slot 0, so the
// walk stops at the first empty slot.
void ExtDispOnly::unlockPreviousBuffers() {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    int slot = 0;
    while ((slot < MAX_EXT_ONLY_LAYERS) && (sPreviousExtHandle[slot] != NULL)) {
        LOGE_IF(EXTDEBUG, "%s", __func__);
        ExtDispOnly::unlockBuffer(sPreviousExtHandle[slot]);
        sPreviousExtHandle[slot] = NULL;
        ++slot;
    }
#endif
}
// Allocates one OverlayUI object per supported external-only layer and
// resets all cached state to its "mode off" defaults. Call once at startup,
// before any other ExtDispOnly method.
void ExtDispOnly::init() {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    for (int slot = 0; slot < MAX_EXT_ONLY_LAYERS; ++slot) {
        sOvExtUI[slot] = new overlay::OverlayUI();
        sPreviousExtHandle[slot] = NULL;
    }
    sExtOnlyMode = EXT_ONLY_MODE_OFF;
    sNumExtOnlyLayers = 0;
    sSkipLayerPresent = false;
    sBlockLayerPresent = false;
    sBlockLayerIndex = -1;
    LOGE_IF(EXTDEBUG, "%s", __func__);
#endif
}
// Frees the per-layer OverlayUI objects allocated by init().
void ExtDispOnly::destroy() {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    for(int i = 0; i < MAX_EXT_ONLY_LAYERS; i++) {
        delete sOvExtUI[i];
        // BUGFIX: null the slot so a stray close()/closeRange() after
        // destroy() fails the pointer checks instead of dereferencing
        // freed memory (the original left dangling pointers behind).
        sOvExtUI[i] = NULL;
    }
#endif
}
// Shuts down overlay channels [start, MAX_EXT_ONLY_LAYERS), first unlocking
// and forgetting any buffer still cached for each of those channels.
void ExtDispOnly::closeRange(int start) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    for (int ch = start; ch < MAX_EXT_ONLY_LAYERS; ++ch) {
        native_handle_t* cached = sPreviousExtHandle[ch];
        if (cached != NULL) {
            LOGE_IF(EXTDEBUG, "%s", __func__);
            ExtDispOnly::unlockBuffer(cached);
            sPreviousExtHandle[ch] = NULL;
        }
        sOvExtUI[ch]->closeChannel();
    }
#endif
}
// Closes every overlay channel (and unlocks any cached buffers) by
// delegating to closeRange() from channel 0.
void inline ExtDispOnly::close() {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    closeRange(0);
#endif
}
/* Configures the MDP overlay pipe 'index' for one external-only layer:
 * sets source geometry from the layer crop, targets the external fb
 * (HDMI/WFD), scales to the full external framebuffer and commits.
 * Returns overlay::NO_ERROR on success (or when compiled out / ctx is
 * null), -1 when the external display is off, a mode switch is pending,
 * or any overlay setup step fails. */
int ExtDispOnly::prepare(hwc_context_t *ctx, hwc_layer_t *layer, int index,
        bool waitForVsync) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    if(ctx->mHDMIEnabled == EXT_DISPLAY_OFF ||
        ctx->pendingHDMI == true)
        return -1;
    if (ctx && sOvExtUI[index]) {
        private_hwc_module_t* hwcModule = reinterpret_cast<
                private_hwc_module_t*>(ctx->device.common.module);
        if (!hwcModule) {
            LOGE("%s null module", __func__);
            return -1;
        }
        private_handle_t *hnd = (private_handle_t *)layer->handle;
        if(!hnd) {
            LOGE("%s handle null", __func__);
            return -1;
        }
        overlay::OverlayUI *ovUI = sOvExtUI[index];
        // CLEANUP: removed an unused local 'int ret = 0;' from the original.
        //int orientation = layer->transform;
        //Assuming layers will always be source landscape
        const int orientation = 0;
        // Source geometry comes from the layer's crop; format and size
        // come from the gralloc handle.
        overlay_buffer_info info;
        hwc_rect_t sourceCrop = layer->sourceCrop;
        info.width = sourceCrop.right - sourceCrop.left;
        info.height = sourceCrop.bottom - sourceCrop.top;
        info.format = hnd->format;
        info.size = hnd->size;
        const int fbnum = ctx->mHDMIEnabled; //HDMI or WFD
        const bool isFg = false;
        //Just to differentiate zorders for different layers
        const int zorder = index;
        const bool isVGPipe = true;
        ovUI->setSource(info, orientation);
        ovUI->setDisplayParams(fbnum, waitForVsync, isFg, zorder, isVGPipe);
        // Stretch the layer to fill the external framebuffer.
        const int fbWidth = ovUI->getFBWidth();
        const int fbHeight = ovUI->getFBHeight();
        ovUI->setPosition(0, 0, fbWidth, fbHeight);
        if(ovUI->commit() != overlay::NO_ERROR) {
            LOGE("%s: Overlay Commit failed", __func__);
            return -1;
        }
    }
    LOGE_IF(EXTDEBUG, "%s", __func__);
#endif
    return overlay::NO_ERROR;
}
// Re-enables default mirroring of the primary display on the currently
// connected external interface by telling the framebuffer HAL which
// external target (HDMI/WFD) is active.
inline void ExtDispOnly::startDefaultMirror(hwc_context_t* ctx) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    hwc_composer_device_t* dev = (hwc_composer_device_t*) ctx;
    private_hwc_module_t* hwcModule =
        reinterpret_cast<private_hwc_module_t*>(dev->common.module);
    framebuffer_device_t *fbDev = hwcModule->fbDevice;
    if (fbDev) {
        //mHDMIEnabled could be HDMI/WFD/NO EXTERNAL
        fbDev->enableHDMIOutput(fbDev, ctx->mHDMIEnabled);
    }
#endif
}
// Suspends default mirroring on the external interface (so external-only
// layers can own it) by telling the framebuffer HAL the display is off.
inline void ExtDispOnly::stopDefaultMirror(hwc_context_t* ctx) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    hwc_composer_device_t* dev = (hwc_composer_device_t*) ctx;
    private_hwc_module_t* hwcModule =
        reinterpret_cast<private_hwc_module_t*>(dev->common.module);
    framebuffer_device_t *fbDev = hwcModule->fbDevice;
    if (fbDev) {
        fbDev->enableHDMIOutput(fbDev, EXT_DISPLAY_OFF);
    }
#endif
}
// True when external-only mode is about to turn on: the cached mode is OFF
// but this frame carries external-only layers. Always false when the
// feature is compiled out.
inline bool ExtDispOnly::isExtModeStarting(hwc_context_t* ctx, const int&
        numExtLayers) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    return ((sExtOnlyMode == EXT_ONLY_MODE_OFF) && numExtLayers);
#endif
    return false;
}
// True when external-only mode is about to turn off: the cached mode is ON
// but this frame carries no external-only layers. Always false when the
// feature is compiled out.
inline bool ExtDispOnly::isExtModeStopping(hwc_context_t* ctx, const int&
        numExtLayers) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    return ((sExtOnlyMode == EXT_ONLY_MODE_ON) && (numExtLayers == 0));
#endif
    return false;
}
// Returns the mode cached by the last update() call; always false when the
// feature is compiled out.
inline bool ExtDispOnly::isModeOn() {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    return (sExtOnlyMode == EXT_ONLY_MODE_ON);
#endif
    return false;
}
/* Per-frame bookkeeping for external-only/external-block layers.
 * Counts and re-flags the external-only layers in 'list', toggles default
 * mirroring when the mode starts or stops, caches the stats for draw(),
 * and (re)configures the overlay pipes.
 * Returns overlay::NO_ERROR normally, -1 when the external display is off
 * or a mode switch is pending. */
int ExtDispOnly::update(hwc_context_t* ctx, hwc_layer_list_t* list) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    int aNumExtLayers = 0;
    bool aSkipLayerPresent = false;
    bool aBlockLayerPresent = false;
    int aBlockLayerIndex = -1;
    //Book-keeping done each cycle
    for (size_t i = 0; i < list->numHwLayers; i++) {
        private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
        // Dont draw in this round
        if(list->hwLayers[i].flags & HWC_SKIP_LAYER) {
            aSkipLayerPresent = true;
        }
        if(hnd && (hnd->flags & private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY)) {
            aNumExtLayers++;
            // No way we can let this be drawn by GPU to fb0
            if(list->hwLayers[i].flags & HWC_SKIP_LAYER) {
                list->hwLayers[i].flags &= ~ HWC_SKIP_LAYER;
            }
            list->hwLayers[i].flags |= HWC_USE_EXT_ONLY;
            list->hwLayers[i].compositionType = HWC_USE_OVERLAY;
            list->hwLayers[i].hints &= ~HWC_HINT_CLEAR_FB;
            //EXTERNAL_BLOCK is always an add-on
            if(hnd && (hnd->flags &
                    private_handle_t::PRIV_FLAGS_EXTERNAL_BLOCK)) {
                aBlockLayerPresent = true;
                aBlockLayerIndex = i;
                list->hwLayers[i].flags |= HWC_USE_EXT_BLOCK;
            }
        }
    }
    //Update Default mirroring state
    if (isExtModeStarting(ctx, aNumExtLayers)) {
        stopDefaultMirror(ctx);
    } else if (isExtModeStopping(ctx, aNumExtLayers)) {
        startDefaultMirror(ctx);
    }
    //Cache our stats
    sExtOnlyMode = aNumExtLayers ? EXT_ONLY_MODE_ON : EXT_ONLY_MODE_OFF;
    sNumExtOnlyLayers = aNumExtLayers;
    sSkipLayerPresent = aSkipLayerPresent;
    sBlockLayerPresent = aBlockLayerPresent;
    sBlockLayerIndex = aBlockLayerIndex;
    LOGE_IF(EXTDEBUG, "%s: numExtLayers = %d skipLayerPresent = %d", __func__,
            aNumExtLayers, aSkipLayerPresent);
    //If skip layer present return. Buffers to be unlocked in draw phase.
    if(aSkipLayerPresent) {
        return overlay::NO_ERROR;
    }
    //If External is not connected, dont setup pipes, just return
    if(ctx->mHDMIEnabled == EXT_DISPLAY_OFF ||
        ctx->pendingHDMI == true) {
        ExtDispOnly::close();
        return -1;
    }
    //Update pipes
    bool waitForVsync = true;
    // BUGFIX: this counter was declared 'bool', so index++ saturated at 1.
    // It only worked because MAX_EXT_ONLY_LAYERS is 2; 'int' is correct
    // (bool increment is also deprecated/removed in modern C++).
    int index = 0;
    if (aBlockLayerPresent) {
        ExtDispOnly::closeRange(1);
        ExtDispOnly::prepare(ctx, &(list->hwLayers[aBlockLayerIndex]),
                index, waitForVsync);
    } else if (aNumExtLayers) {
        ExtDispOnly::closeRange(aNumExtLayers);
        for (size_t i = 0; i < list->numHwLayers; i++) {
            private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
            if(hnd && hnd->flags & private_handle_t::PRIV_FLAGS_EXTERNAL_ONLY) {
                // Only the last external-only layer waits for vsync.
                waitForVsync = (index == (aNumExtLayers - 1));
                ExtDispOnly::prepare(ctx, &(list->hwLayers[i]),
                        index, waitForVsync);
                index++;
            }
        }
    } else {
        ExtDispOnly::close();
    }
#endif
    return overlay::NO_ERROR;
}
/* Caches (and if necessary genlocks) the handles of the buffers that were
 * successfully queued this frame, so draw() can unlock them next frame. */
void ExtDispOnly::storeLockedHandles(hwc_layer_list_t* list) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    int index = 0;
    if(sBlockLayerPresent) {
        private_handle_t *hnd = (private_handle_t *)
            list->hwLayers[sBlockLayerIndex].handle;
        // BUGFIX: guard against a NULL handle before reading hnd->flags
        // (the loop below and draw() both make this check already).
        if(hnd && (list->hwLayers[sBlockLayerIndex].flags & HWC_USE_EXT_ONLY)) {
            if(!(hnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK)) {
                ExtDispOnly::lockBuffer(hnd);
            }
            sPreviousExtHandle[index] = hnd;
            LOGE_IF(EXTDEBUG, "%s BLOCK: handle = %p", __func__, hnd);
            return;
        }
    }
    for(size_t i = 0; i < list->numHwLayers; i++) {
        private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
        // BUGFIX: same NULL-handle guard as above.
        if(hnd && (list->hwLayers[i].flags & HWC_USE_EXT_ONLY)) {
            if(!(hnd->flags & private_handle_t::PRIV_FLAGS_HWC_LOCK)) {
                ExtDispOnly::lockBuffer(hnd);
            }
            sPreviousExtHandle[index] = hnd;
            index++;
            LOGE_IF(EXTDEBUG, "%s: handle = %p", __func__, hnd);
        }
    }
#endif
}
/* Queues this frame's external-only buffers to MDP, then unlocks last
 * frame's buffers and caches the newly queued handles.
 * Returns overlay::NO_ERROR on success (or when there is nothing to do),
 * -1 when the external display is off/pending or a queue fails (all
 * channels are closed and locks released on that path). */
int ExtDispOnly::draw(hwc_context_t *ctx, hwc_layer_list_t *list) {
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
    LOGE_IF(EXTDEBUG, "%s", __func__);
    if(ctx->mHDMIEnabled == EXT_DISPLAY_OFF ||
        ctx->pendingHDMI == true) {
        ExtDispOnly::close();
        return -1;
    }
    int ret = overlay::NO_ERROR;
    int index = 0;
    //If skip layer present or list invalid unlock and return.
    if(sSkipLayerPresent || list == NULL) {
        ExtDispOnly::unlockPreviousBuffers();
        return overlay::NO_ERROR;
    }
    if(sBlockLayerPresent) {
        private_handle_t *hnd = (private_handle_t*)
            list->hwLayers[sBlockLayerIndex].handle;
        ExtDispOnly::lockBuffer(hnd);
        ret = sOvExtUI[index]->queueBuffer(hnd);
        if (ret) {
            LOGE("%s queueBuffer failed", __func__);
            // Unlock the locked buffer
            ExtDispOnly::unlockBuffer(hnd);
            ExtDispOnly::close();
            return -1;
        }
        ExtDispOnly::unlockPreviousBuffers();
        ExtDispOnly::storeLockedHandles(list);
        return overlay::NO_ERROR;
    }
    for(size_t i = 0; i < list->numHwLayers; i++) {
        private_handle_t *hnd = (private_handle_t *)list->hwLayers[i].handle;
        if(hnd && list->hwLayers[i].flags & HWC_USE_EXT_ONLY) {
            overlay::OverlayUI *ovUI = sOvExtUI[index];
            ExtDispOnly::lockBuffer(hnd);
            ret = ovUI->queueBuffer(hnd);
            if (ret) {
                LOGE("%s queueBuffer failed", __func__);
                // Unlock the all the currently locked buffers
                for (size_t j = 0; j <= i; j++) {
                    private_handle_t *tmphnd =
                        (private_handle_t *)list->hwLayers[j].handle;
                    // BUGFIX: the original tested 'hnd' (the failing layer's
                    // handle, always non-null here) instead of 'tmphnd',
                    // so a NULL handle on layer j could be passed onward.
                    if(tmphnd && list->hwLayers[j].flags & HWC_USE_EXT_ONLY)
                        ExtDispOnly::unlockBuffer(tmphnd);
                }
                ExtDispOnly::close();
                return -1;
            }
            index++;
        }
    }
    ExtDispOnly::unlockPreviousBuffers();
    ExtDispOnly::storeLockedHandles(list);
#endif
    return overlay::NO_ERROR;
}
#if defined (HDMI_DUAL_DISPLAY) && defined (USE_OVERLAY)
// Out-of-class definitions for ExtDispOnly's static data members (declared
// inside the class above); only compiled when the feature is enabled.
overlay::OverlayUI* ExtDispOnly::sOvExtUI[MAX_EXT_ONLY_LAYERS];
native_handle_t* ExtDispOnly::sPreviousExtHandle[MAX_EXT_ONLY_LAYERS];
ExtDispOnly::ExternalOnlyMode ExtDispOnly::sExtOnlyMode;
int ExtDispOnly::sNumExtOnlyLayers;
bool ExtDispOnly::sSkipLayerPresent;
bool ExtDispOnly::sBlockLayerPresent;
int ExtDispOnly::sBlockLayerIndex;
#endif

1734
libhwcomposer/hwcomposer.cpp Executable file

File diff suppressed because it is too large Load Diff

43
liboverlay/Android.mk Executable file
View File

@ -0,0 +1,43 @@
# Copyright (C) 2008 The Android Open Source Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License
#
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
LOCAL_SHARED_LIBRARIES := liblog libcutils libutils libmemalloc
# Kernel UAPI headers (msm_mdp/msm_rotator) exported by the kernel build.
LOCAL_C_INCLUDES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr/include
LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/KERNEL_OBJ/usr
LOCAL_C_INCLUDES += hardware/qcom/display/libgralloc
# BUGFIX: the source list previously ended with a trailing backslash after
# overlayLibUI.cpp, which made make splice the LOCAL_CFLAGS line below into
# LOCAL_SRC_FILES (corrupting both variables).
LOCAL_SRC_FILES := \
    overlayLib.cpp \
    overlayLibUI.cpp
LOCAL_CFLAGS:= -DLOG_TAG=\"OverlayLib\"
ifeq ($(TARGET_USE_HDMI_AS_PRIMARY),true)
LOCAL_CFLAGS += -DHDMI_AS_PRIMARY
endif
ifeq ($(TARGET_USES_POST_PROCESSING),true)
LOCAL_CFLAGS += -DUSES_POST_PROCESSING
LOCAL_SHARED_LIBRARIES += libmm-abl
LOCAL_C_INCLUDES += $(TARGET_OUT_HEADERS)/pp/inc
LOCAL_ADDITIONAL_DEPENDENCIES += $(TARGET_OUT_INTERMEDIATES)/lib/
endif
LOCAL_MODULE := liboverlay
#LGE_CHANGE, for userdebug mode
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)

2352
liboverlay/overlayLib.cpp Executable file

File diff suppressed because it is too large Load Diff

451
liboverlay/overlayLib.h Executable file
View File

@ -0,0 +1,451 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2010-2012, Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INCLUDE_OVERLAY_LIB
#define INCLUDE_OVERLAY_LIB
#include <cutils/log.h>
#include <cutils/properties.h>
#include <cutils/atomic.h>
#include <hardware/hardware.h>
#include <hardware/gralloc.h>
#include <dlfcn.h>
#include <fcntl.h>
#include <errno.h>
#include <string.h>
#include <stdlib.h>
#include <pthread.h>
#include <linux/fb.h>
#include <linux/msm_mdp.h>
#include <linux/msm_rotator.h>
#include <linux/android_pmem.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <utils/threads.h>
#include <utils/RefBase.h>
#include <alloc_controller.h>
#include <memalloc.h>
#ifdef USES_POST_PROCESSING
#include "lib-postproc.h"
#endif
#define HW_OVERLAY_MAGNIFICATION_LIMIT 8
#define HW_OVERLAY_MINIFICATION_LIMIT HW_OVERLAY_MAGNIFICATION_LIMIT
#define EVEN_OUT(x) if (x & 0x0001) {x--;}
#define NO_PIPE -1
#define VG0_PIPE 0
#define VG1_PIPE 1
#define NUM_CHANNELS 2
#define NUM_FB_DEVICES 3
#define FRAMEBUFFER_0 0
#define FRAMEBUFFER_1 1
#define FRAMEBUFFER_2 2
#define NUM_SHARPNESS_VALS 256
#define SHARPNESS_RANGE 1.0f
#define HUE_RANGE 180
#define BRIGHTNESS_RANGE 255
#define CON_SAT_RANGE 1.0f
#define CAP_RANGE(value,max,min) do { if (value - min < -0.0001)\
{value = min;}\
else if(value - max > 0.0001)\
{value = max;}\
} while(0);
enum {
HDMI_OFF,
HDMI_ON
};
enum {
OVERLAY_CHANNEL_DOWN,
OVERLAY_CHANNEL_UP
};
enum {
NEW_REQUEST,
UPDATE_REQUEST
};
enum {
WAIT_FOR_VSYNC = 1<<0,
DISABLE_FRAMEBUFFER_FETCH = 1<<1,
INTERLACED_CONTENT = 1<<2,
OVERLAY_PIPE_SHARE = 1<<3,
SECURE_OVERLAY_SESSION = 1<<4,
};
/* ------------------------------- 3D defines ---------------------------------------*/
// The compound format passed to the overlay is
// ABCCC where A is the input 3D format,
// B is the output 3D format
// CCC is the color format e.g YCbCr420SP YCrCb420SP etc.
#define FORMAT_3D(x) (x & 0xFF000)
#define COLOR_FORMAT(x) (x & 0xFFF)
// in the final 3D format, the MSB 2Bytes are the input format and the
// LSB 2bytes are the output format. Shift the output byte 12 bits.
#define SHIFT_OUTPUT_3D 12
#define FORMAT_3D_OUTPUT(x) ((x & 0xF000) >> SHIFT_OUTPUT_3D)
#define FORMAT_3D_INPUT(x) (x & 0xF0000)
#define INPUT_MASK_3D 0xFFFF0000
#define OUTPUT_MASK_3D 0x0000FFFF
#define SHIFT_3D 16
// The output format is the 2MSB bytes. Shift the format by 12 to reflect this
#define HAL_3D_OUT_SIDE_BY_SIDE_MASK (HAL_3D_OUT_SIDE_BY_SIDE >> SHIFT_OUTPUT_3D)
#define HAL_3D_OUT_TOP_BOTTOM_MASK (HAL_3D_OUT_TOP_BOTTOM >> SHIFT_OUTPUT_3D)
#define HAL_3D_OUT_INTERLEAVE_MASK (HAL_3D_OUT_INTERLEAVE >> SHIFT_OUTPUT_3D)
#define HAL_3D_OUT_MONOSCOPIC_MASK (HAL_3D_OUT_MONOSCOPIC >> SHIFT_OUTPUT_3D)
// 3D panel barrier orientation
#define BARRIER_LANDSCAPE 1
#define BARRIER_PORTRAIT 2
#ifdef HDMI_AS_PRIMARY
#define FORMAT_3D_FILE "/sys/class/graphics/fb0/format_3d"
#define EDID_3D_INFO_FILE "/sys/class/graphics/fb0/3d_present"
#else
#define FORMAT_3D_FILE "/sys/class/graphics/fb1/format_3d"
#define EDID_3D_INFO_FILE "/sys/class/graphics/fb1/3d_present"
#endif
#define BARRIER_FILE "/sys/devices/platform/mipi_novatek.0/enable_3d_barrier"
/* -------------------------- end 3D defines ----------------------------------------*/
// Struct to hold the buffer info: geometry and size
struct overlay_buffer_info {
    int width;   // pixel width of the source buffer
    int height;  // pixel height of the source buffer
    int format;  // pixel format (HAL/gralloc format value)
    int size;    // total buffer size in bytes
};
using android::Mutex;
namespace overlay {
#define FB_DEVICE_TEMPLATE "/dev/graphics/fb%u"
//Utility Class to query the framebuffer info
// Lazily-constructed singleton: the private constructor opens fb0 once,
// reads the panel resolution via FBIOGET_VSCREENINFO and queries overlay
// capabilities via MSMFB_OVERLAY_GET, then closes the fd. On any failure
// the fields keep their zero/false defaults. The instance is never freed.
class FrameBufferInfo {
    int mFBWidth;
    int mFBHeight;
    bool mBorderFillSupported;
    static FrameBufferInfo *sFBInfoInstance;

    FrameBufferInfo():mFBWidth(0),mFBHeight(0), mBorderFillSupported(false) {
        char const * const device_name =
            "/dev/graphics/fb0";
        int fd = open(device_name, O_RDWR, 0);
        mdp_overlay ov;
        memset(&ov, 0, sizeof(ov));
        if (fd < 0) {
            LOGE("FrameBufferInfo: Cant open framebuffer ");
            return;
        }
        fb_var_screeninfo vinfo;
        if (ioctl(fd, FBIOGET_VSCREENINFO, &vinfo) == -1) {
            LOGE("FrameBufferInfo: FBIOGET_VSCREENINFO on fb0 failed");
            close(fd);
            fd = -1;
            return;
        }
        // Query overlay id 1 just to read back the driver capability flags.
        ov.id = 1;
        if(ioctl(fd, MSMFB_OVERLAY_GET, &ov)) {
            LOGE("FrameBufferInfo: MSMFB_OVERLAY_GET on fb0 failed");
            close(fd);
            fd = -1;
            return;
        }
        close(fd);
        fd = -1;
        mFBWidth = vinfo.xres;
        mFBHeight = vinfo.yres;
        mBorderFillSupported = (ov.flags & MDP_BORDERFILL_SUPPORTED) ?
                true : false;
    }

    public:
    // NOTE: lazy init is not thread-safe; presumably first use happens on a
    // single thread — confirm before relying on it from multiple threads.
    static FrameBufferInfo* getInstance(){
        if (!sFBInfoInstance){
            sFBInfoInstance = new FrameBufferInfo;
        }
        return sFBInfoInstance;
    }
    int getWidth() const { return mFBWidth; }
    int getHeight() const { return mFBHeight; }
    // Border-fill support implies "true mirroring" can use a third pipe.
    bool canSupportTrueMirroring() const {
        return mBorderFillSupported; }
};
enum {
OV_UI_MIRROR_TV = 0,
OV_2D_VIDEO_ON_PANEL,
OV_2D_VIDEO_ON_TV,
OV_3D_VIDEO_2D_PANEL,
OV_3D_VIDEO_2D_TV,
OV_3D_VIDEO_3D_PANEL,
OV_3D_VIDEO_3D_TV
};
bool isHDMIConnected();
bool is3DTV();
bool isPanel3D();
bool usePanel3D();
bool send3DInfoPacket(unsigned int format3D);
bool enableBarrier(unsigned int orientation);
unsigned int getOverlayConfig (unsigned int format3D, bool poll = true,
bool isHDMI = false);
int getColorFormat(int format);
bool isInterlacedContent(int format);
int get_mdp_format(int format);
int get_size(int format, int w, int h);
int get_rot_output_format(int format);
int get_mdp_orientation(int value);
void normalize_crop(uint32_t& xy, uint32_t& wh);
//Initializes the overlay - cleans up any existing overlay pipes
int initOverlay();
/* Print values being sent to driver in case of ioctl failures
These logs are enabled only if DEBUG_OVERLAY is true */
void dump(msm_rotator_img_info& mRotInfo);
void dump(mdp_overlay& mOvInfo);
const char* getFormatString(int format);
//singleton class to decide the z order of new overlay surfaces
class ZOrderManager {
bool mFB0Pipes[NUM_CHANNELS];
bool mFB1Pipes[NUM_CHANNELS+1]; //FB1 can have 3 pipes
int mPipesInuse; // Holds the number of pipes in use
int mMaxPipes; // Max number of pipes
static ZOrderManager *sInstance;
Mutex *mObjMutex;
ZOrderManager(){
mPipesInuse = 0;
// for true mirroring support there can be 3 pipes on secondary
mMaxPipes = FrameBufferInfo::getInstance()->canSupportTrueMirroring()?
NUM_CHANNELS+1 : NUM_CHANNELS;
for (int i = 0; i < NUM_CHANNELS; i++)
mFB0Pipes[i] = false;
for (int j = 0; j < mMaxPipes; j++)
mFB1Pipes[j] = false;
mObjMutex = new Mutex();
}
~ZOrderManager() {
delete sInstance;
delete mObjMutex;
}
public:
static ZOrderManager* getInstance(){
if (!sInstance){
sInstance = new ZOrderManager;
}
return sInstance;
}
int getZ(int fbnum);
void decZ(int fbnum, int zorder);
};
const int max_num_buffers = 3;
typedef struct mdp_rect overlay_rect;
// Control-path wrapper around one MDP overlay session (and its optional
// rotator session): owns the fb/rotator fds, the mdp_overlay/rotator
// configuration structs, and exposes setup, position, transform and
// visual-parameter operations. The data path lives in OverlayDataChannel.
class OverlayControlChannel {
    // Identifiers for visual parameters handled by commitVisualParam().
    enum {
        SET_NONE = 0,
        SET_SHARPNESS,
#ifdef USES_POST_PROCESSING
        SET_HUE,
        SET_BRIGHTNESS,
        SET_SATURATION,
        SET_CONTRAST,
#endif
        RESET_ALL,
    };
    bool mNoRot;            // true when no rotator session is needed
    int mFBNum;             // target framebuffer index
    int mFBWidth;
    int mFBHeight;
    int mFBbpp;
    int mFBystride;
    int mFormat;
    int mFD;                // framebuffer device fd (-1/<=0 when closed)
    int mRotFD;             // rotator device fd
    int mSize;
    int mOrientation;
    unsigned int mFormat3D; // packed input/output 3D format (see 3D defines)
    bool mUIChannel;
#ifdef USES_POST_PROCESSING
    struct display_pp_conv_cfg hsic_cfg;
#endif
    mdp_overlay mOVInfo;
    msm_rotator_img_info mRotInfo;
    msmfb_overlay_3d m3DOVInfo;
    bool mIsChannelUpdated;

    bool openDevices(int fbnum = -1);
    bool setOverlayInformation(const overlay_buffer_info& info,
            int zorder = 0, int flags = 0,
            int requestType = NEW_REQUEST);
    bool startOVRotatorSessions(const overlay_buffer_info& info, int requestType);
    void swapOVRotWidthHeight();
    int commitVisualParam(int8_t paramType, float paramValue);
    void setInformationFromFlags(int flags, mdp_overlay& ov);

    public:
    OverlayControlChannel();
    ~OverlayControlChannel();
    // Opens devices and configures the overlay (and rotator) sessions.
    bool startControlChannel(const overlay_buffer_info& info,
            int fbnum, bool norot = false,
            bool uichannel = false,
            unsigned int format3D = 0, int zorder = 0,
            int flags = 0);
    bool closeControlChannel();
    bool setPosition(int x, int y, uint32_t w, uint32_t h);
    bool setTransform(int value, bool fetch = true);
    void setSize (int size) { mSize = size; }
    bool getPosition(int& x, int& y, uint32_t& w, uint32_t& h);
    bool getOvSessionID(int& sessionID) const;
    bool getRotSessionID(int& sessionID) const;
    bool getSize(int& size) const;
    bool isChannelUP() const { return (mFD > 0); }
    int getFBWidth() const { return mFBWidth; }
    int getFBHeight() const { return mFBHeight; }
    int getFormat3D() const { return mFormat3D; }
    bool getOrientation(int& orientation) const;
    bool updateOverlayFlags(int flags);
    bool getAspectRatioPosition(int w, int h, overlay_rect *rect);
    // Calculates the aspect ratio for video on HDMI based on primary
    // aspect ratio used in case of true mirroring
    bool getAspectRatioPosition(int w, int h, int orientation,
            overlay_rect *inRect, overlay_rect *outRect);
    bool getPositionS3D(int channel, int format, overlay_rect *rect);
    bool updateOverlaySource(const overlay_buffer_info& info, int orientation, int flags);
    bool getFormat() const { return mFormat; }
    bool setVisualParam(int8_t paramType, float paramValue);
    bool useVirtualFB ();
    bool doFlagsNeedUpdate(int flags);
};
/*
 * OverlayDataChannel owns the "data" half of an MDP overlay: it maps
 * rotator memory, rotates incoming buffers when needed, and queues
 * them to the display via MSMFB_OVERLAY_PLAY.
 */
class OverlayDataChannel {
    bool mNoRot;       // true when buffers bypass the rotator
    bool mSecure;      // secure (protected) buffer path
    int mFD;           // framebuffer device fd
    int mRotFD;        // rotator device fd
    int mPmemFD;       // fd of the rotator output memory
    void* mPmemAddr;   // mapped base of the rotator output memory
    uint32_t mPmemOffset;
    uint32_t mNewPmemOffset;
    msmfb_overlay_data mOvData;     // play request (direct path)
    msmfb_overlay_data mOvDataRot;  // play request (rotated path)
    msm_rotator_data_info mRotData;
    int mRotOffset[max_num_buffers];// per-buffer offsets into pmem
    int mCurrentItem;               // round-robin rotation buffer index
    int mNumBuffers;
    bool mUpdateDataChannel;
    android::sp<gralloc::IAllocController> mAlloc;
    int mBufferType;                // allocator heap type for pmem
    bool openDevices(int fbnum = -1, bool uichannel = false, int num_buffers = 2);
    bool mapRotatorMemory(int num_buffers, bool uiChannel, int requestType);
    bool queue(uint32_t offset);
public:
    OverlayDataChannel();
    ~OverlayDataChannel();
    bool startDataChannel(const OverlayControlChannel& objOvCtrlChannel,
                          int fbnum, bool norot = false, bool secure = false,
                          bool uichannel = false, int num_buffers = 2);
    bool startDataChannel(int ovid, int rotid, int size,
                          int fbnum, bool norot = false, bool uichannel = false,
                          int num_buffers = 2);
    bool closeDataChannel();
    bool setFd(int fd);
    bool queueBuffer(uint32_t offset);
    bool waitForHdmiVsync();
    bool setCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h);
    bool getCropS3D(overlay_rect *inRect, int channel, int format, overlay_rect *rect);
    bool isChannelUP() const { return (mFD > 0); }
    bool updateDataChannel(int size);
};
/*
 * Overlay class for single thread application
 * A multiple thread/process application need to use Overlay HAL
 */
class Overlay {
    bool mChannelUP;
    //stores the connected external display Ex: HDMI(1) WFD(2)
    int mExternalDisplay;
    unsigned int mS3DFormat;
    //Actual cropped source width and height of overlay
    int mCroppedSrcWidth;
    int mCroppedSrcHeight;
    overlay_buffer_info mOVBufferInfo;
    int mState;
    // Stores the current device orientation
    int mDevOrientation;
    // Index 0: primary display, index 1: external display channel
    OverlayControlChannel objOvCtrlChannel[2];
    OverlayDataChannel objOvDataChannel[2];
public:
    Overlay();
    ~Overlay();
    static bool sHDMIAsPrimary;
    bool startChannel(const overlay_buffer_info& info, int fbnum, bool norot = false,
                      bool uichannel = false, unsigned int format3D = 0,
                      int channel = 0, int flags = 0,
                      int num_buffers = 2);
    bool closeChannel();
    bool setDeviceOrientation(int orientation);
    bool setPosition(int x, int y, uint32_t w, uint32_t h);
    bool setTransform(int value);
    bool setOrientation(int value, int channel = 0);
    bool setFd(int fd, int channel = 0);
    bool queueBuffer(uint32_t offset, int channel = 0);
    bool getPosition(int& x, int& y, uint32_t& w, uint32_t& h, int channel = 0);
    bool isChannelUP() const { return mChannelUP; }
    int getFBWidth(int channel = 0) const;
    int getFBHeight(int channel = 0) const;
    bool getOrientation(int& orientation, int channel = 0) const;
    bool queueBuffer(buffer_handle_t buffer);
    bool setSource(const overlay_buffer_info& info, int orientation, int hdmiConnected,
                   int flags, int numBuffers = 2);
    bool getAspectRatioPosition(int w, int h, overlay_rect *rect, int channel = 0);
    bool setCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h);
    bool updateOverlayFlags(int flags);
    void setVisualParam(int8_t paramType, float paramValue);
    bool waitForHdmiVsync(int channel);
    int getChannelStatus() const { return (mChannelUP ? OVERLAY_CHANNEL_UP: OVERLAY_CHANNEL_DOWN); }
    void closeExternalChannel();
private:
    bool setChannelPosition(int x, int y, uint32_t w, uint32_t h, int channel = 0);
    bool setChannelCrop(uint32_t x, uint32_t y, uint32_t w, uint32_t h, int channel);
    bool queueBuffer(int fd, uint32_t offset, int channel);
    bool updateOverlaySource(const overlay_buffer_info& info, int orientation, int flags);
    int getS3DFormat(int format);
};
// State shared across processes for an overlay session (session ids
// for both channels plus a setup flag polled by the data side).
struct overlay_shared_data {
    volatile bool isControlSetup; // set once the control channel is ready
    unsigned int state;
    int rotid[2];                 // rotator session ids per channel
    int ovid[2];                  // overlay session ids per channel
};
};
#endif

482
liboverlay/overlayLibUI.cpp Executable file
View File

@ -0,0 +1,482 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "overlayLibUI.h"
#include "gralloc_priv.h"
#define LOG_TAG "OverlayUI"
using android::sp;
using gralloc::IMemAlloc;
using gralloc::alloc_data;
namespace {
/* helper functions */
// Exchange width and height on the overlay source, the overlay source
// rect, and the rotator destination — needed for 90/270 rotations.
void swapOVRotWidthHeight(msm_rotator_img_info& rotInfo,
                          mdp_overlay& ovInfo) {
    int savedW;

    savedW = ovInfo.src.width;
    ovInfo.src.width  = ovInfo.src.height;
    ovInfo.src.height = savedW;

    savedW = ovInfo.src_rect.w;
    ovInfo.src_rect.w = ovInfo.src_rect.h;
    ovInfo.src_rect.h = savedW;

    savedW = rotInfo.dst.width;
    rotInfo.dst.width  = rotInfo.dst.height;
    rotInfo.dst.height = savedW;
}
// True for the RGB MDP pixel formats this module supports.
bool isRGBType(int format) {
    return format == MDP_RGBA_8888 ||
           format == MDP_BGRA_8888 ||
           format == MDP_RGBX_8888 ||
           format == MDP_RGB_565;
}
// Bytes per pixel for the supported RGB formats; -1 if unrecognized.
int getRGBBpp(int format) {
    if (format == MDP_RGBA_8888 ||
        format == MDP_BGRA_8888 ||
        format == MDP_RGBX_8888)
        return 4;
    if (format == MDP_RGB_565)
        return 2;
    return -1;
}
// True when vsync should be bypassed, i.e. debug.gr.swapinterval == 0.
// The property is read once and cached in a function-local static.
// NOTE(review): the lazy init is not thread-safe for concurrent first
// callers — harmless (same value written twice) but worth confirming.
bool turnOFFVSync() {
    static int swapIntervalPropVal = -1;
    if (swapIntervalPropVal == -1) {
        char pval[PROPERTY_VALUE_MAX];
        property_get("debug.gr.swapinterval", pval, "1");
        swapIntervalPropVal = atoi(pval);
    }
    return (swapIntervalPropVal == 0);
}
};
namespace overlay {
// Open /dev/graphics/fbN (per FB_DEVICE_TEMPLATE) and cache its
// geometry. Idempotent: returns NO_ERROR immediately if already open.
// Returns NO_INIT when the open or the VSCREENINFO query fails.
status_t Display::openDisplay(int fbnum) {
    if (mFD != NO_INIT)
        return NO_ERROR;
    status_t ret = NO_INIT;
    char dev_name[64];
    snprintf(dev_name, 64, FB_DEVICE_TEMPLATE, fbnum);
    mFD = open(dev_name, O_RDWR, 0);
    if (mFD < 0) {
        LOGE("Failed to open FB %d", fbnum);
        return ret;
    }
    // Query width/height/bpp; roll back to the closed state on failure.
    fb_var_screeninfo vinfo;
    if (ioctl(mFD, FBIOGET_VSCREENINFO, &vinfo)) {
        LOGE("FBIOGET_VSCREENINFO on failed on FB %d", fbnum);
        close(mFD);
        mFD = NO_INIT;
        return ret;
    }
    mFBWidth = vinfo.xres;
    mFBHeight = vinfo.yres;
    mFBBpp = vinfo.bits_per_pixel;
    ret = NO_ERROR;
    return ret;
}
// Close the framebuffer fd (if any) and mark the display closed.
// Guard added: the original unconditionally close()d mFD, issuing a
// bogus close() on the NO_INIT sentinel when nothing was ever opened.
void Display::closeDisplay() {
    if (mFD != NO_INIT) {
        close(mFD);
        mFD = NO_INIT;
    }
}
// All session state starts at the NO_INIT sentinel; the gralloc
// allocator controller is fetched once at construction.
Rotator::Rotator() : mFD(NO_INIT), mSessionID(NO_INIT), mPmemFD(NO_INIT)
{
    mAlloc = gralloc::IAllocController::getInstance(false);
}
// Tear down any live rotator session (and its memory) on destruction.
Rotator::~Rotator()
{
    closeRotSession();
}
// Open /dev/msm_rotator, start a session described by rotInfo, and
// allocate `numBuffers` output buffers of `size` bytes each.
// Returns NO_ERROR on success or if a session already exists;
// NO_INIT on any failure (device, ioctl, or memory allocation).
status_t Rotator::startRotSession(msm_rotator_img_info& rotInfo,
                                  int size, int numBuffers) {
    status_t ret = NO_ERROR;
    if (mSessionID == NO_INIT && mFD == NO_INIT) {
        mNumBuffers = numBuffers;
        // BUGFIX: record the per-buffer size. The original never
        // assigned mSize, so the allocation and offsets below used an
        // uninitialized/stale value.
        mSize = size;
        mFD = open("/dev/msm_rotator", O_RDWR, 0);
        if (mFD < 0) {
            LOGE("Couldnt open rotator device");
            return NO_INIT;
        }
        if (ioctl(mFD, MSM_ROTATOR_IOCTL_START, &rotInfo)) {
            close(mFD);
            mFD = NO_INIT;
            return NO_INIT;
        }
        mSessionID = rotInfo.session_id;
        // One contiguous allocation holding all rotation buffers.
        alloc_data data;
        data.base = 0;
        data.fd = -1;
        data.offset = 0;
        data.size = mSize * mNumBuffers;
        data.align = getpagesize();
        data.uncached = true;
        int allocFlags = GRALLOC_USAGE_PRIVATE_MM_HEAP |
                         GRALLOC_USAGE_PRIVATE_WRITEBACK_HEAP |
                         GRALLOC_USAGE_PRIVATE_ADSP_HEAP |
                         GRALLOC_USAGE_PRIVATE_IOMMU_HEAP |
                         GRALLOC_USAGE_PRIVATE_SMI_HEAP |
                         GRALLOC_USAGE_PRIVATE_DO_NOT_MAP;
        int err = mAlloc->allocate(data, allocFlags, 0);
        if(err) {
            LOGE("%s: Can't allocate rotator memory", __func__);
            closeRotSession();
            return NO_INIT;
        }
        mPmemFD = data.fd;
        mPmemAddr = data.base;
        mBufferType = data.allocType;
        // Round-robin dispatch starts at buffer 0.
        mCurrentItem = 0;
        for (int i = 0; i < mNumBuffers; i++)
            mRotOffset[i] = i * mSize;
        ret = NO_ERROR;
    }
    return ret;
}
// Finish the rotator session, free its buffer memory, and reset all
// state to NO_INIT. Safe to call when no session is active.
status_t Rotator::closeRotSession() {
    if (mSessionID != NO_INIT && mFD != NO_INIT) {
        ioctl(mFD, MSM_ROTATOR_IOCTL_FINISH, &mSessionID);
        close(mFD);
        if (NO_INIT != mPmemFD) {
            // Return the buffer to the allocator that produced it.
            sp<IMemAlloc> memalloc = mAlloc->getAllocator(mBufferType);
            memalloc->free_buffer(mPmemAddr, mSize * mNumBuffers, 0, mPmemFD);
            close(mPmemFD);
        }
    }
    // Sentinels are reset unconditionally so a half-open state cannot
    // survive this call.
    mFD = NO_INIT;
    mSessionID = NO_INIT;
    mPmemFD = NO_INIT;
    mPmemAddr = MAP_FAILED;
    return NO_ERROR;
}
// Rotate one buffer: the caller fills rotData.src; this selects the
// next round-robin output buffer, runs the rotate ioctl, and leaves
// the result's location in rotData.dst. NO_INIT if no session.
status_t Rotator::rotateBuffer(msm_rotator_data_info& rotData) {
    status_t ret = NO_INIT;
    if (mSessionID != NO_INIT) {
        rotData.dst.memory_id = mPmemFD;
        rotData.dst.offset = mRotOffset[mCurrentItem];
        rotData.session_id = mSessionID;
        mCurrentItem = (mCurrentItem + 1) % mNumBuffers;
        if (ioctl(mFD, MSM_ROTATOR_IOCTL_ROTATE, &rotData)) {
            LOGE("Rotator failed to rotate");
            return BAD_VALUE;
        }
        return NO_ERROR;
    }
    return ret;
}
//===================== OverlayUI =================//
// Start closed with all parameters unset; the kernel request structs
// are zeroed so stale fields can never reach an ioctl.
OverlayUI::OverlayUI() : mChannelState(CLOSED), mOrientation(NO_INIT),
    mFBNum(NO_INIT), mZorder(NO_INIT), mWaitForVsync(false), mIsFg(false),
    mSessionID(NO_INIT), mParamsChanged(false) {
    memset(&mOvInfo, 0, sizeof(mOvInfo));
    memset(&mRotInfo, 0, sizeof(mRotInfo));
}
// Ensure the overlay and rotator sessions are torn down.
OverlayUI::~OverlayUI() {
    closeChannel();
}
// Record the source buffer geometry and orientation. Only RGB, non-3D
// sources are accepted. The XOR terms flag any field that differs from
// the cached value so commit() knows a new OVERLAY_SET is needed.
void OverlayUI::setSource(const overlay_buffer_info& info, int orientation) {
    status_t ret = NO_INIT;
    int format3D = FORMAT_3D(info.format);
    int colorFormat = COLOR_FORMAT(info.format);
    int format = get_mdp_format(colorFormat);
    if (format3D || !isRGBType(format)) {
        LOGE("%s: Unsupported format", __func__);
        return;
    }
    // Any nonzero XOR means the field changed.
    mParamsChanged |= (mSource.width ^ info.width) ||
                      (mSource.height ^ info.height) ||
                      (mSource.format ^ format) ||
                      (mSource.size ^ info.size) ||
                      (mOrientation ^ orientation);
    mSource.width = info.width;
    mSource.height = info.height;
    mSource.format = format;
    mSource.size = info.size;
    mOrientation = orientation;
    // Recompute the overlay/rotator request structs for the new source.
    setupOvRotInfo();
}
// Record the target framebuffer and per-frame overlay flags, tracking
// whether anything changed so commit() can skip a redundant set, and
// open the target display.
// Cleanup: the original's `else flags &= ~...` branches were dead code
// (flags starts at 0), so they are removed here; behavior is unchanged.
void OverlayUI::setDisplayParams(int fbNum, bool waitForVsync, bool isFg, int
                                 zorder, bool isVGPipe) {
    int flags = 0;
    if (!waitForVsync)
        flags |= MDP_OV_PLAY_NOWAIT;
    if (isVGPipe)
        flags |= MDP_OV_PIPE_SHARE;
    // Global vsync-off debug property overrides the caller's request.
    if (turnOFFVSync())
        flags |= MDP_OV_PLAY_NOWAIT;
    mParamsChanged |= (mFBNum ^ fbNum) ||
                      (mOvInfo.is_fg ^ isFg) ||
                      (mOvInfo.flags ^ flags) ||
                      (mOvInfo.z_order ^ zorder);
    mFBNum = fbNum;
    mOvInfo.is_fg = isFg;
    mOvInfo.flags = flags;
    mOvInfo.z_order = zorder;
    mobjDisplay.openDisplay(mFBNum);
}
// Set the overlay destination rectangle, remembering whether any
// coordinate actually changed so commit() can skip a redundant set.
void OverlayUI::setPosition(int x, int y, int w, int h) {
    const bool moved = (mOvInfo.dst_rect.x != x) ||
                       (mOvInfo.dst_rect.y != y) ||
                       (mOvInfo.dst_rect.w != w) ||
                       (mOvInfo.dst_rect.h != h);
    if (moved)
        mParamsChanged = true;
    mOvInfo.dst_rect.x = x;
    mOvInfo.dst_rect.y = y;
    mOvInfo.dst_rect.w = w;
    mOvInfo.dst_rect.h = h;
}
// Set the overlay source crop rectangle, remembering whether any
// coordinate actually changed so commit() can skip a redundant set.
void OverlayUI::setCrop(int x, int y, int w, int h) {
    const bool changed = (mOvInfo.src_rect.x != x) ||
                         (mOvInfo.src_rect.y != y) ||
                         (mOvInfo.src_rect.w != w) ||
                         (mOvInfo.src_rect.h != h);
    if (changed)
        mParamsChanged = true;
    mOvInfo.src_rect.x = x;
    mOvInfo.src_rect.y = y;
    mOvInfo.src_rect.w = w;
    mOvInfo.src_rect.h = h;
}
// Rebuild the overlay and rotator request structs from the cached
// source geometry and orientation. Width/height are padded to 32-pixel
// multiples; for 90/270 rotations the src_rect origin is remapped into
// the rotated coordinate space and widths/heights are swapped.
void OverlayUI::setupOvRotInfo() {
    int w = mSource.width;
    int h = mSource.height;
    int format = mSource.format;
    // Align dimensions up to the next multiple of 32.
    int srcw = (w + 31) & ~31;
    int srch = (h + 31) & ~31;
    mOvInfo.src.width = srcw;
    mOvInfo.src.height = srch;
    mOvInfo.src.format = format;
    mOvInfo.src_rect.w = w;
    mOvInfo.src_rect.h = h;
    mOvInfo.alpha = 0xff;            // fully opaque
    mOvInfo.transp_mask = 0xffffffff; // no transparency color key
    mRotInfo.src.format = format;
    mRotInfo.dst.format = format;
    mRotInfo.src.width = srcw;
    mRotInfo.src.height = srch;
    mRotInfo.src_rect.w = srcw;
    mRotInfo.src_rect.h = srch;
    mRotInfo.dst.width = srcw;
    mRotInfo.dst.height = srch;
    int rot = mOrientation;
    switch(rot) {
        // Pure flips need no rotator pass here.
        case 0:
        case HAL_TRANSFORM_FLIP_H:
        case HAL_TRANSFORM_FLIP_V:
            rot = 0;
            break;
        case HAL_TRANSFORM_ROT_90:
        case (HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_H):
        case (HAL_TRANSFORM_ROT_90|HAL_TRANSFORM_FLIP_V): {
            // Remap the crop origin into the 90-degree-rotated space.
            int tmp = mOvInfo.src_rect.x;
            mOvInfo.src_rect.x = mOvInfo.src.height -
                (mOvInfo.src_rect.y + mOvInfo.src_rect.h);
            mOvInfo.src_rect.y = tmp;
            swapOVRotWidthHeight(mRotInfo, mOvInfo);
            rot = HAL_TRANSFORM_ROT_90;
            break;
        }
        case HAL_TRANSFORM_ROT_180:
            break;
        case HAL_TRANSFORM_ROT_270: {
            // Remap the crop origin into the 270-degree-rotated space.
            int tmp = mOvInfo.src_rect.y;
            mOvInfo.src_rect.y = mOvInfo.src.width -
                (mOvInfo.src_rect.x + mOvInfo.src_rect.w);
            mOvInfo.src_rect.x = tmp;
            swapOVRotWidthHeight(mRotInfo, mOvInfo);
            break;
        }
        default:
            break;
    }
    // Translate the HAL transform into the MDP encoding; the rotator
    // is only enabled when an actual rotation is required.
    int mdp_rotation = overlay::get_mdp_orientation(rot);
    if (mdp_rotation < 0)
        mdp_rotation = 0;
    mOvInfo.user_data[0] = mdp_rotation;
    mRotInfo.rotations = mOvInfo.user_data[0];
    if (mdp_rotation)
        mRotInfo.enable = 1;
}
// Push the cached parameters to the driver: (re)issue OVERLAY_SET and,
// when a rotation is configured, start the rotator session. On success
// the channel is marked UP.
status_t OverlayUI::commit() {
    status_t ret = BAD_VALUE;
    // A fresh channel must request a new overlay id from the driver.
    if(mChannelState != UP)
        mOvInfo.id = MSMFB_NEW_REQUEST;
    ret = startOVSession();
    if (ret == NO_ERROR && mOrientation) {
        ret = mobjRotator.startRotSession(mRotInfo, mSource.size);
    }
    if (ret == NO_ERROR) {
        mChannelState = UP;
    } else {
        LOGE("start channel failed.");
    }
    return ret;
}
// Tear down both the overlay and rotator sessions and reset all cached
// request state. No-op (NO_ERROR) when the channel is not up.
status_t OverlayUI::closeChannel() {
    if( mChannelState != UP ) {
        return NO_ERROR;
    }
    if(NO_ERROR != closeOVSession()) {
        LOGE("%s: closeOVSession() failed.", __FUNCTION__);
        return BAD_VALUE;
    }
    if(NO_ERROR != mobjRotator.closeRotSession()) {
        LOGE("%s: closeRotSession() failed.", __FUNCTION__);
        return BAD_VALUE;
    }
    mChannelState = CLOSED;
    // Force a full re-set of parameters on the next commit().
    mParamsChanged = false;
    memset(&mOvInfo, 0, sizeof(mOvInfo));
    memset(&mRotInfo, 0, sizeof(mRotInfo));
    return NO_ERROR;
}
// Issue MSMFB_OVERLAY_SET if any parameter changed since the last
// successful set; otherwise just ensures the display is open and
// returns its status. Caches the driver-assigned session id.
status_t OverlayUI::startOVSession() {
    status_t ret = NO_INIT;
    ret = mobjDisplay.openDisplay(mFBNum);
    if (ret != NO_ERROR)
        return ret;
    if(mParamsChanged) {
        mParamsChanged = false;
        // The ioctl rewrites the struct (e.g. fills in .id), so work on
        // a copy and adopt it only on success.
        mdp_overlay ovInfo = mOvInfo;
        if (ioctl(mobjDisplay.getFD(), MSMFB_OVERLAY_SET, &ovInfo)) {
            LOGE("Overlay set failed..");
            ret = BAD_VALUE;
        } else {
            mSessionID = ovInfo.id;
            mOvInfo = ovInfo;
            ret = NO_ERROR;
        }
    }
    return ret;
}
// Unset the overlay session and close the display on success.
// Idiom fix: the original tested an assignment inside the if-condition
// (`if(err = ioctl(...))`), which triggers -Wparentheses and obscures
// intent; the assignment and the test are now separate statements.
status_t OverlayUI::closeOVSession() {
    status_t ret = NO_ERROR;
    int err = ioctl(mobjDisplay.getFD(), MSMFB_OVERLAY_UNSET, &mSessionID);
    if (err) {
        LOGE("%s: MSMFB_OVERLAY_UNSET failed. (%d)", __FUNCTION__, err);
        ret = BAD_VALUE;
    } else {
        mobjDisplay.closeDisplay();
        mSessionID = NO_INIT;
    }
    return ret;
}
// Queue one gralloc buffer for display. If an orientation is set the
// buffer is first passed through the rotator and the rotator's output
// buffer is what gets played. NO_INIT unless the channel is UP.
status_t OverlayUI::queueBuffer(buffer_handle_t buffer) {
    status_t ret = NO_INIT;
    if (mChannelState != UP)
        return ret;
    msmfb_overlay_data ovData;
    memset(&ovData, 0, sizeof(ovData));
    private_handle_t const* hnd = reinterpret_cast
        <private_handle_t const*>(buffer);
    ovData.data.memory_id = hnd->fd;
    ovData.data.offset = hnd->offset;
    if (mOrientation) {
        msm_rotator_data_info rotData;
        memset(&rotData, 0, sizeof(rotData));
        rotData.src.memory_id = hnd->fd;
        rotData.src.offset = hnd->offset;
        if (mobjRotator.rotateBuffer(rotData) != NO_ERROR) {
            LOGE("Rotator failed.. ");
            return BAD_VALUE;
        }
        // Play the rotated copy instead of the original buffer.
        ovData.data.memory_id = rotData.dst.memory_id;
        ovData.data.offset = rotData.dst.offset;
    }
    ovData.id = mSessionID;
    if (ioctl(mobjDisplay.getFD(), MSMFB_OVERLAY_PLAY, &ovData)) {
        LOGE("Queuebuffer failed ");
        return BAD_VALUE;
    }
    return NO_ERROR;
}
};

140
liboverlay/overlayLibUI.h Normal file
View File

@ -0,0 +1,140 @@
/*
* Copyright (C) 2008 The Android Open Source Project
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef INCLUDE_OVERLAY_LIB_UI
#define INCLUDE_OVERLAY_LIB_UI
#include <errno.h>
#include "overlayLib.h"
namespace overlay {
// Lifecycle states of an overlay channel.
enum channel_state_t { UP, CLOSED, PENDING_CLOSE };
// Local status codes mapped onto negative errno values, so they can be
// mixed with raw driver return codes.
enum status_t {
    NO_ERROR,
    INVALID_OPERATION = -ENOSYS,
    BAD_VALUE = -EINVAL,
    NO_INIT = -ENODEV,
    ALREADY_EXISTS = -EEXIST
};
/*
* Display class provides following services
* Open FB
* FB information (Width, Height and Bpp)
*/
/*
 * Display: opens one framebuffer device and exposes its fd and
 * geometry (width, height, bpp). Non-copyable.
 */
class Display {
    int mFD;       // framebuffer fd; NO_INIT when closed
    int mFBWidth;
    int mFBHeight;
    int mFBBpp;
    Display(const Display& objDisplay);             // non-copyable
    Display& operator=(const Display& objDisplay);
public:
    // Geometry now starts at 0 (the original left these members
    // uninitialized until openDisplay() succeeded).
    explicit Display() : mFD(NO_INIT), mFBWidth(0), mFBHeight(0), mFBBpp(0) { };
    // Guard added: only close a real fd, not the NO_INIT sentinel.
    ~Display() { if (mFD != NO_INIT) close(mFD); };
    int getFD() const { return mFD; };
    int getFBWidth() const { return mFBWidth; };
    int getFBHeight() const { return mFBHeight; };
    int getFBBpp() const { return mFBBpp; };
    status_t openDisplay(int fbnum);
    void closeDisplay();
};
/*
* Rotator class, manages rotation of the buffers
* It communicates with Rotator driver, provides following services
* Start rotator session
* Rotate buffer
*/
/*
 * Rotator class, manages rotation of the buffers
 * It communicates with Rotator driver, provides following services
 * Start rotator session
 * Rotate buffer
 */
class Rotator {
    int mFD;        // /dev/msm_rotator fd; NO_INIT when closed
    int mSessionID; // driver session id; NO_INIT when no session
    int mPmemFD;    // fd of the rotation output memory
    void* mPmemAddr;
    int mRotOffset[max_num_buffers]; // per-buffer offsets into the allocation
    int mCurrentItem;                // round-robin output buffer index
    int mNumBuffers;
    // NOTE(review): mSize is the per-buffer size used for allocation
    // and teardown; confirm it is assigned in startRotSession() — the
    // initial .cpp never sets it.
    int mSize;
    android::sp<gralloc::IAllocController> mAlloc;
    int mBufferType;                 // allocator heap type
    Rotator(const Rotator& objROtator);            // non-copyable
    Rotator& operator=(const Rotator& objRotator);
public:
    explicit Rotator();
    ~Rotator();
    status_t startRotSession(msm_rotator_img_info& rotInfo, int size,
                             int numBuffers = max_num_buffers);
    status_t closeRotSession();
    status_t rotateBuffer(msm_rotator_data_info& rotData);
};
/*
* Overlay class for Comp. Bypass
* We merge control and data channel classes.
*/
/*
 * Overlay class for Comp. Bypass
 * We merge control and data channel classes.
 * Usage: setSource/setPosition/setCrop/setDisplayParams, then commit(),
 * then queueBuffer() per frame; closeChannel() tears everything down.
 */
class OverlayUI {
    channel_state_t mChannelState;
    overlay_buffer_info mSource;  // cached source geometry (MDP format)
    int mZorder;
    int mOrientation;             // HAL transform; nonzero enables rotator
    int mFBNum;
    bool mWaitForVsync;
    bool mIsFg;
    int mSessionID;               // overlay id returned by OVERLAY_SET
    Display mobjDisplay;
    Rotator mobjRotator;
    mdp_overlay mOvInfo;          // pending MSMFB_OVERLAY_SET request
    msm_rotator_img_info mRotInfo;
    bool mParamsChanged;          // set by setters; consumed by commit()
    OverlayUI(const OverlayUI& objOverlay);            // non-copyable
    OverlayUI& operator=(const OverlayUI& objOverlay);
    status_t startOVSession();
    status_t closeOVSession();
    void setupOvRotInfo();
public:
    enum fbnum_t { FB0, FB1 };
    OverlayUI();
    ~OverlayUI();
    void setSource(const overlay_buffer_info& info, int orientation);
    void setPosition(int x, int y, int w, int h);
    void setCrop(int x, int y, int w, int h);
    void setDisplayParams(int fbNum, bool waitForVsync, bool isFg, int zorder,
                          bool isVGPipe);
    status_t commit();
    status_t closeChannel();
    channel_state_t isChannelUP() const { return mChannelState; };
    int getFBWidth() const { return mobjDisplay.getFBWidth(); };
    int getFBHeight() const { return mobjDisplay.getFBHeight(); };
    status_t queueBuffer(buffer_handle_t buffer);
};
};
#endif

31
libqcomui/Android.mk Normal file
View File

@ -0,0 +1,31 @@
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

LOCAL_SRC_FILES := \
    qcom_ui.cpp

LOCAL_SHARED_LIBRARIES := \
    libutils \
    libcutils \
    libui \
    libEGL \
    libskia

LOCAL_C_INCLUDES := $(TOP)/hardware/qcom/display/libgralloc \
                    $(TOP)/frameworks/native/services/surfaceflinger \
                    $(TOP)/external/skia/include/core \
                    $(TOP)/external/skia/include/images

LOCAL_CFLAGS := -DLOG_TAG=\"libQcomUI\"

# Non-QCOM targets compile out the gralloc-specific paths; QCOM targets
# additionally link the memory allocator library.
ifneq ($(call is-vendor-board-platform,QCOM),true)
LOCAL_CFLAGS += -DNON_QCOM_TARGET
else
LOCAL_SHARED_LIBRARIES += libmemalloc
endif

# Enables FPS calculation/debug instrumentation in qcom_ui.cpp.
LOCAL_CFLAGS += -DDEBUG_CALC_FPS

LOCAL_MODULE := libQcomUI
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)

937
libqcomui/qcom_ui.cpp Normal file
View File

@ -0,0 +1,937 @@
/*
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#include <cutils/log.h>
#include <cutils/memory.h>
#include <qcom_ui.h>
#include <gralloc_priv.h>
#include <alloc_controller.h>
#include <memalloc.h>
#include <errno.h>
#include <EGL/eglext.h>
#include <sys/stat.h>
#include <SkBitmap.h>
#include <SkImageEncoder.h>
#include <Transform.h>
#include <EGL/egl.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
using gralloc::IMemAlloc;
using gralloc::IonController;
using gralloc::alloc_data;
using android::sp;
static int sCompositionType = -1;
namespace {
static android::sp<gralloc::IAllocController> sAlloc = 0;
// Free the memory behind buffer_handle and allocate a fresh buffer of
// mReqSize bytes with the given usage, updating fd/base/offset/size in
// the handle in place. Returns 0 on success, negative errno on failure.
// On non-QCOM targets this is a no-op returning 0.
int reallocate_memory(native_handle_t *buffer_handle, int mReqSize, int usage)
{
    int ret = 0;
#ifndef NON_QCOM_TARGET
    // Allocator controller is created lazily and cached for the process.
    if (sAlloc == 0) {
        sAlloc = gralloc::IAllocController::getInstance(true);
    }
    if (sAlloc == 0) {
        ALOGE("sAlloc is still NULL");
        return -EINVAL;
    }
    // Dealloc the old memory
    private_handle_t *hnd = (private_handle_t *)buffer_handle;
    sp<IMemAlloc> memalloc = sAlloc->getAllocator(hnd->flags);
    ret = memalloc->free_buffer((void*)hnd->base, hnd->size, hnd->offset, hnd->fd);
    if (ret) {
        ALOGE("%s: free_buffer failed", __FUNCTION__);
        return -1;
    }
    // Realloc new memory
    alloc_data data;
    data.base = 0;
    data.fd = -1;
    data.offset = 0;
    data.size = mReqSize;
    data.align = getpagesize();
    data.uncached = true;
    int allocFlags = usage;
    switch (hnd->format) {
        // Tiled YUV (and its interlaced variant) needs 8K alignment.
        case HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED:
        case (HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED^HAL_PIXEL_FORMAT_INTERLACE): {
            data.align = 8192;
        } break;
        default: break;
    }
    ret = sAlloc->allocate(data, allocFlags, 0);
    if (ret == 0) {
        hnd->fd = data.fd;
        // NOTE(review): casting the mapped base to int truncates on
        // 64-bit builds — fine for this 32-bit-era HAL, but verify
        // before reusing on a 64-bit target.
        hnd->base = (int)data.base;
        hnd->offset = data.offset;
        hnd->size = data.size;
    } else {
        ALOGE("%s: allocate failed", __FUNCTION__);
        return -EINVAL;
    }
#endif
    return ret;
}
}; // ANONYNMOUS NAMESPACE
/*
* Gets the number of arguments required for this operation.
*
* @param: operation whose argument count is required.
*
* @return -EINVAL if the operation is invalid.
*/
/*
 * Gets the number of arguments required for this operation.
 *
 * @param: operation whose argument count is required.
 *
 * @return -EINVAL if the operation is invalid.
 */
int getNumberOfArgsForOperation(int operation) {
    switch (operation) {
        case NATIVE_WINDOW_SET_BUFFERS_SIZE:
            return 1;
        case NATIVE_WINDOW_UPDATE_BUFFERS_GEOMETRY:
            return 3;
    }
    ALOGE("%s: invalid operation(0x%x)", __FUNCTION__, operation);
    return -EINVAL;
}
/*
* Checks if the format is supported by the GPU.
*
* @param: format to check
*
* @return true if the format is supported by the GPU.
*/
/*
 * Checks if the format is supported by the GPU.
 * YV12 is tested first because Qcom-specific format bits can alias the
 * masks below; interlaced and S3D content cannot be textured.
 *
 * @param: format to check
 * @return true if the format is supported by the GPU.
 */
bool isGPUSupportedFormat(int format) {
    if (format == HAL_PIXEL_FORMAT_YV12)
        return true;
    const bool interlaced = (format & INTERLACE_MASK) != 0;
    const bool stereo3d   = (format & S3D_FORMAT_MASK) != 0;
    return !interlaced && !stereo3d;
}
/* decide the texture target dynamically, based on the pixel format*/
/* decide the texture target dynamically, based on the pixel format:
 * plain RGB formats bind as regular 2D textures, everything else
 * (YUV & friends) goes through the external-image path. */
int decideTextureTarget(int pixel_format)
{
    switch (pixel_format) {
        case HAL_PIXEL_FORMAT_RGBA_8888:
        case HAL_PIXEL_FORMAT_RGBX_8888:
        case HAL_PIXEL_FORMAT_RGB_888:
        case HAL_PIXEL_FORMAT_RGB_565:
        case HAL_PIXEL_FORMAT_BGRA_8888:
        case HAL_PIXEL_FORMAT_RGBA_5551:
        case HAL_PIXEL_FORMAT_RGBA_4444:
            return GL_TEXTURE_2D;
        default:
            return GL_TEXTURE_EXTERNAL_OES;
    }
}
/*
* Function to check if the allocated buffer is of the correct size.
* Reallocate the buffer with the correct size, if the size doesn't
* match
*
* @param: handle of the allocated buffer
* @param: requested size for the buffer
* @param: usage flags
*
* return 0 on success
*/
/*
 * Function to check if the allocated buffer is of the correct size.
 * Reallocate the buffer with the correct size, if the size doesn't
 * match
 *
 * @param: handle of the allocated buffer
 * @param: requested size for the buffer
 * @param: usage flags
 *
 * return 0 on success
 */
int checkBuffer(native_handle_t *buffer_handle, int size, int usage)
{
    // If the client hasn't set a size, return
    if (0 >= size) {
        return 0;
    }
    // Validate the handle
    if (private_handle_t::validate(buffer_handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return -EINVAL;
    }
    // Obtain the private_handle from the native handle; only reallocate
    // when the backing store's size differs from what was requested.
    private_handle_t *hnd = reinterpret_cast<private_handle_t*>(buffer_handle);
    if (hnd->size != size) {
        return reallocate_memory(hnd, size, usage);
    }
    return 0;
}
/*
* Checks if memory needs to be reallocated for this buffer.
*
* @param: Geometry of the current buffer.
* @param: Required Geometry.
* @param: Geometry of the updated buffer.
*
* @return True if a memory reallocation is required.
*/
/*
 * Checks if memory needs to be reallocated for this buffer.
 *
 * @param: Geometry of the current buffer.
 * @param: Required Geometry.
 * @param: Geometry of the updated buffer.
 *
 * @return True if a memory reallocation is required.
 */
bool needNewBuffer(const qBufGeometry currentGeometry,
                   const qBufGeometry requiredGeometry,
                   const qBufGeometry updatedGeometry)
{
    // If the current buffer info matches the updated info,
    // we do not require any memory allocation.
    // NOTE(review): this only tests that all updated fields are
    // nonzero, not that they match — confirm that a fully-populated
    // updatedGeometry is the intended "already updated" signal.
    if (updatedGeometry.width && updatedGeometry.height &&
        updatedGeometry.format) {
        return false;
    }
    if (currentGeometry.width != requiredGeometry.width ||
        currentGeometry.height != requiredGeometry.height ||
        currentGeometry.format != requiredGeometry.format) {
        // Current and required geometry do not match. Allocation
        // required.
        return true;
    }
    return false;
}
/*
* Update the geometry of this buffer without reallocation.
*
* @param: buffer whose geometry needs to be updated.
* @param: Updated width
* @param: Updated height
* @param: Updated format
*/
/*
 * Update the geometry of this buffer without reallocation.
 * Writes the new width/height/format into both the GraphicBuffer and
 * its underlying private_handle_t. No-op when the update is empty or
 * the buffer already carries the requested geometry.
 *
 * @param: buffer whose geometry needs to be updated.
 * @param: Updated geometry (width, height, format).
 * @return 0 on success, -EINVAL on a null/invalid buffer or handle.
 */
int updateBufferGeometry(sp<GraphicBuffer> buffer, const qBufGeometry updatedGeometry)
{
    if (buffer == 0) {
        ALOGE("%s: graphic buffer is NULL", __FUNCTION__);
        return -EINVAL;
    }
    if (!updatedGeometry.width || !updatedGeometry.height ||
        !updatedGeometry.format) {
        // No update required. Return.
        return 0;
    }
    if (buffer->width == updatedGeometry.width &&
        buffer->height == updatedGeometry.height &&
        buffer->format == updatedGeometry.format) {
        // The buffer has already been updated. Return.
        return 0;
    }
    // Validate the handle
    if (private_handle_t::validate(buffer->handle)) {
        ALOGE("%s: handle is invalid", __FUNCTION__);
        return -EINVAL;
    }
    buffer->width = updatedGeometry.width;
    buffer->height = updatedGeometry.height;
    buffer->format = updatedGeometry.format;
    // Keep the native handle's view of the geometry in sync.
    private_handle_t *hnd = (private_handle_t*)(buffer->handle);
    if (hnd) {
        hnd->width = updatedGeometry.width;
        hnd->height = updatedGeometry.height;
        hnd->format = updatedGeometry.format;
    } else {
        ALOGE("%s: hnd is NULL", __FUNCTION__);
        return -EINVAL;
    }
    return 0;
}
/* Update the S3D format of this buffer.
*
* @param: buffer whosei S3D format needs to be updated.
* @param: Updated buffer S3D format
*/
/* Update the S3D format of this buffer.
 * ORs the S3D bits into the buffer's format field in place.
 *
 * @param: buffer whose S3D format needs to be updated.
 * @param: Updated buffer S3D format
 * @return 0 on success, -EINVAL when the buffer is null.
 */
int updateBufferS3DFormat(sp<GraphicBuffer> buffer, const int s3dFormat)
{
    if (buffer == 0) {
        ALOGE("%s: graphic buffer is NULL", __FUNCTION__);
        return -EINVAL;
    }
    buffer->format |= s3dFormat;
    return 0;
}
/*
* Updates the flags for the layer
*
* @param: Attribute
* @param: Identifies if the attribute was enabled or disabled.
*
* @return: -EINVAL if the attribute is invalid
*/
/*
 * Updates the flags for the layer: maps the attribute to its flag bit
 * and sets or clears that bit in currentFlags.
 *
 * @param: Attribute
 * @param: Identifies if the attribute was enabled or disabled.
 * @return: 0 (unknown attributes are logged and leave flags untouched).
 */
int updateLayerQcomFlags(eLayerAttrib attribute, bool enable, int& currentFlags)
{
    int bit = 0;
    switch (attribute) {
        case LAYER_UPDATE_STATUS:
            bit = LAYER_UPDATING;
            break;
        case LAYER_ASYNCHRONOUS_STATUS:
            bit = LAYER_ASYNCHRONOUS;
            break;
        default:
            ALOGE("%s: invalid attribute(0x%x)", __FUNCTION__, attribute);
            return 0;
    }
    if (enable)
        currentFlags |= bit;
    else
        currentFlags &= ~bit;
    return 0;
}
/*
* Gets the per frame HWC flags for this layer.
*
* @param: current hwcl flags
* @param: current layerFlags
*
* @return: the per frame flags.
*/
/*
 * Gets the per frame HWC flags for this layer: mirrors the layer's
 * "updating" state into HWC_LAYER_NOT_UPDATING (inverted) and carries
 * the asynchronous hint through directly.
 *
 * @param: current hwcl flags
 * @param: current layerFlags
 * @return: the per frame flags.
 */
int getPerFrameFlags(int hwclFlags, int layerFlags) {
    int flags = hwclFlags;
    flags = (layerFlags & LAYER_UPDATING)
                ? (flags & ~HWC_LAYER_NOT_UPDATING)
                : (flags | HWC_LAYER_NOT_UPDATING);
    flags = (layerFlags & LAYER_ASYNCHRONOUS)
                ? (flags | HWC_LAYER_ASYNCHRONOUS)
                : (flags & ~HWC_LAYER_ASYNCHRONOUS);
    return flags;
}
/*
* Checks if FB is updated by this composition type
*
* @param: composition type
* @return: true if FB is updated, false if not
*/
/*
 * Checks if FB is updated by this composition type
 *
 * @param: composition type
 * @return: true if FB is updated, false if not
 */
bool isUpdatingFB(HWCCompositionType compositionType)
{
    if (compositionType == HWC_USE_COPYBIT)
        return true;
    ALOGE("%s: invalid composition type(%d)", __FUNCTION__, compositionType);
    return false;
}
/*
* Get the current composition Type
*
* @return the compositon Type
*/
/*
 * Get the current composition Type from the debug.sf.hw /
 * debug.composition.type properties (CPU when debug.sf.hw is unset
 * or 0; otherwise mdp/c2d/dyn, defaulting to GPU).
 */
int getCompositionType() {
    char property[PROPERTY_VALUE_MAX];
    int compositionType = 0;
    if (property_get("debug.sf.hw", property, NULL) > 0) {
        if(atoi(property) == 0) {
            compositionType = COMPOSITION_TYPE_CPU;
        } else { //debug.sf.hw = 1
            // BUGFIX: the original tested `property == NULL`, which is
            // always false for a stack array, so an unset
            // debug.composition.type never took the GPU fallback.
            // Test property_get's returned length instead.
            if (property_get("debug.composition.type", property, NULL) <= 0) {
                compositionType = COMPOSITION_TYPE_GPU;
            } else if ((strncmp(property, "mdp", 3)) == 0) {
                compositionType = COMPOSITION_TYPE_MDP;
            } else if ((strncmp(property, "c2d", 3)) == 0) {
                compositionType = COMPOSITION_TYPE_C2D;
            } else if ((strncmp(property, "dyn", 3)) == 0) {
                compositionType = COMPOSITION_TYPE_DYN;
            } else {
                compositionType = COMPOSITION_TYPE_GPU;
            }
        }
    } else { //debug.sf.hw is not set. Use cpu composition
        compositionType = COMPOSITION_TYPE_CPU;
    }
    return compositionType;
}
/*
* Clear Region implementation for C2D/MDP versions.
*
* @param: region to be cleared
* @param: EGL Display
* @param: EGL Surface
*
* @return 0 on success
*/
/*
 * Clear Region implementation for C2D/MDP versions.
 *
 * @param: region to be cleared
 * @param: EGL Display
 * @param: EGL Surface
 *
 * @return 0 on success
 *
 * NOTE: the entire body is compiled out below (#if 0, "FIXME DIE") —
 * the function currently always returns 0 without touching the screen.
 */
int qcomuiClearRegion(Region region, EGLDisplay dpy, EGLSurface sur)
{
#if 0 /* FIXME DIE */
    int ret = 0;
    if (-1 == sCompositionType) {
        sCompositionType = getCompositionType();
    }
    if ((COMPOSITION_TYPE_MDP != sCompositionType) &&
        (COMPOSITION_TYPE_C2D != sCompositionType) &&
        (COMPOSITION_TYPE_CPU != sCompositionType)) {
        // For non CPU/C2D/MDP composition, return an error, so that SF can use
        // the GPU to draw the wormhole.
        return -1;
    }
    android_native_buffer_t *renderBuffer = (android_native_buffer_t *)
        eglGetRenderBufferANDROID(dpy, sur);
    if (!renderBuffer) {
        ALOGE("%s: eglGetRenderBufferANDROID returned NULL buffer",
              __FUNCTION__);
        return -1;
    }
    private_handle_t *fbHandle = (private_handle_t *)renderBuffer->handle;
    if(!fbHandle) {
        ALOGE("%s: Framebuffer handle is NULL", __FUNCTION__);
        return -1;
    }
    int bytesPerPixel = 4;
    if (HAL_PIXEL_FORMAT_RGB_565 == fbHandle->format) {
        bytesPerPixel = 2;
    }
    // memset each dirty rect row-by-row directly in the framebuffer.
    Region::const_iterator it = region.begin();
    Region::const_iterator const end = region.end();
    const int32_t stride = renderBuffer->stride*bytesPerPixel;
    while (it != end) {
        const Rect& r = *it++;
        uint8_t* dst = (uint8_t*) fbHandle->base +
            (r.left + r.top*renderBuffer->stride)*bytesPerPixel;
        int w = r.width()*bytesPerPixel;
        int h = r.height();
        do {
            if(4 == bytesPerPixel)
                android_memset32((uint32_t*)dst, 0, w);
            else
                android_memset16((uint16_t*)dst, 0, w);
            dst += stride;
        } while(--h);
    }
#endif
    return 0;
}
/*
* Handles the externalDisplay event
* HDMI has highest priority compared to WifiDisplay
* Based on the current and the new display event, decides the
* external display to be enabled
*
* @param: newEvent - new external event
* @param: currEvent - currently enabled external event
* @return: external display to be enabled
*
*/
// Chooses which external display stays active: HDMI always preempts
// WiFi display, OFF disables whichever was running.
external_display handleEventHDMI(external_display newState, external_display
                                 currState)
{
    external_display chosen = currState;

    if (newState == EXT_DISPLAY_HDMI) {
        chosen = EXT_DISPLAY_HDMI;
    } else if (newState == EXT_DISPLAY_WIFI) {
        // WiFi display may take over only while HDMI is not connected.
        if (currState != EXT_DISPLAY_HDMI)
            chosen = EXT_DISPLAY_WIFI;
    } else if (newState == EXT_DISPLAY_OFF) {
        chosen = EXT_DISPLAY_OFF;
    } else {
        ALOGE("handleEventHDMI: unknown Event");
    }

    return chosen;
}
// Using global variables for layer dumping since "property_set("debug.sf.dump",
// property)" does not work.
// Raw-dump bookkeeping: frames remaining to dump, current frame counter
// (starts at 1 so nothing is dumped until the property arms it), the
// last-seen value of "debug.sf.dump", and the output directory path.
int sfdump_countlimit_raw = 0;
int sfdump_counter_raw = 1;
char sfdump_propstr_persist_raw[PROPERTY_VALUE_MAX] = "";
char sfdumpdir_raw[256] = "";
// Same bookkeeping for png dumps, driven by "debug.sf.dump.png".
int sfdump_countlimit_png = 0;
int sfdump_counter_png = 1;
char sfdump_propstr_persist_png[PROPERTY_VALUE_MAX] = "";
char sfdumpdir_png[256] = "";
bool needToDumpLayers()
{
bool bDumpLayer = false;
char sfdump_propstr[PROPERTY_VALUE_MAX];
time_t timenow;
tm sfdump_time;
time(&timenow);
localtime_r(&timenow, &sfdump_time);
if ((property_get("debug.sf.dump.png", sfdump_propstr, NULL) > 0) &&
(strncmp(sfdump_propstr, sfdump_propstr_persist_png,
PROPERTY_VALUE_MAX - 1))) {
// Strings exist & not equal implies it has changed, so trigger a dump
strncpy(sfdump_propstr_persist_png, sfdump_propstr,
PROPERTY_VALUE_MAX - 1);
sfdump_countlimit_png = atoi(sfdump_propstr);
sfdump_countlimit_png = (sfdump_countlimit_png < 0) ? 0:
(sfdump_countlimit_png >= LONG_MAX) ? (LONG_MAX - 1):
sfdump_countlimit_png;
if (sfdump_countlimit_png) {
sprintf(sfdumpdir_png,"/data/sfdump.png%04d%02d%02d.%02d%02d%02d",
sfdump_time.tm_year + 1900, sfdump_time.tm_mon + 1,
sfdump_time.tm_mday, sfdump_time.tm_hour,
sfdump_time.tm_min, sfdump_time.tm_sec);
if (0 == mkdir(sfdumpdir_png, 0777))
sfdump_counter_png = 0;
else
ALOGE("sfdump: Error: %s. Failed to create sfdump directory"
": %s", strerror(errno), sfdumpdir_png);
}
}
if (sfdump_counter_png <= sfdump_countlimit_png)
sfdump_counter_png++;
if ((property_get("debug.sf.dump", sfdump_propstr, NULL) > 0) &&
(strncmp(sfdump_propstr, sfdump_propstr_persist_raw,
PROPERTY_VALUE_MAX - 1))) {
// Strings exist & not equal implies it has changed, so trigger a dump
strncpy(sfdump_propstr_persist_raw, sfdump_propstr,
PROPERTY_VALUE_MAX - 1);
sfdump_countlimit_raw = atoi(sfdump_propstr);
sfdump_countlimit_raw = (sfdump_countlimit_raw < 0) ? 0:
(sfdump_countlimit_raw >= LONG_MAX) ? (LONG_MAX - 1):
sfdump_countlimit_raw;
if (sfdump_countlimit_raw) {
sprintf(sfdumpdir_raw,"/data/sfdump.raw%04d%02d%02d.%02d%02d%02d",
sfdump_time.tm_year + 1900, sfdump_time.tm_mon + 1,
sfdump_time.tm_mday, sfdump_time.tm_hour,
sfdump_time.tm_min, sfdump_time.tm_sec);
if (0 == mkdir(sfdumpdir_raw, 0777))
sfdump_counter_raw = 0;
else
ALOGE("sfdump: Error: %s. Failed to create sfdump directory"
": %s", strerror(errno), sfdumpdir_raw);
}
}
if (sfdump_counter_raw <= sfdump_countlimit_raw)
sfdump_counter_raw++;
bDumpLayer = (sfdump_countlimit_png || sfdump_countlimit_raw)? true : false;
return bDumpLayer;
}
/*
 * Writes a human-readable name for a HAL pixel format into the caller's
 * buffer; unknown formats are rendered as "Unknown0x<hex>".
 */
inline void getHalPixelFormatStr(int format, char pixelformatstr[])
{
    if (!pixelformatstr)
        return;

    // Table of known HAL pixel formats and their printable names.
    static const struct {
        int fmt;
        const char *name;
    } kFormatNames[] = {
        { HAL_PIXEL_FORMAT_RGBA_8888,          "RGBA_8888" },
        { HAL_PIXEL_FORMAT_RGBX_8888,          "RGBX_8888" },
        { HAL_PIXEL_FORMAT_RGB_888,            "RGB_888" },
        { HAL_PIXEL_FORMAT_RGB_565,            "RGB_565" },
        { HAL_PIXEL_FORMAT_BGRA_8888,          "BGRA_8888" },
        { HAL_PIXEL_FORMAT_RGBA_5551,          "RGBA_5551" },
        { HAL_PIXEL_FORMAT_RGBA_4444,          "RGBA_4444" },
        { HAL_PIXEL_FORMAT_YV12,               "YV12" },
        { HAL_PIXEL_FORMAT_YCbCr_422_SP,       "YCbCr_422_SP_NV16" },
        { HAL_PIXEL_FORMAT_YCrCb_420_SP,       "YCrCb_420_SP_NV21" },
        { HAL_PIXEL_FORMAT_YCbCr_422_I,        "YCbCr_422_I_YUY2" },
        { HAL_PIXEL_FORMAT_NV12_ENCODEABLE,    "NV12_ENCODEABLE" },
        { HAL_PIXEL_FORMAT_YCbCr_420_SP_TILED, "YCbCr_420_SP_TILED_TILE_4x2" },
        { HAL_PIXEL_FORMAT_YCbCr_420_SP,       "YCbCr_420_SP" },
        { HAL_PIXEL_FORMAT_YCrCb_420_SP_ADRENO,"YCrCb_420_SP_ADRENO" },
        { HAL_PIXEL_FORMAT_YCrCb_422_SP,       "YCrCb_422_SP" },
        { HAL_PIXEL_FORMAT_R_8,                "R_8" },
        { HAL_PIXEL_FORMAT_RG_88,              "RG_88" },
        { HAL_PIXEL_FORMAT_INTERLACE,          "INTERLACE" },
    };

    for (size_t i = 0; i < sizeof(kFormatNames) / sizeof(kFormatNames[0]); i++) {
        if (kFormatNames[i].fmt == format) {
            strcpy(pixelformatstr, kFormatNames[i].name);
            return;
        }
    }
    sprintf(pixelformatstr, "Unknown0x%X", format);
}
/*
 * Logs a layer's geometry/flags and, when a dump sequence is armed (see
 * needToDumpLayers()), writes its buffer to a png and/or raw file in the
 * time-stamped sfdump directories.
 *
 * @param: moduleCompositionType - Composition type set in hwcomposer module.
 * @param: listFlags - Flags used in hwcomposer's list.
 * @param: layerIndex - Index of layer being dumped.
 * @param: hwLayers - Address of hwc_layer_t to log and dump.
 */
void dumpLayer(int moduleCompositionType, int listFlags, size_t layerIndex,
               hwc_layer_t hwLayers[])
{
    char dumplogstr_png[128] = "";
    char dumplogstr_raw[128] = "";
    if (sfdump_counter_png <= sfdump_countlimit_png) {
        snprintf(dumplogstr_png, sizeof(dumplogstr_png),
                 "[png-dump-frame: %03d of %03d] ",
                 sfdump_counter_png, sfdump_countlimit_png);
    }
    if (sfdump_counter_raw <= sfdump_countlimit_raw) {
        snprintf(dumplogstr_raw, sizeof(dumplogstr_raw),
                 "[raw-dump-frame: %03d of %03d]",
                 sfdump_counter_raw, sfdump_countlimit_raw);
    }
    if (NULL == hwLayers) {
        // layerIndex is a size_t: cast for the %d conversion.
        ALOGE("sfdump: Error.%s%sLayer[%d] No hwLayers to dump.",
              dumplogstr_raw, dumplogstr_png, (int)layerIndex);
        return;
    }
    hwc_layer *layer = &hwLayers[layerIndex];
    hwc_rect_t sourceCrop = layer->sourceCrop;
    hwc_rect_t displayFrame = layer->displayFrame;
    private_handle_t *hnd = (private_handle_t *)layer->handle;
    char pixelformatstr[32] = "None";

    if (hnd)
        getHalPixelFormatStr(hnd->format, pixelformatstr);
#if 0
    ALOGE("sfdump: %s%s[%s]-Composition, Layer[%d] SrcBuff[%dx%d] "
          "SrcCrop[%dl, %dt, %dr, %db] "
          "DispFrame[%dl, %dt, %dr, %db] Composition-type = %s, Format = %s, "
          "Orientation = %s, Flags = %s%s%s%s%s%s%s%s%s%s",
          dumplogstr_raw, dumplogstr_png,
          (moduleCompositionType == COMPOSITION_TYPE_GPU)? "GPU":
          (moduleCompositionType == COMPOSITION_TYPE_MDP)? "MDP":
          (moduleCompositionType == COMPOSITION_TYPE_C2D)? "C2D":
          (moduleCompositionType == COMPOSITION_TYPE_CPU)? "CPU":
          (moduleCompositionType == COMPOSITION_TYPE_DYN)? "DYN": "???",
          (int)layerIndex,
          (hnd)? hnd->width : -1, (hnd)? hnd->height : -1,
          sourceCrop.left, sourceCrop.top,
          sourceCrop.right, sourceCrop.bottom,
          displayFrame.left, displayFrame.top,
          displayFrame.right, displayFrame.bottom,
          (layer->compositionType == HWC_FRAMEBUFFER)? "Framebuffer (OpenGL ES)":
          (layer->compositionType == HWC_OVERLAY)? "Overlay":
          (layer->compositionType == HWC_USE_COPYBIT)? "Copybit": "???",
          pixelformatstr,
          (layer->transform == Transform::ROT_0)? "ROT_0":
          (layer->transform == Transform::FLIP_H)? "FLIP_H":
          (layer->transform == Transform::FLIP_V)? "FLIP_V":
          (layer->transform == Transform::ROT_90)? "ROT_90":
          (layer->transform == Transform::ROT_180)? "ROT_180":
          (layer->transform == Transform::ROT_270)? "ROT_270":
          (layer->transform == Transform::ROT_INVALID)? "ROT_INVALID":"???",
          (layer->flags == 0)? "[None]":"",
          (layer->flags & HWC_SKIP_LAYER)? "[Skip layer]":"",
          (layer->flags & HWC_LAYER_NOT_UPDATING)? "[Layer not updating]":"",
          (layer->flags & HWC_USE_ORIGINAL_RESOLUTION)? "[Original Resolution]":"",
          (layer->flags & HWC_DO_NOT_USE_OVERLAY)? "[Do not use Overlay]":"",
          (layer->flags & HWC_COMP_BYPASS)? "[Bypass]":"",
          (layer->flags & HWC_BYPASS_RESERVE_0)? "[Bypass Reserve 0]":"",
          (layer->flags & HWC_BYPASS_RESERVE_1)? "[Bypass Reserve 1]":"",
          (listFlags & HWC_GEOMETRY_CHANGED)? "[List: Geometry Changed]":"",
          (listFlags & HWC_SKIP_COMPOSITION)? "[List: Skip Composition]":"");
#endif
    if (NULL == hnd) {
        ALOGE("sfdump: %s%sLayer[%d] private-handle is invalid.",
              dumplogstr_raw, dumplogstr_png, (int)layerIndex);
        return;
    }
    if ((sfdump_counter_png <= sfdump_countlimit_png) && hnd->base) {
        bool bResult = false;
        char sfdumpfile_name[256];
        // Stack-allocated bitmap: its destructor releases the pixel ref,
        // replacing the previous new/delete pair.
        SkBitmap tempSkBmp;
        SkBitmap::Config tempSkBmpConfig = SkBitmap::kNo_Config;
        snprintf(sfdumpfile_name, sizeof(sfdumpfile_name),
                 "%s/sfdump%03d_layer%d.png", sfdumpdir_png,
                 sfdump_counter_png, (int)layerIndex);

        // Only RGB formats the png encoder understands are dumped.
        switch (hnd->format) {
            case HAL_PIXEL_FORMAT_RGBA_8888:
            case HAL_PIXEL_FORMAT_RGBX_8888:
            case HAL_PIXEL_FORMAT_BGRA_8888:
                tempSkBmpConfig = SkBitmap::kARGB_8888_Config;
                break;
            case HAL_PIXEL_FORMAT_RGB_565:
            case HAL_PIXEL_FORMAT_RGBA_5551:
            case HAL_PIXEL_FORMAT_RGBA_4444:
                tempSkBmpConfig = SkBitmap::kRGB_565_Config;
                break;
            case HAL_PIXEL_FORMAT_RGB_888:
            default:
                tempSkBmpConfig = SkBitmap::kNo_Config;
                break;
        }
        if (SkBitmap::kNo_Config != tempSkBmpConfig) {
            tempSkBmp.setConfig(tempSkBmpConfig, hnd->width, hnd->height);
            tempSkBmp.setPixels((void*)hnd->base);
            bResult = SkImageEncoder::EncodeFile(sfdumpfile_name,
                                                 tempSkBmp,
                                                 SkImageEncoder::kPNG_Type, 100);
            ALOGE("sfdump: %sDumped Layer[%d] to %s: %s", dumplogstr_png,
                  (int)layerIndex, sfdumpfile_name,
                  bResult ? "Success" : "Fail");
        }
        else {
            ALOGE("sfdump: %sSkipping Layer[%d] dump: Unsupported layer "
                  "format %s for png encoder.", dumplogstr_png,
                  (int)layerIndex, pixelformatstr);
        }
    }
    if ((sfdump_counter_raw <= sfdump_countlimit_raw) && hnd->base) {
        char sfdumpfile_name[256];
        bool bResult = false;
        snprintf(sfdumpfile_name, sizeof(sfdumpfile_name),
                 "%s/sfdump%03d_layer%d_%dx%d_%s.raw",
                 sfdumpdir_raw,
                 sfdump_counter_raw, (int)layerIndex, hnd->width, hnd->height,
                 pixelformatstr);
        // "wb": the pixel dump is binary and write-only ("w+" opened it
        // read/write for no reason).
        FILE* fp = fopen(sfdumpfile_name, "wb");
        if (fp != NULL) {
            bResult = (bool) fwrite((void*)hnd->base, hnd->size, 1, fp);
            fclose(fp);
        }
        ALOGE("sfdump: %s Dumped Layer[%d] to %s: %s", dumplogstr_raw,
              (int)layerIndex, sfdumpfile_name, bResult ? "Success" : "Fail");
    }
}
#ifdef DEBUG_CALC_FPS
ANDROID_SINGLETON_STATIC_INSTANCE(CalcFps) ;
// Start disabled; Init() reads the real level from system properties.
CalcFps::CalcFps() : debug_fps_level(0) {
    Init();
}
// Nothing to release; defined for symmetry with the constructor.
CalcFps::~CalcFps() {
}
void CalcFps::Init() {
char prop[PROPERTY_VALUE_MAX];
property_get("debug.gr.calcfps", prop, "0");
debug_fps_level = atoi(prop);
if (debug_fps_level > MAX_DEBUG_FPS_LEVEL) {
ALOGW("out of range value for debug.gr.calcfps, using 0");
debug_fps_level = 0;
}
ALOGE("DEBUG_CALC_FPS: %d", debug_fps_level);
populate_debug_fps_metadata();
}
// Per-frame hook: records a frame arrival unless fps debugging is off.
void CalcFps::Fps() {
    if (debug_fps_level == 0)
        return;
    calc_fps(ns2us(systemTime()));
}
/*
 * Fills debug_fps_metadata from the debug.gr.calcfps.* properties,
 * clamping everything to the compile-time limits.
 */
void CalcFps::populate_debug_fps_metadata(void)
{
    char prop[PROPERTY_VALUE_MAX];

    /* fps calculation defaults to being frame-count based (DFM_FRAMES) */
    property_get("debug.gr.calcfps.type", prop, "0");
    debug_fps_metadata.type = (debug_fps_metadata_t::DfmType)atoi(prop);

    /* time window used in DFM_TIME mode, defaults to 1000ms */
    property_get("debug.gr.calcfps.timeperiod", prop, "1000");
    debug_fps_metadata.time_period = atoi(prop);

    /* frame window used in DFM_FRAMES mode */
    property_get("debug.gr.calcfps.period", prop, "10");
    debug_fps_metadata.period = atoi(prop);
    if (debug_fps_metadata.period > MAX_FPS_CALC_PERIOD_IN_FRAMES)
        debug_fps_metadata.period = MAX_FPS_CALC_PERIOD_IN_FRAMES;

    /* default ignorethresh_us: 500 milli seconds */
    property_get("debug.gr.calcfps.ignorethresh_us", prop, "500000");
    debug_fps_metadata.ignorethresh_us = atoi(prop);

    /* one histogram step per 16666us display frame; clamp the step count
       and keep the ignore threshold consistent with the clamped value */
    debug_fps_metadata.framearrival_steps =
        (debug_fps_metadata.ignorethresh_us / 16666);
    if (debug_fps_metadata.framearrival_steps > MAX_FRAMEARRIVAL_STEPS) {
        debug_fps_metadata.framearrival_steps = MAX_FRAMEARRIVAL_STEPS;
        debug_fps_metadata.ignorethresh_us =
            debug_fps_metadata.framearrival_steps * 16666;
    }

    /* 2ms margin of error for the gettimeofday */
    debug_fps_metadata.margin_us = 2000;

    memset(debug_fps_metadata.accum_framearrivals, 0,
           sizeof(debug_fps_metadata.accum_framearrivals));

    ALOGE("period: %d", debug_fps_metadata.period);
    ALOGE("ignorethresh_us: %lld", debug_fps_metadata.ignorethresh_us);
}
void CalcFps::print_fps(float fps)
{
if (debug_fps_metadata_t::DFM_FRAMES == debug_fps_metadata.type)
ALOGE("FPS for last %d frames: %3.2f", debug_fps_metadata.period, fps);
else
ALOGE("FPS for last (%f ms, %d frames): %3.2f",
debug_fps_metadata.time_elapsed,
debug_fps_metadata.curr_frame, fps);
debug_fps_metadata.curr_frame = 0;
debug_fps_metadata.time_elapsed = 0.0;
if (debug_fps_level > 1) {
ALOGE("Frame Arrival Distribution:");
for (unsigned int i = 0;
i < ((debug_fps_metadata.framearrival_steps / 6) + 1);
i++) {
ALOGE("%lld %lld %lld %lld %lld %lld",
debug_fps_metadata.accum_framearrivals[i*6],
debug_fps_metadata.accum_framearrivals[i*6+1],
debug_fps_metadata.accum_framearrivals[i*6+2],
debug_fps_metadata.accum_framearrivals[i*6+3],
debug_fps_metadata.accum_framearrivals[i*6+4],
debug_fps_metadata.accum_framearrivals[i*6+5]);
}
/* We are done with displaying, now clear the stats */
for (unsigned int i = 0;
i < debug_fps_metadata.framearrival_steps;
i++)
debug_fps_metadata.accum_framearrivals[i] = 0;
}
return;
}
/*
 * Records one frame arrival and emits an FPS log line whenever the
 * configured frame-count (DFM_FRAMES) or time (DFM_TIME) window elapses.
 *
 * @param: currtime_us - current timestamp in microseconds.
 */
void CalcFps::calc_fps(nsecs_t currtime_us)
{
    static nsecs_t oldtime_us = 0;

    nsecs_t diff = currtime_us - oldtime_us;
    oldtime_us = currtime_us;

    // In frame-count mode, ignore abnormally late frames (including the
    // very first sample, when oldtime_us is still 0) so they do not skew
    // the average.
    if (debug_fps_metadata_t::DFM_FRAMES == debug_fps_metadata.type &&
        diff > debug_fps_metadata.ignorethresh_us) {
        return;
    }

    if (debug_fps_metadata.curr_frame < MAX_FPS_CALC_PERIOD_IN_FRAMES) {
        debug_fps_metadata.framearrivals[debug_fps_metadata.curr_frame] = diff;
    }

    debug_fps_metadata.curr_frame++;

    if (debug_fps_level > 1) {
        /* Histogram bucket index is 1-based: one bucket per 16666us frame. */
        unsigned int currstep = (diff + debug_fps_metadata.margin_us) / 16666;
        /* Guard currstep == 0: the unguarded unsigned (currstep - 1) would
         * wrap around and write far outside accum_framearrivals. */
        if (currstep >= 1 &&
            currstep < debug_fps_metadata.framearrival_steps) {
            debug_fps_metadata.accum_framearrivals[currstep - 1]++;
        }
    }

    if (debug_fps_metadata_t::DFM_FRAMES == debug_fps_metadata.type) {
        if (debug_fps_metadata.curr_frame == debug_fps_metadata.period) {
            /* time to calculate and display FPS */
            nsecs_t sum = 0;
            for (unsigned int i = 0; i < debug_fps_metadata.period; i++)
                sum += debug_fps_metadata.framearrivals[i];
            print_fps((debug_fps_metadata.period * float(1000000)) / float(sum));
        }
    }
    else if (debug_fps_metadata_t::DFM_TIME == debug_fps_metadata.type) {
        debug_fps_metadata.time_elapsed += ((float)diff / 1000.0);
        if (debug_fps_metadata.time_elapsed >= debug_fps_metadata.time_period) {
            float fps = (1000.0 * debug_fps_metadata.curr_frame) /
                        (float)debug_fps_metadata.time_elapsed;
            print_fps(fps);
        }
    }
    return;
}
#endif

394
libqcomui/qcom_ui.h Normal file
View File

@ -0,0 +1,394 @@
/*
* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef INCLUDE_LIBQCOM_UI
#define INCLUDE_LIBQCOM_UI
#include <cutils/native_handle.h>
#include <ui/GraphicBuffer.h>
#include <hardware/hwcomposer.h>
#include <hardware/hwcomposer_defs.h>
#include <ui/Region.h>
#include <EGL/egl.h>
#include <utils/Singleton.h>
#include <cutils/properties.h>
#include "../libgralloc/gralloc_priv.h"
using namespace android;
using android::sp;
using android::GraphicBuffer;
#define HWC_BYPASS_INDEX_MASK 0x00000030
/*
 * Qcom specific Native Window perform operations
 */
enum {
    NATIVE_WINDOW_SET_BUFFERS_SIZE        = 0x10000000,
    NATIVE_WINDOW_UPDATE_BUFFERS_GEOMETRY = 0x20000000,
    NATIVE_WINDOW_SET_S3D_FORMAT          = 0x40000000,
};

// Enum containing the supported composition types
// (returned by getCompositionType(), driven by the debug.sf.hw property)
enum {
    COMPOSITION_TYPE_GPU = 0,
    COMPOSITION_TYPE_MDP = 0x1,
    COMPOSITION_TYPE_C2D = 0x2,
    COMPOSITION_TYPE_CPU = 0x4,
    COMPOSITION_TYPE_DYN = 0x8
};

/*
 * Layer Attributes
 * Toggled via updateLayerQcomFlags(); each maps to one of the Layer Flags
 * below.
 */
enum eLayerAttrib {
    LAYER_UPDATE_STATUS,
    LAYER_ASYNCHRONOUS_STATUS,
};

/*
 * Layer Flags
 */
enum {
    LAYER_UPDATING     = 1<<0,
    LAYER_ASYNCHRONOUS = 1<<1,
};

/*
 * Flags set by the layer and sent to HWC
 * (carried per-frame in hwc_layer_t::flags; see getPerFrameFlags())
 */
enum {
    HWC_LAYER_NOT_UPDATING      = 0x00000002,
    HWC_LAYER_ASYNCHRONOUS      = 0x00000004,
    HWC_USE_ORIGINAL_RESOLUTION = 0x10000000,
    HWC_DO_NOT_USE_OVERLAY      = 0x20000000,
    HWC_COMP_BYPASS             = 0x40000000,
    HWC_USE_EXT_ONLY            = 0x80000000, //Layer displayed on external only
    HWC_USE_EXT_BLOCK           = 0x01000000, //Layer displayed on external only
    HWC_BYPASS_RESERVE_0        = 0x00000010,
    HWC_BYPASS_RESERVE_1        = 0x00000020,
};

// Per-layer composition decision; the first two alias the standard
// hwcomposer values, HWC_USE_COPYBIT extends them for copybit.
enum HWCCompositionType {
    HWC_USE_GPU = HWC_FRAMEBUFFER, // This layer is to be handled by Surfaceflinger
    HWC_USE_OVERLAY = HWC_OVERLAY, // This layer is to be handled by the overlay
    HWC_USE_COPYBIT                // This layer is to be handled by copybit
};

// External display selection (see handleEventHDMI(): HDMI beats WiFi).
enum external_display {
    EXT_DISPLAY_OFF,
    EXT_DISPLAY_HDMI,
    EXT_DISPLAY_WIFI
};
/*
* Structure to hold the buffer geometry
*/
struct qBufGeometry {
    int width;   // buffer width in pixels
    int height;  // buffer height in pixels
    int format;  // HAL pixel format
    // Convenience setter updating all three fields at once.
    void set(int w, int h, int f) {
        width = w;
        height = h;
        format = f;
    }
};
#ifndef DEBUG_CALC_FPS
// FPS accounting compiled out: both hooks collapse to no-ops.
#define CALC_FPS() ((void)0)
#define CALC_INIT() ((void)0)
#else
#define CALC_FPS() CalcFps::getInstance().Fps()
#define CALC_INIT() CalcFps::getInstance().Init()

// Measures and logs the display frame rate. Call CALC_INIT() once, then
// CALC_FPS() once per frame. Behavior is configured through the
// debug.gr.calcfps.* system properties (see the .cpp implementation).
class CalcFps : public Singleton<CalcFps> {
    public:
    CalcFps();
    ~CalcFps();

    // Re-reads the debug properties and resets the collected metadata.
    void Init();
    // Per-frame hook; no-op when debug_fps_level is 0.
    void Fps();

    private:
    static const unsigned int MAX_FPS_CALC_PERIOD_IN_FRAMES = 128;
    static const unsigned int MAX_FRAMEARRIVAL_STEPS = 50;
    static const unsigned int MAX_DEBUG_FPS_LEVEL = 2;

    struct debug_fps_metadata_t {
        /*fps calculation based on time or number of frames*/
        enum DfmType {
            DFM_FRAMES = 0,
            DFM_TIME   = 1,
        };

        DfmType type;

        /* indicates how much time do we wait till we calculate FPS */
        unsigned long time_period;

        /*indicates how much time elapsed since we report fps*/
        float time_elapsed;

        /* indicates how many frames do we wait till we calculate FPS */
        unsigned int period;
        /* current frame, will go upto period, and then reset */
        unsigned int curr_frame;
        /* frame will arrive at a multiple of 16666 us at the display.
           This indicates how many steps to consider for our calculations.
           For example, if framearrival_steps = 10, then the frame that arrived
           after 166660 us or more will be ignored.
        */
        unsigned int framearrival_steps;
        /* ignorethresh_us = framearrival_steps * 16666 */
        nsecs_t ignorethresh_us;
        /* used to calculate the actual frame arrival step, the times might not be
           accurate
        */
        unsigned int margin_us;

        /* actual data storage */
        nsecs_t framearrivals[MAX_FPS_CALC_PERIOD_IN_FRAMES];
        nsecs_t accum_framearrivals[MAX_FRAMEARRIVAL_STEPS];
    };

    private:
    void populate_debug_fps_metadata(void);
    void print_fps(float fps);
    void calc_fps(nsecs_t currtime_us);

    private:
    debug_fps_metadata_t debug_fps_metadata;
    // 0 = disabled, 1 = fps reporting, 2 = fps + frame-arrival histogram
    unsigned int debug_fps_level;
};
#endif
#if 0
// NOTE: disabled (dead) code, kept for reference only.
// Tracks the stereoscopic-3D compose format chosen from HWC hints.
class QCBaseLayer
{
//    int mS3DFormat;
    int32_t mComposeS3DFormat;
public:
    QCBaseLayer()
    {
        mComposeS3DFormat = 0;
    }
    enum { // S3D formats
        eS3D_SIDE_BY_SIDE = 0x10000,
        eS3D_TOP_BOTTOM   = 0x20000
    };
/*
    virtual status_t setStereoscopic3DFormat(int format) { mS3DFormat = format; return 0; }
    virtual int getStereoscopic3DFormat() const { return mS3DFormat; }
*/
    // Translate HWC S3D hints into the compose format; clears it when
    // neither hint is set.
    void setS3DComposeFormat (int32_t hints)
    {
        if (hints & HWC_HINT_DRAW_S3D_SIDE_BY_SIDE)
            mComposeS3DFormat = eS3D_SIDE_BY_SIDE;
        else if (hints & HWC_HINT_DRAW_S3D_TOP_BOTTOM)
            mComposeS3DFormat = eS3D_TOP_BOTTOM;
        else
            mComposeS3DFormat = 0;
    }
    // Non-zero while an S3D compose format is active.
    int32_t needsS3DCompose () const { return mComposeS3DFormat; }
};
#endif
/*
* Function to check if the allocated buffer is of the correct size.
* Reallocate the buffer with the correct size, if the size doesn't
* match
*
* @param: handle of the allocated buffer
* @param: requested size for the buffer
* @param: usage flags
*
* return 0 on success
*/
int checkBuffer(native_handle_t *buffer_handle, int size, int usage);
/*
* Checks if the format is supported by the GPU.
*
* @param: format to check
*
* @return true if the format is supported by the GPU.
*/
bool isGPUSupportedFormat(int format);
/*
* Adreno is not optimized for GL_TEXTURE_EXTERNAL_OES
* texure target. DO NOT choose TEXTURE_EXTERNAL_OES
* target for RGB formats.
*
* Based on the pixel format, decide the texture target.
*
* @param : pixel format to check
*
* @return : GL_TEXTURE_2D for RGB formats, and
* GL_TEXTURE_EXTERNAL_OES for YUV formats.
*
*/
int decideTextureTarget (const int pixel_format);
/*
* Gets the number of arguments required for this operation.
*
* @param: operation whose argument count is required.
*
* @return -EINVAL if the operation is invalid.
*/
int getNumberOfArgsForOperation(int operation);
/*
* Checks if memory needs to be reallocated for this buffer.
*
* @param: Geometry of the current buffer.
* @param: Required Geometry.
* @param: Geometry of the updated buffer.
*
* @return True if a memory reallocation is required.
*/
bool needNewBuffer(const qBufGeometry currentGeometry,
const qBufGeometry requiredGeometry,
const qBufGeometry updatedGeometry);
/*
* Update the geometry of this buffer without reallocation.
*
* @param: buffer whose geometry needs to be updated.
* @param: Updated buffer geometry
*/
int updateBufferGeometry(sp<GraphicBuffer> buffer, const qBufGeometry bufGeometry);
/*
* Update the S3D format of this buffer.
*
* @param: buffer whosei S3D format needs to be updated.
* @param: Updated buffer S3D format
*/
int updateBufferS3DFormat(sp<GraphicBuffer> buffer, const int s3dFormat);
/*
* Updates the flags for the layer
*
* @param: Attribute
* @param: Identifies if the attribute was enabled or disabled.
* @param: current Layer flags.
*
* @return: Flags for the layer
*/
int updateLayerQcomFlags(eLayerAttrib attribute, bool enable, int& currentFlags);
/*
* Gets the per frame HWC flags for this layer.
*
* @param: current hwcl flags
* @param: current layerFlags
*
* @return: the per frame flags.
*/
int getPerFrameFlags(int hwclFlags, int layerFlags);
/*
* Checks if FB is updated by this composition type
*
* @param: composition type
* @return: true if FB is updated, false if not
*/
bool isUpdatingFB(HWCCompositionType compositionType);
/*
* Get the current composition Type
*
* @return the compositon Type
*/
int getCompositionType();
/*
* Clear region implementation for C2D/MDP versions.
*
* @param: region to be cleared
* @param: EGL Display
* @param: EGL Surface
*
* @return 0 on success
*/
int qcomuiClearRegion(Region region, EGLDisplay dpy, EGLSurface sur);
/*
* Handles the externalDisplay event
* HDMI has highest priority compared to WifiDisplay
* Based on the current and the new display event, decides the
* external display to be enabled
*
* @param: newEvent - new external event
* @param: currEvent - currently enabled external event
* @return: external display to be enabled
*
*/
external_display handleEventHDMI(external_display newEvent, external_display
currEvent);
/*
* Checks if layers need to be dumped based on system property "debug.sf.dump"
* for raw dumps and "debug.sf.dump.png" for png dumps.
*
* For example, to dump 25 frames in raw format, do,
* adb shell setprop debug.sf.dump 25
* Layers are dumped in a time-stamped location: /data/sfdump*.
*
* To dump 10 frames in png format, do,
* adb shell setprop debug.sf.dump.png 10
* To dump another 25 or so frames in raw format, do,
* adb shell setprop debug.sf.dump 26
*
* To turn off logcat logging of layer-info, set both properties to 0,
* adb shell setprop debug.sf.dump.png 0
* adb shell setprop debug.sf.dump 0
*
* @return: true if layers need to be dumped (or logcat-ed).
*/
bool needToDumpLayers();
/*
* Dumps a layer's info into logcat and its buffer into raw/png files.
*
* @param: moduleCompositionType - Composition type set in hwcomposer module.
* @param: listFlags - Flags used in hwcomposer's list.
* @param: layerIndex - Index of layer being dumped.
* @param: hwLayers - Address of hwc_layer_t to log and dump.
*
*/
void dumpLayer(int moduleCompositionType, int listFlags, size_t layerIndex,
hwc_layer_t hwLayers[]);
#endif // INCLUDE_LIBQCOM_UI

View File

@ -0,0 +1,26 @@
# Builds libtilerenderer, a wrapper around the QCOM GL tiled-rendering
# extension (see tilerenderer.cpp).
LOCAL_PATH := $(call my-dir)
include $(CLEAR_VARS)

# The tile renderer links against libhwui, so only build it when the
# hardware-accelerated (OpenGL) UI renderer is enabled.
ifeq ($(USE_OPENGL_RENDERER),true)
LOCAL_PRELINK_MODULE := false
LOCAL_MODULE_PATH := $(TARGET_OUT_SHARED_LIBRARIES)
LOCAL_SHARED_LIBRARIES := libutils libcutils libGLESv2 libhwui
# Skia, hwui and GLES2 headers needed by tilerenderer.cpp.
LOCAL_C_INCLUDES += \
    frameworks/base/include/utils \
    frameworks/base/libs/hwui \
    external/skia/include/core \
    external/skia/include/effects \
    external/skia/include/images \
    external/skia/src/ports \
    external/skia/include/utils \
    hardware/libhardware/include/hardware \
    frameworks/base/opengl/include/GLES2
LOCAL_SRC_FILES := \
    tilerenderer.cpp
LOCAL_MODULE := libtilerenderer
LOCAL_MODULE_TAGS := optional
include $(BUILD_SHARED_LIBRARY)
endif

View File

@ -0,0 +1,97 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* Copyright (c) 2011 Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <GLES2/gl2.h>
#include <EGL/egl.h>
#include <gl2ext.h>
#include <OpenGLRenderer.h>
#include "tilerenderer.h"
namespace android {
ANDROID_SINGLETON_STATIC_INSTANCE(uirenderer::TileRenderer) ;
namespace uirenderer {
// No tiling pass is active until startTileRendering() succeeds.
TileRenderer::TileRenderer() : mIsTiled(false) {
}
// Nothing to tear down; tiling state is plain data.
TileRenderer::~TileRenderer() {
}
/*
 * Starts a QCOM tiled-rendering pass over the dirty rect
 * (left, top, right, bottom) of the renderer's viewport. An all-zero rect
 * means "tile the whole viewport". The rect is converted to GL
 * bottom-left coordinates and clamped to the viewport; if clamping was
 * needed the existing color buffer contents are preserved.
 */
void TileRenderer::startTileRendering(OpenGLRenderer* renderer,
                                      int left, int top,
                                      int right, int bottom) {
    int width = 0;
    int height = 0;
    GLenum status = GL_NO_ERROR;

    if (renderer != NULL) {
        renderer->getViewport(width, height);
    }

    // No dirty rect supplied: fall back to the full viewport.
    if (!left && !right && !top && !bottom) {
        left = 0;
        top = 0;
        right = width;
        bottom = height;
    }

    // Still all zero means the viewport size is unknown: cannot tile.
    if (!left && !right && !top && !bottom) {
        //can't do tile rendering
        ALOGE("can't tile render; dirty region, width, height not available");
        return;
    }

    // Flip to GL coordinates (origin bottom-left) and clamp to the
    // viewport, preserving previous contents when anything was clamped.
    int l = left, t = (height - bottom), w = (right - left), h = (bottom - top), preserve = 0;
    if (l < 0 || t < 0) {
        l = (l < 0) ? 0 : l;
        t = (t < 0) ? 0 : t;
        preserve = 1;
    }

    if (w > width || h > height) {
        w = (w > width) ? width : w;
        h = (h > height) ? height : h;
        preserve = 1;
    }

    //clear off all errors before tiling, if any
    while ((status = glGetError()) != GL_NO_ERROR);

    if (preserve)
        glStartTilingQCOM(l, t, w, h, GL_COLOR_BUFFER_BIT0_QCOM);
    else
        glStartTilingQCOM(l, t, w, h, GL_NONE);

    // Only record a tiling pass as active if the extension call succeeded.
    status = glGetError();
    if (status == GL_NO_ERROR)
        mIsTiled = true;
}
/*
 * Ends a tiling pass started by startTileRendering(); no-op when no pass
 * is active. Drains the GL error queue after the extension call.
 */
void TileRenderer::endTileRendering(OpenGLRenderer*) {
    if (mIsTiled) {
        glEndTilingQCOM(GL_COLOR_BUFFER_BIT0_QCOM);
        mIsTiled = false;
        // Swallow any errors raised by the tiling end call.
        while (glGetError() != GL_NO_ERROR) {
        }
    }
}
}; // namespace uirenderer
}; // namespace android

View File

@ -0,0 +1,42 @@
/*
* Copyright (C) 2010 The Android Open Source Project
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#ifndef ANDROID_TILE_RENDERER_H
#define ANDROID_TILE_RENDERER_H
#include <utils/Singleton.h>
namespace android {
namespace uirenderer {
class OpenGLRenderer;
// Singleton wrapper around the QCOM GL tiled-rendering extension.
// startTileRendering()/endTileRendering() bracket a draw pass; mIsTiled
// tracks whether a tiling pass is currently active.
class TileRenderer: public Singleton<TileRenderer> {
public:
    TileRenderer();
    ~TileRenderer();
    // Begin tiling over the (left, top, right, bottom) dirty rect of the
    // renderer's viewport; all zeros means the full viewport.
    void startTileRendering(OpenGLRenderer* renderer, int left, int top, int right, int bottom);
    // End the current tiling pass, if one was started.
    void endTileRendering(OpenGLRenderer*);
private:
    bool mIsTiled;  // true while a QCOM tiling pass is active
};
}; // namespace uirenderer
}; // namespace android
#endif