Merge remote-tracking branch 'securecrt/jellybean' into jellybean
Conflicts: arch/arm/configs/htcleo_defconfig
This commit is contained in:
commit
c679e15374
@ -17,7 +17,7 @@ endif
|
||||
|
||||
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
|
||||
GZFLAGS :=-9
|
||||
#KBUILD_CFLAGS +=-pipe
|
||||
KBUILD_CFLAGS +=-pipe
|
||||
# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
|
||||
KBUILD_CFLAGS +=$(call cc-option,-marm,)
|
||||
|
||||
|
@ -32,7 +32,7 @@ CONFIG_EXPERIMENTAL=y
|
||||
CONFIG_BROKEN_ON_SMP=y
|
||||
CONFIG_LOCK_KERNEL=y
|
||||
CONFIG_INIT_ENV_ARG_LIMIT=32
|
||||
CONFIG_LOCALVERSION="_tytung_HWA_r3.5"
|
||||
CONFIG_LOCALVERSION="_tytung_HWA_r3.3_JB"
|
||||
# CONFIG_LOCALVERSION_AUTO is not set
|
||||
CONFIG_HAVE_KERNEL_GZIP=y
|
||||
CONFIG_HAVE_KERNEL_BZIP2=y
|
||||
|
@ -565,13 +565,13 @@ adreno_recover_hang(struct kgsl_device *device)
|
||||
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
|
||||
unsigned int timestamp;
|
||||
unsigned int num_rb_contents;
|
||||
unsigned int bad_context;
|
||||
unsigned int reftimestamp;
|
||||
unsigned int enable_ts;
|
||||
unsigned int soptimestamp;
|
||||
unsigned int eoptimestamp;
|
||||
struct adreno_context *drawctxt;
|
||||
unsigned int context_id;
|
||||
struct kgsl_context *context;
|
||||
struct adreno_context *adreno_context;
|
||||
int next = 0;
|
||||
|
||||
KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n");
|
||||
@ -587,22 +587,35 @@ adreno_recover_hang(struct kgsl_device *device)
|
||||
ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents);
|
||||
if (ret)
|
||||
goto done;
|
||||
timestamp = rb->timestamp;
|
||||
KGSL_DRV_ERR(device, "Last issued timestamp: %x\n", timestamp);
|
||||
kgsl_sharedmem_readl(&device->memstore, &bad_context,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
|
||||
kgsl_sharedmem_readl(&device->memstore, &context_id,
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
current_context));
|
||||
context = idr_find(&device->context_idr, context_id);
|
||||
if (context == NULL) {
|
||||
KGSL_DRV_ERR(device, "Last context unknown id:%d\n",
|
||||
context_id);
|
||||
context_id = KGSL_MEMSTORE_GLOBAL;
|
||||
}
|
||||
|
||||
timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL];
|
||||
KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp);
|
||||
|
||||
kgsl_sharedmem_readl(&device->memstore, &reftimestamp,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ref_wait_ts));
|
||||
kgsl_sharedmem_readl(&device->memstore, &enable_ts,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable));
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ts_cmp_enable));
|
||||
kgsl_sharedmem_readl(&device->memstore, &soptimestamp,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
soptimestamp));
|
||||
kgsl_sharedmem_readl(&device->memstore, &eoptimestamp,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
eoptimestamp));
|
||||
/* Make sure memory is synchronized before restarting the GPU */
|
||||
mb();
|
||||
KGSL_CTXT_ERR(device,
|
||||
"Context that caused a GPU hang: %x\n", bad_context);
|
||||
"Context id that caused a GPU hang: %d\n", context_id);
|
||||
/* restart device */
|
||||
ret = adreno_stop(device);
|
||||
if (ret)
|
||||
@ -613,20 +626,20 @@ adreno_recover_hang(struct kgsl_device *device)
|
||||
KGSL_DRV_ERR(device, "Device has been restarted after hang\n");
|
||||
/* Restore timestamp states */
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp),
|
||||
KGSL_MEMSTORE_OFFSET(context_id, soptimestamp),
|
||||
soptimestamp);
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp),
|
||||
KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp),
|
||||
eoptimestamp);
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp),
|
||||
KGSL_MEMSTORE_OFFSET(context_id, soptimestamp),
|
||||
soptimestamp);
|
||||
if (num_rb_contents) {
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
|
||||
KGSL_MEMSTORE_OFFSET(context_id, ref_wait_ts),
|
||||
reftimestamp);
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
|
||||
KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable),
|
||||
enable_ts);
|
||||
}
|
||||
/* Make sure all writes are posted before the GPU reads them */
|
||||
@ -634,12 +647,12 @@ adreno_recover_hang(struct kgsl_device *device)
|
||||
/* Mark the invalid context so no more commands are accepted from
|
||||
* that context */
|
||||
|
||||
drawctxt = (struct adreno_context *) bad_context;
|
||||
adreno_context = context->devctxt;
|
||||
|
||||
KGSL_CTXT_ERR(device,
|
||||
"Context that caused a GPU hang: %x\n", bad_context);
|
||||
"Context that caused a GPU hang: %d\n", adreno_context->id);
|
||||
|
||||
drawctxt->flags |= CTXT_FLAGS_GPU_HANG;
|
||||
adreno_context->flags |= CTXT_FLAGS_GPU_HANG;
|
||||
|
||||
/*
|
||||
* Set the reset status of all contexts to
|
||||
@ -649,7 +662,7 @@ adreno_recover_hang(struct kgsl_device *device)
|
||||
while ((context = idr_get_next(&device->context_idr, &next))) {
|
||||
if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT !=
|
||||
context->reset_status) {
|
||||
if (context->devctxt != drawctxt)
|
||||
if (context->id != context_id)
|
||||
context->reset_status =
|
||||
KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT;
|
||||
else
|
||||
@ -661,7 +674,7 @@ adreno_recover_hang(struct kgsl_device *device)
|
||||
|
||||
/* Restore valid commands in ringbuffer */
|
||||
adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents);
|
||||
rb->timestamp = timestamp;
|
||||
rb->timestamp[KGSL_MEMSTORE_GLOBAL] = timestamp;
|
||||
done:
|
||||
vfree(rb_buffer);
|
||||
return ret;
|
||||
@ -755,7 +768,8 @@ static int adreno_getproperty(struct kgsl_device *device,
|
||||
shadowprop.size = device->memstore.size;
|
||||
/* GSL needs this to be set, even if it
|
||||
appears to be meaningless */
|
||||
shadowprop.flags = KGSL_FLAGS_INITIALIZED;
|
||||
shadowprop.flags = KGSL_FLAGS_INITIALIZED |
|
||||
KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS;
|
||||
}
|
||||
if (copy_to_user(value, &shadowprop,
|
||||
sizeof(shadowprop))) {
|
||||
@ -1011,38 +1025,58 @@ void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
|
||||
__raw_writel(value, reg);
|
||||
}
|
||||
|
||||
static unsigned int _get_context_id(struct kgsl_context *k_ctxt)
|
||||
{
|
||||
unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
|
||||
|
||||
if (k_ctxt != NULL) {
|
||||
struct adreno_context *a_ctxt = k_ctxt->devctxt;
|
||||
/*
|
||||
* if the context was not created with per context timestamp
|
||||
* support, we must use the global timestamp since issueibcmds
|
||||
* will be returning that one.
|
||||
*/
|
||||
if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS)
|
||||
context_id = a_ctxt->id;
|
||||
}
|
||||
|
||||
return context_id;
|
||||
}
|
||||
|
||||
static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
|
||||
unsigned int timestamp)
|
||||
struct kgsl_context *context, unsigned int timestamp)
|
||||
{
|
||||
int status;
|
||||
unsigned int ref_ts, enableflag;
|
||||
unsigned int context_id = _get_context_id(context);
|
||||
|
||||
status = kgsl_check_timestamp(device, timestamp);
|
||||
status = kgsl_check_timestamp(device, context, timestamp);
|
||||
if (!status) {
|
||||
mutex_lock(&device->mutex);
|
||||
kgsl_sharedmem_readl(&device->memstore, &enableflag,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable));
|
||||
KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable));
|
||||
mb();
|
||||
|
||||
if (enableflag) {
|
||||
kgsl_sharedmem_readl(&device->memstore, &ref_ts,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ref_wait_ts));
|
||||
mb();
|
||||
if (timestamp_cmp(ref_ts, timestamp) >= 0) {
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
|
||||
timestamp);
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ref_wait_ts), timestamp);
|
||||
wmb();
|
||||
}
|
||||
} else {
|
||||
unsigned int cmds[2];
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
|
||||
timestamp);
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ref_wait_ts), timestamp);
|
||||
enableflag = 1;
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
|
||||
enableflag);
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ts_cmp_enable), enableflag);
|
||||
wmb();
|
||||
/* submit a dummy packet so that even if all
|
||||
* commands upto timestamp get executed we will still
|
||||
@ -1076,6 +1110,7 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
|
||||
|
||||
/* MUST be called with the device mutex held */
|
||||
static int adreno_waittimestamp(struct kgsl_device *device,
|
||||
struct kgsl_context *context,
|
||||
unsigned int timestamp,
|
||||
unsigned int msecs)
|
||||
{
|
||||
@ -1087,15 +1122,19 @@ static int adreno_waittimestamp(struct kgsl_device *device,
|
||||
int retries;
|
||||
unsigned int msecs_first;
|
||||
unsigned int msecs_part;
|
||||
unsigned int ts_issued;
|
||||
unsigned int context_id = _get_context_id(context);
|
||||
|
||||
ts_issued = adreno_dev->ringbuffer.timestamp[context_id];
|
||||
|
||||
/* Don't wait forever, set a max value for now */
|
||||
if (msecs == -1)
|
||||
msecs = adreno_dev->wait_timeout;
|
||||
|
||||
if (timestamp_cmp(timestamp, adreno_dev->ringbuffer.timestamp) > 0) {
|
||||
KGSL_DRV_ERR(device, "Cannot wait for invalid ts: %x, "
|
||||
"rb->timestamp: %x\n",
|
||||
timestamp, adreno_dev->ringbuffer.timestamp);
|
||||
if (timestamp_cmp(timestamp, ts_issued) > 0) {
|
||||
KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, "
|
||||
"last issued ts <%d:0x%x>\n",
|
||||
context_id, timestamp, context_id, ts_issued);
|
||||
status = -EINVAL;
|
||||
goto done;
|
||||
}
|
||||
@ -1107,7 +1146,7 @@ static int adreno_waittimestamp(struct kgsl_device *device,
|
||||
msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100;
|
||||
msecs_part = (msecs - msecs_first + 3) / 4;
|
||||
for (retries = 0; retries < 5; retries++) {
|
||||
if (kgsl_check_timestamp(device, timestamp)) {
|
||||
if (kgsl_check_timestamp(device, context, timestamp)) {
|
||||
/* if the timestamp happens while we're not
|
||||
* waiting, there's a chance that an interrupt
|
||||
* will not be generated and thus the timestamp
|
||||
@ -1130,7 +1169,7 @@ static int adreno_waittimestamp(struct kgsl_device *device,
|
||||
status = kgsl_wait_event_interruptible_timeout(
|
||||
device->wait_queue,
|
||||
kgsl_check_interrupt_timestamp(device,
|
||||
timestamp),
|
||||
context, timestamp),
|
||||
msecs_to_jiffies(retries ?
|
||||
msecs_part : msecs_first), io);
|
||||
mutex_lock(&device->mutex);
|
||||
@ -1147,9 +1186,10 @@ static int adreno_waittimestamp(struct kgsl_device *device,
|
||||
}
|
||||
status = -ETIMEDOUT;
|
||||
KGSL_DRV_ERR(device,
|
||||
"Device hang detected while waiting for timestamp: %x,"
|
||||
"last submitted(rb->timestamp): %x, wptr: %x\n",
|
||||
timestamp, adreno_dev->ringbuffer.timestamp,
|
||||
"Device hang detected while waiting for timestamp: "
|
||||
"<%d:0x%x>, last submitted timestamp: <%d:0x%x>, "
|
||||
"wptr: 0x%x\n",
|
||||
context_id, timestamp, context_id, ts_issued,
|
||||
adreno_dev->ringbuffer.wptr);
|
||||
if (!adreno_dump_and_recover(device)) {
|
||||
/* wait for idle after recovery as the
|
||||
@ -1163,15 +1203,17 @@ done:
|
||||
}
|
||||
|
||||
static unsigned int adreno_readtimestamp(struct kgsl_device *device,
|
||||
enum kgsl_timestamp_type type)
|
||||
struct kgsl_context *context, enum kgsl_timestamp_type type)
|
||||
{
|
||||
unsigned int timestamp = 0;
|
||||
unsigned int context_id = _get_context_id(context);
|
||||
|
||||
if (type == KGSL_TIMESTAMP_CONSUMED)
|
||||
adreno_regread(device, REG_CP_TIMESTAMP, ×tamp);
|
||||
else if (type == KGSL_TIMESTAMP_RETIRED)
|
||||
kgsl_sharedmem_readl(&device->memstore, ×tamp,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
eoptimestamp));
|
||||
rmb();
|
||||
|
||||
return timestamp;
|
||||
|
@ -1427,8 +1427,8 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev,
|
||||
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
|
||||
cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
|
||||
cmds[3] = device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
|
||||
cmds[4] = (unsigned int) context;
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
|
||||
cmds[4] = context->id;
|
||||
adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
|
||||
kgsl_mmu_setstate(device, context->pagetable);
|
||||
|
||||
@ -1551,11 +1551,18 @@ static void a2xx_cp_intrcallback(struct kgsl_device *device)
|
||||
|
||||
if (status & CP_INT_CNTL__RB_INT_MASK) {
|
||||
/* signal intr completion event */
|
||||
unsigned int enableflag = 0;
|
||||
unsigned int context_id;
|
||||
kgsl_sharedmem_readl(&device->memstore,
|
||||
&context_id,
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
current_context));
|
||||
if (context_id < KGSL_MEMSTORE_MAX) {
|
||||
kgsl_sharedmem_writel(&rb->device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
|
||||
enableflag);
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ts_cmp_enable), 0);
|
||||
device->last_expired_ctxt_id = context_id;
|
||||
wmb();
|
||||
}
|
||||
KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
|
||||
}
|
||||
|
||||
@ -1780,7 +1787,6 @@ static void a2xx_gmeminit(struct adreno_device *adreno_dev)
|
||||
static void a2xx_start(struct adreno_device *adreno_dev)
|
||||
{
|
||||
struct kgsl_device *device = &adreno_dev->dev;
|
||||
int init_reftimestamp = 0x7fffffff;
|
||||
|
||||
/*
|
||||
* We need to make sure all blocks are powered up and clocked
|
||||
@ -1833,12 +1839,6 @@ static void a2xx_start(struct adreno_device *adreno_dev)
|
||||
else
|
||||
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
|
||||
|
||||
kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
|
||||
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
|
||||
init_reftimestamp);
|
||||
|
||||
adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
|
||||
|
||||
/* Make sure interrupts are disabled */
|
||||
|
@ -2222,8 +2222,8 @@ static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev,
|
||||
cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER;
|
||||
cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2);
|
||||
cmds[3] = device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(current_context);
|
||||
cmds[4] = (unsigned int)context;
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context);
|
||||
cmds[4] = context->id;
|
||||
adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5);
|
||||
kgsl_mmu_setstate(device, context->pagetable);
|
||||
|
||||
@ -2366,9 +2366,17 @@ static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq)
|
||||
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
|
||||
|
||||
if (irq == A3XX_INT_CP_RB_INT) {
|
||||
unsigned int context_id;
|
||||
kgsl_sharedmem_readl(&adreno_dev->dev.memstore,
|
||||
&context_id,
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
current_context));
|
||||
if (context_id < KGSL_MEMSTORE_MAX) {
|
||||
kgsl_sharedmem_writel(&rb->device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), 0);
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
ts_cmp_enable), 0);
|
||||
wmb();
|
||||
}
|
||||
KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
|
||||
}
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
|
||||
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
@ -17,6 +17,8 @@
|
||||
#include "kgsl_sharedmem.h"
|
||||
#include "adreno.h"
|
||||
|
||||
#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
|
||||
|
||||
/* quad for copying GMEM to context shadow */
|
||||
#define QUAD_LEN 12
|
||||
#define QUAD_RESTORE_LEN 14
|
||||
@ -154,6 +156,7 @@ int adreno_drawctxt_create(struct kgsl_device *device,
|
||||
|
||||
drawctxt->pagetable = pagetable;
|
||||
drawctxt->bin_base_offset = 0;
|
||||
drawctxt->id = context->id;
|
||||
|
||||
if (flags & KGSL_CONTEXT_PREAMBLE)
|
||||
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
|
||||
@ -161,10 +164,17 @@ int adreno_drawctxt_create(struct kgsl_device *device,
|
||||
if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
|
||||
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
|
||||
|
||||
if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
|
||||
drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
|
||||
|
||||
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
|
||||
if (ret)
|
||||
goto err;
|
||||
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
|
||||
KGSL_INIT_REFTIMESTAMP);
|
||||
|
||||
context->devctxt = drawctxt;
|
||||
return 0;
|
||||
err:
|
||||
|
@ -40,6 +40,10 @@
|
||||
#define CTXT_FLAGS_GPU_HANG 0x00008000
|
||||
/* Specifies there is no need to save GMEM */
|
||||
#define CTXT_FLAGS_NOGMEMALLOC 0x00010000
|
||||
/* Trash state for context */
|
||||
#define CTXT_FLAGS_TRASHSTATE 0x00020000
|
||||
/* per context timestamps enabled */
|
||||
#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000
|
||||
|
||||
struct kgsl_device;
|
||||
struct adreno_device;
|
||||
@ -72,6 +76,7 @@ struct gmem_shadow_t {
|
||||
};
|
||||
|
||||
struct adreno_context {
|
||||
unsigned int id;
|
||||
uint32_t flags;
|
||||
struct kgsl_pagetable *pagetable;
|
||||
struct kgsl_memdesc gpustate;
|
||||
|
@ -14,6 +14,7 @@
|
||||
#include <linux/vmalloc.h>
|
||||
|
||||
#include "kgsl.h"
|
||||
#include "kgsl_sharedmem.h"
|
||||
|
||||
#include "adreno.h"
|
||||
#include "adreno_pm4types.h"
|
||||
@ -464,7 +465,9 @@ static int adreno_dump(struct kgsl_device *device)
|
||||
const uint32_t *rb_vaddr;
|
||||
int num_item = 0;
|
||||
int read_idx, write_idx;
|
||||
unsigned int ts_processed;
|
||||
unsigned int ts_processed = 0xdeaddead;
|
||||
struct kgsl_context *context;
|
||||
unsigned int context_id;
|
||||
|
||||
static struct ib_list ib_list;
|
||||
|
||||
@ -660,9 +663,18 @@ static int adreno_dump(struct kgsl_device *device)
|
||||
KGSL_LOG_DUMP(device,
|
||||
"MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
|
||||
|
||||
ts_processed = device->ftbl->readtimestamp(device,
|
||||
kgsl_sharedmem_readl(&device->memstore,
|
||||
(unsigned int *) &context_id,
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
current_context));
|
||||
context = idr_find(&device->context_idr, context_id);
|
||||
if (context) {
|
||||
ts_processed = device->ftbl->readtimestamp(device, context,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
|
||||
KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n",
|
||||
context->id, ts_processed);
|
||||
} else
|
||||
KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
|
||||
|
||||
num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
|
||||
cp_rb_rptr);
|
||||
|
@ -236,7 +236,7 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
|
||||
return 0;
|
||||
|
||||
if (init_ram) {
|
||||
rb->timestamp = 0;
|
||||
rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0;
|
||||
GSL_RB_INIT_TIMESTAMP(rb);
|
||||
}
|
||||
|
||||
@ -321,18 +321,13 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
|
||||
}
|
||||
|
||||
/* setup scratch/timestamp */
|
||||
adreno_regwrite(device, REG_SCRATCH_ADDR,
|
||||
device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
|
||||
adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr +
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
soptimestamp));
|
||||
|
||||
adreno_regwrite(device, REG_SCRATCH_UMSK,
|
||||
GSL_RB_MEMPTRS_SCRATCH_MASK);
|
||||
|
||||
/* update the eoptimestamp field with the last retired timestamp */
|
||||
kgsl_sharedmem_writel(&device->memstore,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp),
|
||||
rb->timestamp);
|
||||
|
||||
/* load the CP ucode */
|
||||
|
||||
status = adreno_ringbuffer_load_pm4_ucode(device);
|
||||
@ -431,15 +426,28 @@ void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
|
||||
|
||||
static uint32_t
|
||||
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
|
||||
struct adreno_context *context,
|
||||
unsigned int flags, unsigned int *cmds,
|
||||
int sizedwords)
|
||||
{
|
||||
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
|
||||
unsigned int *ringcmds;
|
||||
unsigned int timestamp;
|
||||
unsigned int total_sizedwords = sizedwords + 6;
|
||||
unsigned int total_sizedwords = sizedwords;
|
||||
unsigned int i;
|
||||
unsigned int rcmd_gpu;
|
||||
unsigned int context_id = KGSL_MEMSTORE_GLOBAL;
|
||||
unsigned int gpuaddr = rb->device->memstore.gpuaddr;
|
||||
|
||||
if (context != NULL) {
|
||||
/*
|
||||
* if the context was not created with per context timestamp
|
||||
* support, we must use the global timestamp since issueibcmds
|
||||
* will be returning that one.
|
||||
*/
|
||||
if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS)
|
||||
context_id = context->id;
|
||||
}
|
||||
|
||||
/* reserve space to temporarily turn off protected mode
|
||||
* error checking if needed
|
||||
@ -451,6 +459,13 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
|
||||
if (adreno_is_a3xx(adreno_dev))
|
||||
total_sizedwords += 7;
|
||||
|
||||
total_sizedwords += 2; /* scratchpad ts for recovery */
|
||||
if (context) {
|
||||
total_sizedwords += 3; /* sop timestamp */
|
||||
total_sizedwords += 4; /* eop timestamp */
|
||||
}
|
||||
total_sizedwords += 4; /* global timestamp for recovery*/
|
||||
|
||||
ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
|
||||
rcmd_gpu = rb->buffer_desc.gpuaddr
|
||||
+ sizeof(uint)*(rb->wptr-total_sizedwords);
|
||||
@ -478,12 +493,20 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
|
||||
}
|
||||
|
||||
rb->timestamp++;
|
||||
timestamp = rb->timestamp;
|
||||
/* always increment the global timestamp. once. */
|
||||
rb->timestamp[KGSL_MEMSTORE_GLOBAL]++;
|
||||
if (context) {
|
||||
if (context_id == KGSL_MEMSTORE_GLOBAL)
|
||||
rb->timestamp[context_id] =
|
||||
rb->timestamp[KGSL_MEMSTORE_GLOBAL];
|
||||
else
|
||||
rb->timestamp[context_id]++;
|
||||
}
|
||||
timestamp = rb->timestamp[context_id];
|
||||
|
||||
/* start-of-pipeline and end-of-pipeline timestamps */
|
||||
/* scratchpad ts for recovery */
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
|
||||
|
||||
if (adreno_is_a3xx(adreno_dev)) {
|
||||
/*
|
||||
@ -499,22 +522,41 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00);
|
||||
}
|
||||
|
||||
if (context) {
|
||||
/* start-of-pipeline timestamp */
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu,
|
||||
cp_type3_packet(CP_MEM_WRITE, 2));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
|
||||
KGSL_MEMSTORE_OFFSET(context->id, soptimestamp)));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
|
||||
|
||||
/* end-of-pipeline timestamp */
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu,
|
||||
cp_type3_packet(CP_EVENT_WRITE, 3));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
|
||||
KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp)));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
|
||||
}
|
||||
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu,
|
||||
(rb->device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
eoptimestamp)));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]);
|
||||
|
||||
if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
|
||||
/* Conditional execution based on memory values */
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu,
|
||||
cp_type3_packet(CP_COND_EXEC, 4));
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
|
||||
KGSL_MEMSTORE_OFFSET(
|
||||
context_id, ts_cmp_enable)) >> 2);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr +
|
||||
KGSL_MEMSTORE_OFFSET(
|
||||
context_id, ref_wait_ts)) >> 2);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp);
|
||||
/* # of conditional command DWORDs */
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
|
||||
GSL_RB_WRITE(ringcmds, rcmd_gpu,
|
||||
@ -533,7 +575,6 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
|
||||
|
||||
adreno_ringbuffer_submit(rb);
|
||||
|
||||
/* return timestamp of issued coREG_ands */
|
||||
return timestamp;
|
||||
}
|
||||
|
||||
@ -548,7 +589,7 @@ adreno_ringbuffer_issuecmds(struct kgsl_device *device,
|
||||
|
||||
if (device->state & KGSL_STATE_HUNG)
|
||||
return;
|
||||
adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
|
||||
adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords);
|
||||
}
|
||||
|
||||
static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr,
|
||||
@ -769,8 +810,8 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
|
||||
|
||||
if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
|
||||
KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
|
||||
" will not accept commands for this context\n",
|
||||
drawctxt);
|
||||
" will not accept commands for context %d\n",
|
||||
drawctxt, drawctxt->id);
|
||||
return -EDEADLK;
|
||||
}
|
||||
|
||||
@ -822,6 +863,7 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
|
||||
adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
|
||||
|
||||
*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
|
||||
drawctxt,
|
||||
KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
|
||||
&link[0], (cmds - link));
|
||||
|
||||
@ -855,11 +897,25 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
|
||||
unsigned int val2;
|
||||
unsigned int val3;
|
||||
unsigned int copy_rb_contents = 0;
|
||||
unsigned int cur_context;
|
||||
struct kgsl_context *context;
|
||||
unsigned int context_id;
|
||||
|
||||
GSL_RB_GET_READPTR(rb, &rb->rptr);
|
||||
|
||||
retired_timestamp = device->ftbl->readtimestamp(device,
|
||||
/* current_context is the context that is presently active in the
|
||||
* GPU, i.e the context in which the hang is caused */
|
||||
kgsl_sharedmem_readl(&device->memstore, &context_id,
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
current_context));
|
||||
KGSL_DRV_ERR(device, "Last context id: %d\n", context_id);
|
||||
context = idr_find(&device->context_idr, context_id);
|
||||
if (context == NULL) {
|
||||
KGSL_DRV_ERR(device,
|
||||
"GPU recovery from hang not possible because last"
|
||||
" context id is invalid.\n");
|
||||
return -EINVAL;
|
||||
}
|
||||
retired_timestamp = device->ftbl->readtimestamp(device, context,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
|
||||
retired_timestamp);
|
||||
@ -894,7 +950,8 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
|
||||
(val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
|
||||
&& val2 == CACHE_FLUSH_TS &&
|
||||
val3 == (rb->device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
|
||||
KGSL_MEMSTORE_OFFSET(context_id,
|
||||
eoptimestamp)))) {
|
||||
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
|
||||
rb->buffer_desc.size);
|
||||
KGSL_DRV_ERR(device,
|
||||
@ -940,10 +997,6 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
/* current_context is the context that is presently active in the
|
||||
* GPU, i.e the context in which the hang is caused */
|
||||
kgsl_sharedmem_readl(&device->memstore, &cur_context,
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
|
||||
while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
|
||||
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
|
||||
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
|
||||
@ -958,7 +1011,8 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
|
||||
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
|
||||
rb->buffer_desc.size);
|
||||
BUG_ON(val1 != (device->memstore.gpuaddr +
|
||||
KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
|
||||
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
|
||||
current_context)));
|
||||
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
|
||||
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
|
||||
rb->buffer_desc.size);
|
||||
@ -970,7 +1024,7 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
|
||||
* and leave.
|
||||
*/
|
||||
|
||||
if ((copy_rb_contents == 0) && (value == cur_context)) {
|
||||
if ((copy_rb_contents == 0) && (value == context_id)) {
|
||||
KGSL_DRV_ERR(device, "GPU recovery could not "
|
||||
"find the previous context\n");
|
||||
return -EINVAL;
|
||||
@ -986,7 +1040,7 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
|
||||
/* if context switches to a context that did not cause
|
||||
* hang then start saving the rb contents as those
|
||||
* commands can be executed */
|
||||
if (value != cur_context) {
|
||||
if (value != context_id) {
|
||||
copy_rb_contents = 1;
|
||||
temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
|
||||
temp_rb_buffer[temp_idx++] =
|
||||
|
@ -54,7 +54,8 @@ struct adreno_ringbuffer {
|
||||
|
||||
unsigned int wptr; /* write pointer offset in dwords from baseaddr */
|
||||
unsigned int rptr; /* read pointer offset in dwords from baseaddr */
|
||||
uint32_t timestamp;
|
||||
|
||||
unsigned int timestamp[KGSL_MEMSTORE_MAX];
|
||||
};
|
||||
|
||||
|
||||
|
@ -58,22 +58,30 @@ static struct ion_client *kgsl_ion_client;
|
||||
* @returns - 0 on success or error code on failure
|
||||
*/
|
||||
|
||||
static int kgsl_add_event(struct kgsl_device *device, u32 ts,
|
||||
void (*cb)(struct kgsl_device *, void *, u32), void *priv,
|
||||
static int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts,
|
||||
void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv,
|
||||
struct kgsl_device_private *owner)
|
||||
{
|
||||
struct kgsl_event *event;
|
||||
struct list_head *n;
|
||||
unsigned int cur = device->ftbl->readtimestamp(device,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
unsigned int cur_ts;
|
||||
struct kgsl_context *context = NULL;
|
||||
|
||||
if (cb == NULL)
|
||||
return -EINVAL;
|
||||
|
||||
if (id != KGSL_MEMSTORE_GLOBAL) {
|
||||
context = idr_find(&device->context_idr, id);
|
||||
if (context == NULL)
|
||||
return -EINVAL;
|
||||
}
|
||||
cur_ts = device->ftbl->readtimestamp(device, context,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
|
||||
/* Check to see if the requested timestamp has already fired */
|
||||
|
||||
if (timestamp_cmp(cur, ts) >= 0) {
|
||||
cb(device, priv, cur);
|
||||
if (timestamp_cmp(cur_ts, ts) >= 0) {
|
||||
cb(device, priv, id, cur_ts);
|
||||
return 0;
|
||||
}
|
||||
|
||||
@ -81,17 +89,24 @@ static int kgsl_add_event(struct kgsl_device *device, u32 ts,
|
||||
if (event == NULL)
|
||||
return -ENOMEM;
|
||||
|
||||
event->context = context;
|
||||
event->timestamp = ts;
|
||||
event->priv = priv;
|
||||
event->func = cb;
|
||||
event->owner = owner;
|
||||
|
||||
/* Add the event in order to the list */
|
||||
/*
|
||||
* Add the event in order to the list. Order is by context id
|
||||
* first and then by timestamp for that context.
|
||||
*/
|
||||
|
||||
for (n = device->events.next ; n != &device->events; n = n->next) {
|
||||
struct kgsl_event *e =
|
||||
list_entry(n, struct kgsl_event, list);
|
||||
|
||||
if (e->context != context)
|
||||
continue;
|
||||
|
||||
if (timestamp_cmp(e->timestamp, ts) > 0) {
|
||||
list_add(&event->list, n->prev);
|
||||
break;
|
||||
@ -115,12 +130,16 @@ static void kgsl_cancel_events(struct kgsl_device *device,
|
||||
struct kgsl_device_private *owner)
|
||||
{
|
||||
struct kgsl_event *event, *event_tmp;
|
||||
unsigned int cur = device->ftbl->readtimestamp(device,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
unsigned int id, cur;
|
||||
|
||||
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
|
||||
if (event->owner != owner)
|
||||
continue;
|
||||
|
||||
cur = device->ftbl->readtimestamp(device, event->context,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
|
||||
id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
|
||||
/*
|
||||
* "cancel" the events by calling their callback.
|
||||
* Currently, events are used for lock and memory
|
||||
@ -128,7 +147,7 @@ static void kgsl_cancel_events(struct kgsl_device *device,
|
||||
* thing to do is release or free.
|
||||
*/
|
||||
if (event->func)
|
||||
event->func(device, event->priv, cur);
|
||||
event->func(device, event->priv, id, cur);
|
||||
|
||||
list_del(&event->list);
|
||||
kfree(event);
|
||||
@ -265,8 +284,8 @@ kgsl_create_context(struct kgsl_device_private *dev_priv)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
ret = idr_get_new(&dev_priv->device->context_idr,
|
||||
context, &id);
|
||||
ret = idr_get_new_above(&dev_priv->device->context_idr,
|
||||
context, 1, &id);
|
||||
|
||||
if (ret != -EAGAIN)
|
||||
break;
|
||||
@ -277,6 +296,16 @@ kgsl_create_context(struct kgsl_device_private *dev_priv)
|
||||
return NULL;
|
||||
}
|
||||
|
||||
/* MAX - 1, there is one memdesc in memstore for device info */
|
||||
if (id >= KGSL_MEMSTORE_MAX) {
|
||||
KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d "
|
||||
"ctxts due to memstore limitation\n",
|
||||
KGSL_MEMSTORE_MAX);
|
||||
idr_remove(&dev_priv->device->context_idr, id);
|
||||
kfree(context);
|
||||
return NULL;
|
||||
}
|
||||
|
||||
context->id = id;
|
||||
context->dev_priv = dev_priv;
|
||||
|
||||
@ -307,25 +336,28 @@ static void kgsl_timestamp_expired(struct work_struct *work)
|
||||
ts_expired_ws);
|
||||
struct kgsl_event *event, *event_tmp;
|
||||
uint32_t ts_processed;
|
||||
unsigned int id;
|
||||
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
/* get current EOP timestamp */
|
||||
ts_processed = device->ftbl->readtimestamp(device,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
|
||||
/* Process expired events */
|
||||
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
|
||||
ts_processed = device->ftbl->readtimestamp(device,
|
||||
event->context, KGSL_TIMESTAMP_RETIRED);
|
||||
if (timestamp_cmp(ts_processed, event->timestamp) < 0)
|
||||
break;
|
||||
continue;
|
||||
|
||||
id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL;
|
||||
|
||||
if (event->func)
|
||||
event->func(device, event->priv, ts_processed);
|
||||
event->func(device, event->priv, id, ts_processed);
|
||||
|
||||
list_del(&event->list);
|
||||
kfree(event);
|
||||
}
|
||||
|
||||
device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID;
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
}
|
||||
|
||||
@ -400,11 +432,15 @@ int kgsl_unregister_ts_notifier(struct kgsl_device *device,
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
|
||||
|
||||
int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
|
||||
int kgsl_check_timestamp(struct kgsl_device *device,
|
||||
struct kgsl_context *context, unsigned int timestamp)
|
||||
{
|
||||
unsigned int ts_processed;
|
||||
unsigned int global;
|
||||
|
||||
ts_processed = device->ftbl->readtimestamp(device,
|
||||
ts_processed = device->ftbl->readtimestamp(device, context,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
global = device->ftbl->readtimestamp(device, NULL,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
|
||||
return (timestamp_cmp(ts_processed, timestamp) >= 0);
|
||||
@ -745,6 +781,9 @@ static int kgsl_open(struct inode *inodep, struct file *filep)
|
||||
kgsl_check_suspended(device);
|
||||
|
||||
if (device->open_count == 0) {
|
||||
kgsl_sharedmem_set(&device->memstore, 0, 0,
|
||||
device->memstore.size);
|
||||
|
||||
result = device->ftbl->start(device, true);
|
||||
|
||||
if (result) {
|
||||
@ -885,21 +924,35 @@ static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
|
||||
return result;
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv,
|
||||
unsigned int cmd, void *data)
|
||||
{
|
||||
int result = 0;
|
||||
struct kgsl_device_waittimestamp *param = data;
|
||||
/* The getproperty struct is reused for setproperty too */
|
||||
struct kgsl_device_getproperty *param = data;
|
||||
|
||||
/* Set the active count so that suspend doesn't do the
|
||||
wrong thing */
|
||||
if (dev_priv->device->ftbl->setproperty)
|
||||
result = dev_priv->device->ftbl->setproperty(
|
||||
dev_priv->device, param->type,
|
||||
param->value, param->sizebytes);
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static long _device_waittimestamp(struct kgsl_device_private *dev_priv,
|
||||
struct kgsl_context *context,
|
||||
unsigned int timestamp,
|
||||
unsigned int timeout)
|
||||
{
|
||||
int result = 0;
|
||||
|
||||
/* Set the active count so that suspend doesn't do the wrong thing */
|
||||
|
||||
dev_priv->device->active_cnt++;
|
||||
|
||||
result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
|
||||
param->timestamp,
|
||||
param->timeout);
|
||||
context, timestamp, timeout);
|
||||
|
||||
|
||||
/* Fire off any pending suspend operations that are in flight */
|
||||
|
||||
@ -910,6 +963,34 @@ static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
|
||||
return result;
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
{
|
||||
struct kgsl_device_waittimestamp *param = data;
|
||||
|
||||
return _device_waittimestamp(dev_priv, KGSL_MEMSTORE_GLOBAL,
|
||||
param->timestamp, param->timeout);
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
{
|
||||
struct kgsl_device_waittimestamp_ctxtid *param = data;
|
||||
struct kgsl_context *context;
|
||||
|
||||
context = kgsl_find_context(dev_priv, param->context_id);
|
||||
if (context == NULL) {
|
||||
KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
|
||||
param->context_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return _device_waittimestamp(dev_priv, context,
|
||||
param->timestamp, param->timeout);
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
|
||||
unsigned int cmd, void *data)
|
||||
{
|
||||
@ -926,7 +1007,7 @@ static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
|
||||
if (context == NULL) {
|
||||
result = -EINVAL;
|
||||
KGSL_DRV_ERR(dev_priv->device,
|
||||
"invalid drawctxt drawctxt_id %d\n",
|
||||
"invalid context_id %d\n",
|
||||
param->drawctxt_id);
|
||||
goto done;
|
||||
}
|
||||
@ -997,21 +1078,46 @@ done:
|
||||
return result;
|
||||
}
|
||||
|
||||
static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv,
|
||||
struct kgsl_context *context, unsigned int type,
|
||||
unsigned int *timestamp)
|
||||
{
|
||||
*timestamp = dev_priv->device->ftbl->readtimestamp(dev_priv->device,
|
||||
context, type);
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
{
|
||||
struct kgsl_cmdstream_readtimestamp *param = data;
|
||||
|
||||
param->timestamp =
|
||||
dev_priv->device->ftbl->readtimestamp(dev_priv->device,
|
||||
param->type);
|
||||
return _cmdstream_readtimestamp(dev_priv, NULL,
|
||||
param->type, ¶m->timestamp);
|
||||
}
|
||||
|
||||
return 0;
|
||||
static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
{
|
||||
struct kgsl_cmdstream_readtimestamp_ctxtid *param = data;
|
||||
struct kgsl_context *context;
|
||||
|
||||
context = kgsl_find_context(dev_priv, param->context_id);
|
||||
if (context == NULL) {
|
||||
KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n",
|
||||
param->context_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return _cmdstream_readtimestamp(dev_priv, context,
|
||||
param->type, ¶m->timestamp);
|
||||
}
|
||||
|
||||
static void kgsl_freemem_event_cb(struct kgsl_device *device,
|
||||
void *priv, u32 timestamp)
|
||||
void *priv, u32 id, u32 timestamp)
|
||||
{
|
||||
struct kgsl_mem_entry *entry = priv;
|
||||
spin_lock(&entry->priv->mem_lock);
|
||||
@ -1020,30 +1126,65 @@ static void kgsl_freemem_event_cb(struct kgsl_device *device,
|
||||
kgsl_mem_entry_detach_process(entry);
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv,
|
||||
unsigned int gpuaddr, struct kgsl_context *context,
|
||||
unsigned int timestamp, unsigned int type)
|
||||
{
|
||||
int result = 0;
|
||||
struct kgsl_cmdstream_freememontimestamp *param = data;
|
||||
struct kgsl_mem_entry *entry = NULL;
|
||||
struct kgsl_device *device = dev_priv->device;
|
||||
unsigned int cur;
|
||||
unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL;
|
||||
|
||||
spin_lock(&dev_priv->process_priv->mem_lock);
|
||||
entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr);
|
||||
entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr);
|
||||
spin_unlock(&dev_priv->process_priv->mem_lock);
|
||||
|
||||
if (entry) {
|
||||
result = kgsl_add_event(dev_priv->device, param->timestamp,
|
||||
kgsl_freemem_event_cb, entry, dev_priv);
|
||||
cur = device->ftbl->readtimestamp(device, context,
|
||||
KGSL_TIMESTAMP_RETIRED);
|
||||
|
||||
result = kgsl_add_event(dev_priv->device, context_id,
|
||||
timestamp, kgsl_freemem_event_cb,
|
||||
entry, dev_priv);
|
||||
} else {
|
||||
KGSL_DRV_ERR(dev_priv->device,
|
||||
"invalid gpuaddr %08x\n", param->gpuaddr);
|
||||
"invalid gpuaddr %08x\n", gpuaddr);
|
||||
result = -EINVAL;
|
||||
}
|
||||
|
||||
return result;
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
{
|
||||
struct kgsl_cmdstream_freememontimestamp *param = data;
|
||||
|
||||
return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
|
||||
NULL, param->timestamp, param->type);
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid(
|
||||
struct kgsl_device_private
|
||||
*dev_priv, unsigned int cmd,
|
||||
void *data)
|
||||
{
|
||||
struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data;
|
||||
struct kgsl_context *context;
|
||||
|
||||
context = kgsl_find_context(dev_priv, param->context_id);
|
||||
if (context == NULL) {
|
||||
KGSL_DRV_ERR(dev_priv->device,
|
||||
"invalid drawctxt context_id %d\n", param->context_id);
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr,
|
||||
context, param->timestamp, param->type);
|
||||
}
|
||||
|
||||
static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
|
||||
unsigned int cmd, void *data)
|
||||
{
|
||||
@ -1760,13 +1901,14 @@ struct kgsl_genlock_event_priv {
|
||||
* kgsl_genlock_event_cb - Event callback for a genlock timestamp event
|
||||
* @device - The KGSL device that expired the timestamp
|
||||
* @priv - private data for the event
|
||||
* @context_id - the context id that goes with the timestamp
|
||||
* @timestamp - the timestamp that triggered the event
|
||||
*
|
||||
* Release a genlock lock following the expiration of a timestamp
|
||||
*/
|
||||
|
||||
static void kgsl_genlock_event_cb(struct kgsl_device *device,
|
||||
void *priv, u32 timestamp)
|
||||
void *priv, u32 context_id, u32 timestamp)
|
||||
{
|
||||
struct kgsl_genlock_event_priv *ev = priv;
|
||||
int ret;
|
||||
@ -1794,7 +1936,7 @@ static void kgsl_genlock_event_cb(struct kgsl_device *device,
|
||||
*/
|
||||
|
||||
static int kgsl_add_genlock_event(struct kgsl_device *device,
|
||||
u32 timestamp, void __user *data, int len,
|
||||
u32 context_id, u32 timestamp, void __user *data, int len,
|
||||
struct kgsl_device_private *owner)
|
||||
{
|
||||
struct kgsl_genlock_event_priv *event;
|
||||
@ -1820,8 +1962,8 @@ static int kgsl_add_genlock_event(struct kgsl_device *device,
|
||||
return ret;
|
||||
}
|
||||
|
||||
ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event,
|
||||
owner);
|
||||
ret = kgsl_add_event(device, context_id, timestamp,
|
||||
kgsl_genlock_event_cb, event, owner);
|
||||
if (ret)
|
||||
kfree(event);
|
||||
|
||||
@ -1829,7 +1971,7 @@ static int kgsl_add_genlock_event(struct kgsl_device *device,
|
||||
}
|
||||
#else
|
||||
static long kgsl_add_genlock_event(struct kgsl_device *device,
|
||||
u32 timestamp, void __user *data, int len,
|
||||
u32 context_id, u32 timestamp, void __user *data, int len,
|
||||
struct kgsl_device_private *owner)
|
||||
{
|
||||
return -EINVAL;
|
||||
@ -1853,8 +1995,8 @@ static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
|
||||
switch (param->type) {
|
||||
case KGSL_TIMESTAMP_EVENT_GENLOCK:
|
||||
ret = kgsl_add_genlock_event(dev_priv->device,
|
||||
param->timestamp, param->priv, param->len,
|
||||
dev_priv);
|
||||
param->context_id, param->timestamp, param->priv,
|
||||
param->len, dev_priv);
|
||||
break;
|
||||
default:
|
||||
ret = -EINVAL;
|
||||
@ -1878,12 +2020,18 @@ static const struct {
|
||||
kgsl_ioctl_device_getproperty, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP,
|
||||
kgsl_ioctl_device_waittimestamp, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID,
|
||||
kgsl_ioctl_device_waittimestamp_ctxtid, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS,
|
||||
kgsl_ioctl_rb_issueibcmds, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP,
|
||||
kgsl_ioctl_cmdstream_readtimestamp, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID,
|
||||
kgsl_ioctl_cmdstream_readtimestamp_ctxtid, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP,
|
||||
kgsl_ioctl_cmdstream_freememontimestamp, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID,
|
||||
kgsl_ioctl_cmdstream_freememontimestamp_ctxtid, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE,
|
||||
kgsl_ioctl_drawctxt_create, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY,
|
||||
@ -1906,6 +2054,8 @@ static const struct {
|
||||
kgsl_ioctl_cff_user_event, 0),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
|
||||
kgsl_ioctl_timestamp_event, 1),
|
||||
KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY,
|
||||
kgsl_ioctl_device_setproperty, 1),
|
||||
};
|
||||
|
||||
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
|
||||
@ -2211,13 +2361,13 @@ kgsl_register_device(struct kgsl_device *device)
|
||||
|
||||
INIT_LIST_HEAD(&device->events);
|
||||
|
||||
device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID;
|
||||
|
||||
ret = kgsl_mmu_init(device);
|
||||
if (ret != 0)
|
||||
goto err_dest_work_q;
|
||||
|
||||
ret = kgsl_allocate_contiguous(&device->memstore,
|
||||
sizeof(struct kgsl_devmemstore));
|
||||
|
||||
ret = kgsl_allocate_contiguous(&device->memstore, KGSL_MEMSTORE_SIZE);
|
||||
if (ret != 0)
|
||||
goto err_close_mmu;
|
||||
|
||||
|
@ -25,6 +25,14 @@
|
||||
|
||||
#define KGSL_NAME "kgsl"
|
||||
|
||||
/* The number of memstore arrays limits the number of contexts allowed.
|
||||
* If more contexts are needed, update multiple for MEMSTORE_SIZE
|
||||
*/
|
||||
#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 2))
|
||||
#define KGSL_MEMSTORE_GLOBAL (0)
|
||||
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
|
||||
sizeof(struct kgsl_devmemstore) - 1)
|
||||
|
||||
/* Timestamp window used to detect rollovers */
|
||||
#define KGSL_TIMESTAMP_WINDOW 0x80000000
|
||||
|
||||
@ -150,6 +158,7 @@ struct kgsl_mem_entry {
|
||||
void *priv_data;
|
||||
struct list_head list;
|
||||
uint32_t free_timestamp;
|
||||
unsigned int context_id;
|
||||
/* back pointer to private structure under whose context this
|
||||
* allocation is made */
|
||||
struct kgsl_process_private *priv;
|
||||
|
@ -1,5 +1,4 @@
|
||||
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
|
||||
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
|
||||
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
@ -47,6 +46,7 @@
|
||||
#define KGSL_STATE_SUSPEND 0x00000010
|
||||
#define KGSL_STATE_HUNG 0x00000020
|
||||
#define KGSL_STATE_DUMP_AND_RECOVER 0x00000040
|
||||
#define KGSL_STATE_SLUMBER 0x00000080
|
||||
|
||||
#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
|
||||
|
||||
@ -76,9 +76,10 @@ struct kgsl_functable {
|
||||
enum kgsl_property_type type, void *value,
|
||||
unsigned int sizebytes);
|
||||
int (*waittimestamp) (struct kgsl_device *device,
|
||||
unsigned int timestamp, unsigned int msecs);
|
||||
struct kgsl_context *context, unsigned int timestamp,
|
||||
unsigned int msecs);
|
||||
unsigned int (*readtimestamp) (struct kgsl_device *device,
|
||||
enum kgsl_timestamp_type type);
|
||||
struct kgsl_context *context, enum kgsl_timestamp_type type);
|
||||
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
|
||||
struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
|
||||
unsigned int sizedwords, uint32_t *timestamp,
|
||||
@ -101,6 +102,9 @@ struct kgsl_functable {
|
||||
struct kgsl_context *context);
|
||||
long (*ioctl) (struct kgsl_device_private *dev_priv,
|
||||
unsigned int cmd, void *data);
|
||||
int (*setproperty) (struct kgsl_device *device,
|
||||
enum kgsl_property_type type, void *value,
|
||||
unsigned int sizebytes);
|
||||
};
|
||||
|
||||
struct kgsl_memregion {
|
||||
@ -120,8 +124,9 @@ struct kgsl_mh {
|
||||
};
|
||||
|
||||
struct kgsl_event {
|
||||
struct kgsl_context *context;
|
||||
uint32_t timestamp;
|
||||
void (*func)(struct kgsl_device *, void *, u32);
|
||||
void (*func)(struct kgsl_device *, void *, u32, u32);
|
||||
void *priv;
|
||||
struct list_head list;
|
||||
struct kgsl_device_private *owner;
|
||||
@ -153,6 +158,7 @@ struct kgsl_device {
|
||||
uint32_t state;
|
||||
uint32_t requested_state;
|
||||
|
||||
unsigned int last_expired_ctxt_id;
|
||||
unsigned int active_cnt;
|
||||
struct completion suspend_gate;
|
||||
|
||||
@ -304,7 +310,8 @@ kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
|
||||
return (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
|
||||
}
|
||||
|
||||
int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp);
|
||||
int kgsl_check_timestamp(struct kgsl_device *device,
|
||||
struct kgsl_context *context, unsigned int timestamp);
|
||||
|
||||
int kgsl_register_ts_notifier(struct kgsl_device *device,
|
||||
struct notifier_block *nb);
|
||||
|
14
drivers/gpu/msm/z180.c
Normal file → Executable file
14
drivers/gpu/msm/z180.c
Normal file → Executable file
@ -100,6 +100,7 @@ enum z180_cmdwindow_type {
|
||||
static int z180_start(struct kgsl_device *device, unsigned int init_ram);
|
||||
static int z180_stop(struct kgsl_device *device);
|
||||
static int z180_wait(struct kgsl_device *device,
|
||||
struct kgsl_context *context,
|
||||
unsigned int timestamp,
|
||||
unsigned int msecs);
|
||||
static void z180_regread(struct kgsl_device *device,
|
||||
@ -382,8 +383,8 @@ static int z180_idle(struct kgsl_device *device, unsigned int timeout)
|
||||
|
||||
if (timestamp_cmp(z180_dev->current_timestamp,
|
||||
z180_dev->timestamp) > 0)
|
||||
status = z180_wait(device, z180_dev->current_timestamp,
|
||||
timeout);
|
||||
status = z180_wait(device, NULL,
|
||||
z180_dev->current_timestamp, timeout);
|
||||
|
||||
if (status)
|
||||
KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n");
|
||||
@ -793,14 +794,16 @@ static void z180_cmdwindow_write(struct kgsl_device *device,
|
||||
}
|
||||
|
||||
static unsigned int z180_readtimestamp(struct kgsl_device *device,
|
||||
enum kgsl_timestamp_type type)
|
||||
struct kgsl_context *context, enum kgsl_timestamp_type type)
|
||||
{
|
||||
struct z180_device *z180_dev = Z180_DEVICE(device);
|
||||
(void)context;
|
||||
/* get current EOP timestamp */
|
||||
return z180_dev->timestamp;
|
||||
}
|
||||
|
||||
static int z180_waittimestamp(struct kgsl_device *device,
|
||||
struct kgsl_context *context,
|
||||
unsigned int timestamp,
|
||||
unsigned int msecs)
|
||||
{
|
||||
@ -811,13 +814,14 @@ static int z180_waittimestamp(struct kgsl_device *device,
|
||||
msecs = 10 * MSEC_PER_SEC;
|
||||
|
||||
mutex_unlock(&device->mutex);
|
||||
status = z180_wait(device, timestamp, msecs);
|
||||
status = z180_wait(device, context, timestamp, msecs);
|
||||
mutex_lock(&device->mutex);
|
||||
|
||||
return status;
|
||||
}
|
||||
|
||||
static int z180_wait(struct kgsl_device *device,
|
||||
struct kgsl_context *context,
|
||||
unsigned int timestamp,
|
||||
unsigned int msecs)
|
||||
{
|
||||
@ -826,7 +830,7 @@ static int z180_wait(struct kgsl_device *device,
|
||||
|
||||
timeout = wait_io_event_interruptible_timeout(
|
||||
device->wait_queue,
|
||||
kgsl_check_timestamp(device, timestamp),
|
||||
kgsl_check_timestamp(device, context, timestamp),
|
||||
msecs_to_jiffies(msecs));
|
||||
|
||||
if (timeout > 0)
|
||||
|
@ -992,7 +992,7 @@ static void setup_fb_info(struct msmfb_info *msmfb)
|
||||
int r;
|
||||
|
||||
/* finish setting up the fb_info struct */
|
||||
strncpy(fb_info->fix.id, "msmfb", 16);
|
||||
strncpy(fb_info->fix.id, "msmfb31_0", 16);
|
||||
fb_info->fix.ypanstep = 1;
|
||||
|
||||
fb_info->fbops = &msmfb_ops;
|
||||
|
@ -35,14 +35,18 @@
|
||||
#define _MSM_KGSL_H
|
||||
|
||||
#define KGSL_VERSION_MAJOR 3
|
||||
#define KGSL_VERSION_MINOR 8
|
||||
#define KGSL_VERSION_MINOR 10
|
||||
|
||||
/*context flags */
|
||||
#define KGSL_CONTEXT_SAVE_GMEM 1
|
||||
#define KGSL_CONTEXT_NO_GMEM_ALLOC 2
|
||||
#define KGSL_CONTEXT_SUBMIT_IB_LIST 4
|
||||
#define KGSL_CONTEXT_CTX_SWITCH 8
|
||||
#define KGSL_CONTEXT_PREAMBLE 16
|
||||
#define KGSL_CONTEXT_SAVE_GMEM 0x00000001
|
||||
#define KGSL_CONTEXT_NO_GMEM_ALLOC 0x00000002
|
||||
#define KGSL_CONTEXT_SUBMIT_IB_LIST 0x00000004
|
||||
#define KGSL_CONTEXT_CTX_SWITCH 0x00000008
|
||||
#define KGSL_CONTEXT_PREAMBLE 0x00000010
|
||||
#define KGSL_CONTEXT_TRASH_STATE 0x00000020
|
||||
#define KGSL_CONTEXT_PER_CONTEXT_TS 0x00000040
|
||||
|
||||
#define KGSL_CONTEXT_INVALID 0xffffffff
|
||||
|
||||
/* Memory allocayion flags */
|
||||
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
|
||||
@ -58,6 +62,7 @@
|
||||
#define KGSL_FLAGS_RESERVED1 0x00000040
|
||||
#define KGSL_FLAGS_RESERVED2 0x00000080
|
||||
#define KGSL_FLAGS_SOFT_RESET 0x00000100
|
||||
#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200
|
||||
|
||||
/* Clock flags to show which clocks should be controled by a given platform */
|
||||
#define KGSL_CLK_SRC 0x00000001
|
||||
@ -132,9 +137,9 @@ struct kgsl_devmemstore {
|
||||
unsigned int sbz5;
|
||||
};
|
||||
|
||||
#define KGSL_DEVICE_MEMSTORE_OFFSET(field) \
|
||||
offsetof(struct kgsl_devmemstore, field)
|
||||
|
||||
#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \
|
||||
((ctxt_id)*sizeof(struct kgsl_devmemstore) + \
|
||||
offsetof(struct kgsl_devmemstore, field))
|
||||
|
||||
/* timestamp id*/
|
||||
enum kgsl_timestamp_type {
|
||||
@ -268,6 +273,14 @@ struct kgsl_device_waittimestamp {
#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)

struct kgsl_device_waittimestamp_ctxtid {
unsigned int context_id;
unsigned int timestamp;
unsigned int timeout;
};

#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \
_IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid)

/* issue indirect commands to the GPU.
* drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
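
A hedged userspace sketch of the new per-context wait: the device node path, the open/close scaffolding and the millisecond reading of the timeout field are assumptions, while the structure and ioctl number come from the hunk above.

/* Hypothetical userspace usage of IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/msm_kgsl.h>

static int wait_for_ts(int fd, unsigned int ctx_id, unsigned int ts)
{
	struct kgsl_device_waittimestamp_ctxtid req = {
		.context_id = ctx_id,
		.timestamp  = ts,
		.timeout    = 1000,  /* assumed milliseconds, as in the older wait ioctl */
	};

	/* blocks until the context reaches 'ts' or the timeout expires */
	return ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &req);
}

int main(void)
{
	int fd = open("/dev/kgsl-3d0", O_RDWR);  /* assumed node name */
	if (fd < 0)
		return 1;
	if (wait_for_ts(fd, 1, 42) < 0)
		perror("waittimestamp_ctxtid");
	close(fd);
	return 0;
}
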
@ -361,6 +374,26 @@ struct kgsl_map_user_mem {
#define IOCTL_KGSL_MAP_USER_MEM \
_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)

struct kgsl_cmdstream_readtimestamp_ctxtid {
unsigned int context_id;
unsigned int type;
unsigned int timestamp; /*output param */
};

#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \
_IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid)

struct kgsl_cmdstream_freememontimestamp_ctxtid {
unsigned int context_id;
unsigned int gpuaddr;
unsigned int type;
unsigned int timestamp;
};

#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \
_IOW(KGSL_IOC_TYPE, 0x17, \
struct kgsl_cmdstream_freememontimestamp_ctxtid)

/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
int pmem_fd;
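
The matching per-context timestamp read, again as a hedged sketch: KGSL_TIMESTAMP_RETIRED is assumed from enum kgsl_timestamp_type, whose members are elided by the hunk above, and the fd handling mirrors the previous sketch.

/* Hypothetical usage of IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID. */
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

static int read_retired_ts(int fd, unsigned int ctx_id, unsigned int *ts)
{
	struct kgsl_cmdstream_readtimestamp_ctxtid req = {
		.context_id = ctx_id,
		.type       = KGSL_TIMESTAMP_RETIRED,  /* assumed enum member */
	};
	int ret = ioctl(fd, IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID, &req);

	if (ret == 0)
		*ts = req.timestamp;  /* output parameter filled by the driver */
	return ret;
}
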
@ -504,6 +537,14 @@ struct kgsl_timestamp_event_genlock {
int handle; /* Handle of the genlock lock to release */
};

/*
* Set a property within the kernel. Uses the same structure as
* IOCTL_KGSL_GETPROPERTY
*/

#define IOCTL_KGSL_SETPROPERTY \
_IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty)

#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
@ -1,6 +1,7 @@
/* include/linux/msm_mdp.h
*
* Copyright (C) 2007 Google Incorporated
* Copyright (c) 2012 Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -15,25 +16,90 @@
#define _MSM_MDP_H_

#include <linux/types.h>
#include <linux/fb.h>

#define MSMFB_IOCTL_MAGIC 'm'
#define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int)
#define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int)
#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int)
#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int)
#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor)
#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap)
#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data)
/* new ioctls's for set/get ccs matrix */
#define MSMFB_GET_CCS_MATRIX _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs)
#define MSMFB_SET_CCS_MATRIX _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs)
#define MSMFB_OVERLAY_SET _IOWR(MSMFB_IOCTL_MAGIC, 135, \
struct mdp_overlay)
#define MSMFB_OVERLAY_UNSET _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int)
#define MSMFB_OVERLAY_PLAY _IOW(MSMFB_IOCTL_MAGIC, 137, \
struct msmfb_overlay_data)
#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
struct mdp_page_protection)
#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
struct mdp_page_protection)
#define MSMFB_OVERLAY_GET _IOR(MSMFB_IOCTL_MAGIC, 140, \
struct mdp_overlay)
#define MSMFB_OVERLAY_PLAY_ENABLE _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int)
#define MSMFB_OVERLAY_BLT _IOWR(MSMFB_IOCTL_MAGIC, 142, \
struct msmfb_overlay_blt)
#define MSMFB_OVERLAY_BLT_OFFSET _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int)
#define MSMFB_HISTOGRAM_START _IOR(MSMFB_IOCTL_MAGIC, 144, \
struct mdp_histogram_start_req)
#define MSMFB_HISTOGRAM_STOP _IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int)
#define MSMFB_NOTIFY_UPDATE _IOW(MSMFB_IOCTL_MAGIC, 146, unsigned int)

#define MSMFB_OVERLAY_3D _IOWR(MSMFB_IOCTL_MAGIC, 147, \
struct msmfb_overlay_3d)

#define MSMFB_MIXER_INFO _IOWR(MSMFB_IOCTL_MAGIC, 148, \
struct msmfb_mixer_info_req)
#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \
struct msmfb_overlay_data)
#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150)
#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151)
#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152)
#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \
struct msmfb_data)
#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \
struct msmfb_data)
#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155)
#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp)
#define FB_TYPE_3D_PANEL 0x10101010
#define MDP_IMGTYPE2_START 0x10000
#define MSMFB_DRIVER_VERSION 0xF9E8D701

enum {
MDP_RGB_565, /* RGB 565 planar */
NOTIFY_UPDATE_START,
NOTIFY_UPDATE_STOP,
};

enum {
MDP_RGB_565, /* RGB 565 planer */
MDP_XRGB_8888, /* RGB 888 padded */
MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */
MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planer w/ Cb is in MSB */
MDP_Y_CBCR_H2V2_ADRENO,
MDP_ARGB_8888, /* ARGB 888 */
MDP_RGB_888, /* RGB 888 planar */
MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */
MDP_RGB_888, /* RGB 888 planer */
MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planer w/ Cr is in MSB */
MDP_YCRYCB_H2V1, /* YCrYCb interleave */
MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */
MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */
MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */
MDP_RGBA_8888, /* ARGB 888 */
MDP_BGRA_8888, /* ABGR 888 */
MDP_RGBX_8888, /* RGBX 888 */
MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */
MDP_Y_CRCB_H2V2_TILE, /* Y and CrCb, pseudo planer tile */
MDP_Y_CBCR_H2V2_TILE, /* Y and CbCr, pseudo planer tile */
MDP_Y_CR_CB_H2V2, /* Y, Cr and Cb, planar */
MDP_Y_CR_CB_GH2V2, /* Y, Cr and Cb, planar aligned to Android YV12 */
MDP_Y_CB_CR_H2V2, /* Y, Cb and Cr, planar */
MDP_Y_CRCB_H1V1, /* Y and CrCb, pseduo planer w/ Cr is in MSB */
MDP_Y_CBCR_H1V1, /* Y and CbCr, pseduo planer w/ Cb is in MSB */
MDP_IMGTYPE_LIMIT,
MDP_BGR_565 = MDP_IMGTYPE2_START, /* BGR 565 planer */
MDP_FB_FORMAT, /* framebuffer format */
MDP_IMGTYPE_LIMIT2 /* Non valid image type after this enum */
};

enum {
@ -41,24 +107,57 @@ enum {
FB_IMG,
};

/* flag values */
enum {
HSIC_HUE = 0,
HSIC_SAT,
HSIC_INT,
HSIC_CON,
NUM_HSIC_PARAM,
};

/* mdp_blit_req flag values */
#define MDP_ROT_NOP 0
#define MDP_FLIP_LR 0x1
#define MDP_FLIP_UD 0x2
#define MDP_ROT_90 0x4
#define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR)
#define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR)
#define MDP_ROT_MASK 0x7
#define MDP_DITHER 0x8
#define MDP_BLUR 0x10
#define MDP_BLEND_FG_PREMULT 0x20000
#define MDP_DEINTERLACE 0x80000000
#define MDP_SHARPENING 0x40000000
#define MDP_NO_DMA_BARRIER_START 0x20000000
#define MDP_NO_DMA_BARRIER_END 0x10000000
#define MDP_NO_BLIT 0x08000000
#define MDP_BLIT_WITH_DMA_BARRIERS 0x000
#define MDP_BLIT_WITH_NO_DMA_BARRIERS \
(MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END)
#define MDP_BLIT_SRC_GEM 0x04000000
#define MDP_BLIT_DST_GEM 0x02000000
#define MDP_BLIT_NON_CACHED 0x01000000
#define MDP_OV_PIPE_SHARE 0x00800000
#define MDP_DEINTERLACE_ODD 0x00400000
#define MDP_OV_PLAY_NOWAIT 0x00200000
#define MDP_SOURCE_ROTATED_90 0x00100000
#define MDP_DPP_HSIC 0x00080000
#define MDP_BACKEND_COMPOSITION 0x00040000
#define MDP_BORDERFILL_SUPPORTED 0x00010000
#define MDP_SECURE_OVERLAY_SESSION 0x00008000
#define MDP_MEMORY_ID_TYPE_FB 0x00001000

#define MDP_TRANSP_NOP 0xffffffff
#define MDP_ALPHA_NOP 0xff

/* drewis: added for android 4.0 */
#define MDP_BLIT_NON_CACHED 0x01000000
/* drewis: end */
#define MDP_FB_PAGE_PROTECTION_NONCACHED (0)
#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE (1)
#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2)
#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE (3)
#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE (4)
/* Sentinel: Don't use! */
#define MDP_FB_PAGE_PROTECTION_INVALID (5)
/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. */
#define MDP_NUM_FB_PAGE_PROTECTION_VALUES (5)

struct mdp_rect {
uint32_t x;
@ -73,8 +172,41 @@ struct mdp_img {
uint32_t format;
uint32_t offset;
int memory_id; /* the file descriptor */
uint32_t priv;
};

/*
* {3x3} + {3} ccs matrix
*/

#define MDP_CCS_RGB2YUV 0
#define MDP_CCS_YUV2RGB 1

#define MDP_CCS_SIZE 9
#define MDP_BV_SIZE 3

struct mdp_ccs {
int direction; /* MDP_CCS_RGB2YUV or YUV2RGB */
uint16_t ccs[MDP_CCS_SIZE]; /* 3x3 color coefficients */
uint16_t bv[MDP_BV_SIZE]; /* 1x3 bias vector */
};

struct mdp_csc {
int id;
uint32_t csc_mv[9];
uint32_t csc_pre_bv[3];
uint32_t csc_post_bv[3];
uint32_t csc_pre_lv[6];
uint32_t csc_post_lv[6];
};

/* The version of the mdp_blit_req structure so that
* user applications can selectively decide which functionality
* to include
*/

#define MDP_BLIT_REQ_VERSION 2

struct mdp_blit_req {
struct mdp_img src;
struct mdp_img dst;
@ -83,6 +215,7 @@ struct mdp_blit_req {
uint32_t alpha;
uint32_t transp_mask;
uint32_t flags;
int sharpening_strength; /* -127 <--> 127, default 64 */
};

struct mdp_blit_req_list {
@ -90,4 +223,289 @@ struct mdp_blit_req_list {
struct mdp_blit_req req[];
};
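
For orientation, a hedged sketch of a single rotated blit through the framebuffer ioctl. The device node handling, the fds behind memory_id, the one-element wrapper around the flexible-array mdp_blit_req_list, and the src_rect/dst_rect and width/height members elided by the hunks above are all assumptions; the flag, format and structure names are taken from this header.

/* Hypothetical copybit-style caller of MSMFB_BLIT. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

struct blit_list_1 {                  /* mdp_blit_req_list ends in a flexible array */
	uint32_t count;
	struct mdp_blit_req req[1];
};

int do_blit(int fb_fd, int src_fd, int dst_fd)
{
	struct blit_list_1 list;

	memset(&list, 0, sizeof(list));
	list.count = 1;

	list.req[0].src.width  = 640;             /* width/height assumed members */
	list.req[0].src.height = 480;
	list.req[0].src.format = MDP_RGB_565;
	list.req[0].src.memory_id = src_fd;       /* fd backing the source pixels */
	list.req[0].src_rect = (struct mdp_rect){ 0, 0, 640, 480 };

	list.req[0].dst.width  = 480;
	list.req[0].dst.height = 640;
	list.req[0].dst.format = MDP_RGB_565;
	list.req[0].dst.memory_id = dst_fd;
	list.req[0].dst_rect = (struct mdp_rect){ 0, 0, 480, 640 };

	list.req[0].alpha = MDP_ALPHA_NOP;        /* no constant alpha */
	list.req[0].transp_mask = MDP_TRANSP_NOP; /* no transparency keying */
	list.req[0].flags = MDP_ROT_90;           /* rotate 90 degrees */

	return ioctl(fb_fd, MSMFB_BLIT, &list);
}
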
#define MSMFB_DATA_VERSION 2

struct msmfb_data {
uint32_t offset;
int memory_id;
int id;
uint32_t flags;
uint32_t priv;
uint32_t iova;
};

#define MSMFB_NEW_REQUEST -1

struct msmfb_overlay_data {
uint32_t id;
struct msmfb_data data;
uint32_t version_key;
struct msmfb_data plane1_data;
struct msmfb_data plane2_data;
};

struct msmfb_img {
uint32_t width;
uint32_t height;
uint32_t format;
};

#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1
struct msmfb_writeback_data {
struct msmfb_data buf_info;
struct msmfb_img img;
};

struct dpp_ctrl {
/*
*'sharp_strength' has inputs = -128 <-> 127
* Increasingly positive values correlate with increasingly sharper
* picture. Increasingly negative values correlate with increasingly
* smoothed picture.
*/
int8_t sharp_strength;
int8_t hsic_params[NUM_HSIC_PARAM];
};

struct mdp_overlay {
struct msmfb_img src;
struct mdp_rect src_rect;
struct mdp_rect dst_rect;
uint32_t z_order; /* stage number */
uint32_t is_fg; /* control alpha & transp */
uint32_t alpha;
uint32_t transp_mask;
uint32_t flags;
uint32_t id;
uint32_t user_data[8];
struct dpp_ctrl dpp;
};
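
A hedged sketch of the overlay set/play handshake these structures support: MSMFB_OVERLAY_SET with id set to MSMFB_NEW_REQUEST is expected to return the driver-allocated pipe id in ov.id, which the MSMFB_OVERLAY_PLAY request then quotes. The node path, the buffer fd and the mdp_rect field order are assumptions; everything else is from this header.

/* Hypothetical overlay client. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

int show_overlay(int fb_fd, int buf_fd, uint32_t buf_offset)
{
	struct mdp_overlay ov;
	struct msmfb_overlay_data od;

	memset(&ov, 0, sizeof(ov));
	ov.id = MSMFB_NEW_REQUEST;           /* ask the driver to allocate a pipe */
	ov.src.width  = 1280;
	ov.src.height = 720;
	ov.src.format = MDP_Y_CBCR_H2V2;     /* pseudo-planar YUV source */
	ov.src_rect = (struct mdp_rect){ 0, 0, 1280, 720 };
	ov.dst_rect = (struct mdp_rect){ 0, 0, 800, 480 };
	ov.z_order = 0;
	ov.alpha = MDP_ALPHA_NOP;
	ov.transp_mask = MDP_TRANSP_NOP;

	if (ioctl(fb_fd, MSMFB_OVERLAY_SET, &ov) < 0)
		return -1;                   /* ov.id now holds the allocated id */

	memset(&od, 0, sizeof(od));
	od.id = ov.id;
	od.data.memory_id = buf_fd;          /* fd backing the frame */
	od.data.offset = buf_offset;

	return ioctl(fb_fd, MSMFB_OVERLAY_PLAY, &od);
}
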
struct msmfb_overlay_3d {
uint32_t is_3d;
uint32_t width;
uint32_t height;
};

struct msmfb_overlay_blt {
uint32_t enable;
uint32_t offset;
uint32_t width;
uint32_t height;
uint32_t bpp;
};

struct mdp_histogram {
uint32_t frame_cnt;
uint32_t bin_cnt;
uint32_t *r;
uint32_t *g;
uint32_t *b;
};

/*

mdp_block_type defines the identifiers for each of pipes in MDP 4.3

MDP_BLOCK_RESERVED is provided for backward compatibility and is
deprecated. It corresponds to DMA_P. So MDP_BLOCK_DMA_P should be used
instead.

*/

enum {
MDP_BLOCK_RESERVED = 0,
MDP_BLOCK_OVERLAY_0,
MDP_BLOCK_OVERLAY_1,
MDP_BLOCK_VG_1,
MDP_BLOCK_VG_2,
MDP_BLOCK_RGB_1,
MDP_BLOCK_RGB_2,
MDP_BLOCK_DMA_P,
MDP_BLOCK_DMA_S,
MDP_BLOCK_DMA_E,
MDP_BLOCK_MAX,
};

/*
mdp_histogram_start_req is used to provide the parameters for
histogram start request
*/

struct mdp_histogram_start_req {
uint32_t block;
uint8_t frame_cnt;
uint8_t bit_mask;
uint8_t num_bins;
};

/*

mdp_histogram_data is used to return the histogram data, once
the histogram is done/stopped/cance

*/

struct mdp_histogram_data {
uint32_t block;
uint8_t bin_cnt;
uint32_t *c0;
uint32_t *c1;
uint32_t *c2;
uint32_t *extra_info;
};
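
A hedged sketch of histogram collection with the new start request and data structures: the bin count, the extra_info sizing and any blocking behaviour of the read are assumptions, while the structures, block identifier and ioctls come from this header.

/* Hypothetical histogram reader for the primary DMA pipe. */
#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

#define HIST_BINS 32   /* illustrative bin count, not mandated by the header */

int read_histogram(int fb_fd)
{
	struct mdp_histogram_start_req start;
	struct mdp_histogram_data data;
	uint32_t c0[HIST_BINS], c1[HIST_BINS], c2[HIST_BINS], extra[2];

	memset(&start, 0, sizeof(start));
	start.block = MDP_BLOCK_DMA_P;   /* histogram source pipe */
	start.frame_cnt = 1;
	start.num_bins = HIST_BINS;
	if (ioctl(fb_fd, MSMFB_HISTOGRAM_START, &start) < 0)
		return -1;

	memset(&data, 0, sizeof(data));
	data.block = MDP_BLOCK_DMA_P;
	data.bin_cnt = HIST_BINS;
	data.c0 = c0;                    /* per-component bin buffers */
	data.c1 = c1;
	data.c2 = c2;
	data.extra_info = extra;
	return ioctl(fb_fd, MSMFB_HISTOGRAM, &data);
}
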
struct mdp_pcc_coeff {
uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1;
};

struct mdp_pcc_cfg_data {
uint32_t block;
uint32_t ops;
struct mdp_pcc_coeff r, g, b;
};

#define MDP_CSC_FLAG_ENABLE 0x1
#define MDP_CSC_FLAG_YUV_IN 0x2
#define MDP_CSC_FLAG_YUV_OUT 0x4

struct mdp_csc_cfg {
/* flags for enable CSC, toggling RGB,YUV input/output */
uint32_t flags;
uint32_t csc_mv[9];
uint32_t csc_pre_bv[3];
uint32_t csc_post_bv[3];
uint32_t csc_pre_lv[6];
uint32_t csc_post_lv[6];
};

struct mdp_csc_cfg_data {
uint32_t block;
struct mdp_csc_cfg csc_data;
};

enum {
mdp_lut_igc,
mdp_lut_pgc,
mdp_lut_hist,
mdp_lut_max,
};

struct mdp_igc_lut_data {
uint32_t block;
uint32_t len, ops;
uint32_t *c0_c1_data;
uint32_t *c2_data;
};

struct mdp_ar_gc_lut_data {
uint32_t x_start;
uint32_t slope;
uint32_t offset;
};

struct mdp_pgc_lut_data {
uint32_t block;
uint32_t flags;
uint8_t num_r_stages;
uint8_t num_g_stages;
uint8_t num_b_stages;
struct mdp_ar_gc_lut_data *r_data;
struct mdp_ar_gc_lut_data *g_data;
struct mdp_ar_gc_lut_data *b_data;
};

struct mdp_hist_lut_data {
uint32_t block;
uint32_t ops;
uint32_t len;
uint32_t *data;
};

struct mdp_lut_cfg_data {
uint32_t lut_type;
union {
struct mdp_igc_lut_data igc_lut_data;
struct mdp_pgc_lut_data pgc_lut_data;
struct mdp_hist_lut_data hist_lut_data;
} data;
};

struct mdp_qseed_cfg_data {
uint32_t block;
uint32_t table_num;
uint32_t ops;
uint32_t len;
uint32_t *data;
};

enum {
mdp_op_pcc_cfg,
mdp_op_csc_cfg,
mdp_op_lut_cfg,
mdp_op_qseed_cfg,
mdp_op_max,
};

struct msmfb_mdp_pp {
uint32_t op;
union {
struct mdp_pcc_cfg_data pcc_cfg_data;
struct mdp_csc_cfg_data csc_cfg_data;
struct mdp_lut_cfg_data lut_cfg_data;
struct mdp_qseed_cfg_data qseed_cfg_data;
} data;
};
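
Finally, a hedged sketch of the new single post-processing entry point, dispatched on op: the coefficient values and the contents of the ops field are placeholders whose semantics are not defined in this hunk, while the structure layout, enum and ioctl are from this header.

/* Hypothetical MSMFB_MDP_PP caller configuring polynomial color correction. */
#include <string.h>
#include <sys/ioctl.h>
#include <linux/msm_mdp.h>

int set_basic_pcc(int fb_fd)
{
	struct msmfb_mdp_pp pp;

	memset(&pp, 0, sizeof(pp));
	pp.op = mdp_op_pcc_cfg;                       /* select the PCC payload */
	pp.data.pcc_cfg_data.block = MDP_BLOCK_DMA_P; /* target pipe */
	pp.data.pcc_cfg_data.ops = 0;                 /* driver-specific enable/write
	                                                 flags, not defined in this hunk */
	pp.data.pcc_cfg_data.r.r = 1;                 /* placeholder per-channel gains */
	pp.data.pcc_cfg_data.g.g = 1;
	pp.data.pcc_cfg_data.b.b = 1;

	return ioctl(fb_fd, MSMFB_MDP_PP, &pp);
}
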

struct mdp_page_protection {
uint32_t page_protection;
};

struct mdp_mixer_info {
int pndx;
int pnum;
int ptype;
int mixer_num;
int z_order;
};

#define MAX_PIPE_PER_MIXER 4

struct msmfb_mixer_info_req {
int mixer_num;
int cnt;
struct mdp_mixer_info info[MAX_PIPE_PER_MIXER];
};

enum {
DISPLAY_SUBSYSTEM_ID,
ROTATOR_SUBSYSTEM_ID,
};

#ifdef __KERNEL__

/* get the framebuffer physical address information */
int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num,
int subsys_id);
struct fb_info *msm_fb_get_writeback_fb(void);
int msm_fb_writeback_init(struct fb_info *info);
int msm_fb_writeback_start(struct fb_info *info);
int msm_fb_writeback_queue_buffer(struct fb_info *info,
struct msmfb_data *data);
int msm_fb_writeback_dequeue_buffer(struct fb_info *info,
struct msmfb_data *data);
int msm_fb_writeback_stop(struct fb_info *info);
int msm_fb_writeback_terminate(struct fb_info *info);
#endif

#endif /* _MSM_MDP_H_ */
mm/ashmem.c: 0 lines changed (Normal file → Executable file)