Merge pull request from zeusk/ics_HWA

[KGSL] update to msm-kgsl3d0 v3.8
Author: tytung
Date: 2012-05-13 21:49:47 -07:00
Commit: 50abfc6c2d
56 changed files with 6445 additions and 5088 deletions

@@ -0,0 +1 @@
#include <generated/asm-offsets.h>

@@ -0,0 +1,75 @@
/*
* arch/arm/include/asm/outercache.h
*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
};
#ifdef CONFIG_OUTER_CACHE
extern struct outer_cache_fns outer_cache;
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
#else
static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }
#endif
#ifdef CONFIG_OUTER_CACHE_SYNC
static inline void outer_sync(void)
{
if (outer_cache.sync)
outer_cache.sync();
}
#else
static inline void outer_sync(void)
{ }
#endif
#endif /* __ASM_OUTERCACHE_H */
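The struct above is only a table of function pointers: an outer-cache driver fills it in at boot, and the inline helpers guard every call with a NULL check so platforms without an outer cache pay nothing. A minimal registration sketch, assuming a hypothetical my_l2 driver (the in-tree L2X0 driver follows the same pattern):

/* Hypothetical outer-cache driver registering its range operations. */
#include <asm/outercache.h>

static void my_l2_inv_range(unsigned long start, unsigned long end)
{
	/* invalidate [start, end) in the outer cache, line by line */
}

static void my_l2_clean_range(unsigned long start, unsigned long end)
{
	/* write back dirty outer-cache lines in [start, end) */
}

static void my_l2_flush_range(unsigned long start, unsigned long end)
{
	/* clean, then invalidate [start, end) */
}

void __init my_l2_init(void)
{
	outer_cache.inv_range = my_l2_inv_range;
	outer_cache.clean_range = my_l2_clean_range;
	outer_cache.flush_range = my_l2_flush_range;
}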

@@ -1,4 +1,4 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
@@ -24,16 +24,40 @@
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __A205_REG_H
#define __A205_REG_H
#define REG_LEIA_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
#define REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
#define REG_LEIA_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
#define REG_LEIA_GRAS_CONTROL 0x2210
#define REG_LEIA_VSC_BIN_SIZE 0x0C01
#define REG_LEIA_VSC_PIPE_DATA_LENGTH_7 0x0C1D
#ifndef _INTERNAL_POWER_RAIL_H
#define _INTERNAL_POWER_RAIL_H
/* Clock power rail IDs */
#define PWR_RAIL_GRP_CLK 8
#define PWR_RAIL_GRP_2D_CLK 58
#define PWR_RAIL_MDP_CLK 14
#define PWR_RAIL_MFC_CLK 68
#define PWR_RAIL_ROTATOR_CLK 90
#define PWR_RAIL_VDC_CLK 39
#define PWR_RAIL_VFE_CLK 41
#define PWR_RAIL_VPE_CLK 76
enum rail_ctl_mode {
PWR_RAIL_CTL_AUTO = 0,
PWR_RAIL_CTL_MANUAL,
};
static inline int __maybe_unused internal_pwr_rail_ctl(unsigned rail_id,
bool enable)
{
/* Not yet implemented. */
return 0;
}
static inline int __maybe_unused internal_pwr_rail_mode(unsigned rail_id,
enum rail_ctl_mode mode)
{
/* Not yet implemented. */
return 0;
}
int internal_pwr_rail_ctl_auto(unsigned rail_id, bool enable);
#endif /* _INTERNAL_POWER_RAIL_H */
#endif /*__A205_REG_H */
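KGSL is one consumer of these rail IDs: the 3D core later in this commit sets .pwr_rail = PWR_RAIL_GRP_CLK. A hedged sketch of the expected call sequence for a rail consumer (error handling trimmed; with the inline stubs above, both calls simply return 0 until the real implementation lands):

/* Sketch: put a rail under manual software control, then switch it on. */
static int my_rail_on(void)
{
	int ret;

	ret = internal_pwr_rail_mode(PWR_RAIL_GRP_CLK, PWR_RAIL_CTL_MANUAL);
	if (ret)
		return ret;
	return internal_pwr_rail_ctl(PWR_RAIL_GRP_CLK, true);
}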

@@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/memory.h
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -12,7 +13,6 @@
* GNU General Public License for more details.
*
*/
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
@@ -37,28 +37,41 @@
#define PHYS_OFFSET UL(0x10000000)
#endif
#define MAX_PHYSMEM_BITS 32
#define SECTION_SIZE_BITS 25
#define HAS_ARCH_IO_REMAP_PFN_RANGE
#define CONSISTENT_DMA_SIZE (4*SZ_1M)
#ifndef __ASSEMBLY__
void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment);
unsigned long allocate_contiguous_ebi_nomap(unsigned long, unsigned long);
void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long);
void clean_caches(unsigned long, unsigned long, unsigned long);
void invalidate_caches(unsigned long, unsigned long, unsigned long);
int platform_physical_remove_pages(unsigned long, unsigned long);
int platform_physical_add_pages(unsigned long, unsigned long);
int platform_physical_low_power_pages(unsigned long, unsigned long);
#ifdef CONFIG_ARCH_MSM_ARM11
void write_to_strongly_ordered_memory(void);
void map_zero_page_strongly_ordered(void);
#include <asm/mach-types.h>
#if defined(CONFIG_ARCH_MSM7227)
#ifdef CONFIG_ARCH_MSM7X27
#define arch_barrier_extra() do \
{ \
write_to_strongly_ordered_memory(); \
} while (0)
#else
#define arch_barrier_extra() do {} while (0)
#define arch_barrier_extra() do \
{ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) \
write_to_strongly_ordered_memory(); \
} while (0)
#endif
#endif
#ifdef CONFIG_CACHE_L2X0
@@ -67,12 +80,17 @@ extern void l2x0_cache_flush_all(void);
#define finish_arch_switch(prev) do { l2x0_cache_sync(); } while (0)
#endif
#endif
#endif
#ifdef CONFIG_ARCH_MSM_SCORPION
#define arch_has_speculative_dfetch() 1
#define arch_has_speculative_dfetch() 1
#endif
#endif
/* these correspond to values known by the modem */
#define MEMORY_DEEP_POWERDOWN 0
#define MEMORY_SELF_REFRESH 1
#define MEMORY_ACTIVE 2
#define NPA_MEMORY_NODE_NAME "/mem/ebi1/cs1"

@@ -0,0 +1,64 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* The MSM Hardware supports multiple flavors of physical memory.
* This file captures hardware specific information of these types.
*/
#ifndef __ASM_ARCH_MSM_MEMTYPES_H
#define __ASM_ARCH_MSM_MEMTYPES_H
#include <mach/memory.h>
#include <linux/init.h>
int __init meminfo_init(unsigned int, unsigned int);
/* Redundant check to prevent this from being included outside of 7x30 */
#if defined(CONFIG_ARCH_MSM7X30)
unsigned int get_num_populated_chipselects(void);
#endif
unsigned int get_num_memory_banks(void);
unsigned int get_memory_bank_size(unsigned int);
unsigned int get_memory_bank_start(unsigned int);
int soc_change_memory_power(u64, u64, int);
enum {
MEMTYPE_NONE = -1,
MEMTYPE_SMI_KERNEL = 0,
MEMTYPE_SMI,
MEMTYPE_EBI0,
MEMTYPE_EBI1,
MEMTYPE_MAX,
};
void msm_reserve(void);
#define MEMTYPE_FLAGS_FIXED 0x1
#define MEMTYPE_FLAGS_1M_ALIGN 0x2
struct memtype_reserve {
unsigned long start;
unsigned long size;
unsigned long limit;
int flags;
};
struct reserve_info {
struct memtype_reserve *memtype_reserve_table;
void (*calculate_reserve_sizes)(void);
int (*paddr_to_memtype)(unsigned int);
unsigned long low_unstable_address;
unsigned long max_unstable_size;
unsigned long bank_size;
};
extern struct reserve_info *reserve_info;
#endif
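A board port is expected to point reserve_info at its own table and callbacks before msm_reserve() runs from the machine's reserve hook. A sketch under that assumption (all my_board_* names are hypothetical):

/* Hypothetical board wiring for the reservation API declared above. */
static struct memtype_reserve my_board_reserve_table[MEMTYPE_MAX] __initdata;

static void __init my_board_calculate_reserve_sizes(void)
{
	/* accumulate each subsystem's carveout into the table */
	my_board_reserve_table[MEMTYPE_EBI1].size += SZ_4M;
}

static int my_board_paddr_to_memtype(unsigned int paddr)
{
	return MEMTYPE_EBI1;	/* single memory type on this board */
}

static struct reserve_info my_board_reserve_info __initdata = {
	.memtype_reserve_table = my_board_reserve_table,
	.calculate_reserve_sizes = my_board_calculate_reserve_sizes,
	.paddr_to_memtype = my_board_paddr_to_memtype,
};

static void __init my_board_reserve(void)
{
	reserve_info = &my_board_reserve_info;
	msm_reserve();
}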

@@ -16,10 +16,19 @@
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
@@ -34,7 +43,7 @@ int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
void *zero_page_strongly_ordered;
static void map_zero_page_strongly_ordered(void)
void map_zero_page_strongly_ordered(void)
{
if (zero_page_strongly_ordered)
return;
@@ -43,12 +52,15 @@ static void map_zero_page_strongly_ordered(void)
ioremap_strongly_ordered(page_to_pfn(empty_zero_page)
<< PAGE_SHIFT, PAGE_SIZE);
}
EXPORT_SYMBOL(map_zero_page_strongly_ordered);
void write_to_strongly_ordered_memory(void)
{
map_zero_page_strongly_ordered();
*(int *)zero_page_strongly_ordered = 0;
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
void flush_axi_bus_buffer(void)
{
__asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
@@ -109,3 +121,57 @@ void invalidate_caches(unsigned long vstart,
flush_axi_bus_buffer();
}
void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
void *unused_addr = NULL;
unsigned long addr, tmp_size, unused_size;
/* Allocate maximum size needed, see where it ends up.
* Then free it -- in this path there are no other allocators
* so we can depend on getting the same address back
* when we allocate a smaller piece that is aligned
* at the end (if necessary) and the piece we really want,
* then free the unused first piece.
*/
tmp_size = size + alignment - PAGE_SIZE;
addr = (unsigned long)alloc_bootmem(tmp_size);
free_bootmem(__pa(addr), tmp_size);
unused_size = alignment - (addr % alignment);
if (unused_size)
unused_addr = alloc_bootmem(unused_size);
addr = (unsigned long)alloc_bootmem(size);
if (unused_size)
free_bootmem(__pa(unused_addr), unused_size);
return (void *)addr;
}
int platform_physical_remove_pages(unsigned long start_pfn,
unsigned long nr_pages)
{
return 0;
}
int platform_physical_add_pages(unsigned long start_pfn,
unsigned long nr_pages)
{
return 0;
}
int platform_physical_low_power_pages(unsigned long start_pfn,
unsigned long nr_pages)
{
return 0;
}
unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, MEMTYPE_EBI0,
align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
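alloc_bootmem_aligned() above leans on bootmem determinism: allocate the worst-case span, free it, reclaim the alignment gap and then the real buffer, and release the gap. A hypothetical early-boot caller:

/* Hypothetical caller: a 1 MB buffer on a 1 MB boundary at boot time.
 * (Assumes alignment is a page-multiple power of two, per the probe
 * arithmetic above.) */
void *buf = alloc_bootmem_aligned(SZ_1M, SZ_1M);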

@@ -4,6 +4,7 @@
* bootloader.
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@@ -22,7 +23,7 @@
#include <linux/platform_device.h>
#include <asm/mach/flash.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/setup.h>
@@ -38,47 +39,26 @@
#define ATAG_MSM_PARTITION 0x4d534D70 /* MSMp */
struct msm_ptbl_entry
{
struct msm_ptbl_entry {
char name[16];
__u32 offset;
__u32 size;
__u32 flags;
};
#define MSM_MAX_PARTITIONS 11
#define MSM_MAX_PARTITIONS 8
static struct mtd_partition msm_nand_partitions[MSM_MAX_PARTITIONS];
static char msm_nand_names[MSM_MAX_PARTITIONS * 16];
extern struct flash_platform_data msm_nand_data;
int emmc_partition_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
struct mtd_partition *ptn = msm_nand_partitions;
char *p = page;
int i;
uint64_t offset;
uint64_t size;
p += sprintf(p, "dev: size erasesize name\n");
for (i = 0; i < MSM_MAX_PARTITIONS && ptn->name; i++, ptn++) {
offset = ptn->offset;
size = ptn->size;
p += sprintf(p, "mmcblk0p%llu: %08llx %08x \"%s\"\n", offset, size * 512, 512, ptn->name);
}
return p - page;
}
static int __init parse_tag_msm_partition(const struct tag *tag)
{
struct mtd_partition *ptn = msm_nand_partitions;
char *name = msm_nand_names;
struct msm_ptbl_entry *entry = (void *) &tag->u;
unsigned count, n;
unsigned have_kpanic = 0;
count = (tag->hdr.size - 2) /
(sizeof(struct msm_ptbl_entry) / sizeof(__u32));
@@ -90,9 +70,6 @@ static int __init parse_tag_msm_partition(const struct tag *tag)
memcpy(name, entry->name, 15);
name[15] = 0;
if (!strcmp(name, "kpanic"))
have_kpanic = 1;
ptn->name = name;
ptn->offset = entry->offset;
ptn->size = entry->size;
@@ -102,42 +79,6 @@ static int __init parse_tag_msm_partition(const struct tag *tag)
ptn++;
}
#ifdef CONFIG_VIRTUAL_KPANIC_PARTITION
if (!have_kpanic) {
int i;
uint64_t kpanic_off = 0;
if (count == MSM_MAX_PARTITIONS) {
printk("Cannot create virtual 'kpanic' partition\n");
goto out;
}
for (i = 0; i < count; i++) {
ptn = &msm_nand_partitions[i];
if (!strcmp(ptn->name, CONFIG_VIRTUAL_KPANIC_SRC)) {
ptn->size -= CONFIG_VIRTUAL_KPANIC_PSIZE;
kpanic_off = ptn->offset + ptn->size;
break;
}
}
if (i == count) {
printk(KERN_ERR "Partition %s not found\n",
CONFIG_VIRTUAL_KPANIC_SRC);
goto out;
}
ptn = &msm_nand_partitions[count];
ptn->name = "kpanic";
ptn->offset = kpanic_off;
ptn->size = CONFIG_VIRTUAL_KPANIC_PSIZE;
printk("Virtual mtd partition '%s' created @%llx (%llu)\n",
ptn->name, ptn->offset, ptn->size);
count++;
}
out:
#endif /* CONFIG_VIRTUAL_KPANIC_SRC */
msm_nand_data.nr_parts = count;
msm_nand_data.parts = msm_nand_partitions;

@@ -64,22 +64,30 @@ config MSM_KGSL_DRM
bool "Build a DRM interface for the MSM_KGSL driver"
depends on MSM_KGSL && DRM
config MSM_KGSL_MMU
config MSM_KGSL_GPUMMU
bool "Enable the GPU MMU in the MSM_KGSL driver"
depends on MSM_KGSL && MMU && !MSM_KGSL_CFF_DUMP
depends on MSM_KGSL && !MSM_KGSL_CFF_DUMP
default y
config MSM_KGSL_IOMMU
bool "Enable the use of IOMMU in the MSM_KGSL driver"
depends on MSM_KGSL && MSM_IOMMU && !MSM_KGSL_GPUMMU && !MSM_KGSL_CFF_DUMP
config MSM_KGSL_MMU
bool
depends on MSM_KGSL_GPUMMU || MSM_KGSL_IOMMU
default y
config KGSL_PER_PROCESS_PAGE_TABLE
bool "Enable Per Process page tables for the KGSL driver"
default n
depends on MSM_KGSL_MMU && !MSM_KGSL_DRM
depends on MSM_KGSL_GPUMMU && !MSM_KGSL_DRM
---help---
The MMU will use per process pagetables when enabled.
config MSM_KGSL_PAGE_TABLE_SIZE
hex "Size of pagetables"
default 0xFFF0000
depends on MSM_KGSL_MMU
---help---
Sets the pagetable size used by the MMU. The max value
is 0xFFF0000 or (256M - 64K).
@@ -97,7 +105,7 @@ config MSM_KGSL_PAGE_TABLE_COUNT
config MSM_KGSL_MMU_PAGE_FAULT
bool "Force the GPU MMU to page fault for unmapped regions"
default y
depends on MSM_KGSL_MMU
depends on MSM_KGSL_GPUMMU
config MSM_KGSL_DISABLE_SHADOW_WRITES
bool "Disable register shadow writes for context switches"

@@ -4,17 +4,21 @@ msm_kgsl_core-y = \
kgsl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
kgsl_pwrscale.o
kgsl_pwrscale.o \
kgsl_mmu.o \
kgsl_gpummu.o
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_MMU) += kgsl_mmu.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS) += kgsl_pwrscale_idlestats.o
msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
adreno_postmortem.o \
adreno_a2xx.o \
adreno.o
msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o

@@ -1,29 +1,13 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __A200_REG_H
@@ -271,18 +255,6 @@ union reg_cp_rb_cntl {
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
@@ -335,21 +307,6 @@ union reg_cp_rb_cntl {
#define REG_MASTER_INT_SIGNAL 0x03B7
#define REG_MH_ARBITER_CONFIG 0x0A40
#define REG_MH_INTERRUPT_CLEAR 0x0A44
#define REG_MH_INTERRUPT_MASK 0x0A42
#define REG_MH_INTERRUPT_STATUS 0x0A43
#define REG_MH_MMU_CONFIG 0x0040
#define REG_MH_MMU_INVALIDATE 0x0045
#define REG_MH_MMU_MPU_BASE 0x0046
#define REG_MH_MMU_MPU_END 0x0047
#define REG_MH_MMU_PAGE_FAULT 0x0043
#define REG_MH_MMU_PT_BASE 0x0042
#define REG_MH_MMU_TRAN_ERROR 0x0044
#define REG_MH_MMU_VA_RANGE 0x0041
#define REG_MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define REG_MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
#define REG_PA_CL_VPORT_XSCALE 0x210F
#define REG_PA_CL_VPORT_ZOFFSET 0x2114
#define REG_PA_CL_VPORT_ZSCALE 0x2113
@@ -407,6 +364,7 @@ union reg_cp_rb_cntl {
#define REG_SQ_CF_BOOLEANS 0x4900
#define REG_SQ_CF_LOOP 0x4908
#define REG_SQ_GPR_MANAGEMENT 0x0D00
#define REG_SQ_FLOW_CONTROL 0x0D01
#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
#define REG_SQ_INT_ACK 0x0D36
#define REG_SQ_INT_CNTL 0x0D34
@@ -438,11 +396,23 @@ union reg_cp_rb_cntl {
#define REG_SQ_CONSTANT_0 0x4000
#define REG_SQ_FETCH_0 0x4800
#define REG_MH_AXI_ERROR 0xA45
#define REG_MH_DEBUG_CTRL 0xA4E
#define REG_MH_DEBUG_DATA 0xA4F
#define REG_COHER_BASE_PM4 0xA2A
#define REG_COHER_STATUS_PM4 0xA2B
#define REG_COHER_SIZE_PM4 0xA29
/*registers added in adreno220*/
#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209
#define REG_A220_GRAS_CONTROL 0x2210
#define REG_A220_VSC_BIN_SIZE 0x0C01
#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D
/*registers added in adreno225*/
#define REG_A225_RB_COLOR_INFO3 0x2005
#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103
#define REG_A225_GRAS_UCP0X 0x2340
#define REG_A225_GRAS_UCP_ENABLED 0x2360
#endif /* __A200_REG_H */

@@ -19,21 +19,26 @@
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_debugfs.h"
#include "adreno_postmortem.h"
#include "a200_reg.h"
#include "a2xx_reg.h"
#include "kgsl_mmu.h"
#define cpu_is_msm7x01() 0
#define cpu_is_msm7x30() 0
#define cpu_is_qsd8x50() 1
#define cpu_is_msm8x60() 0
#define cpu_is_msm8960() 0
#define cpu_is_msm8930() 0
#define DRIVER_VERSION_MAJOR 3
#define DRIVER_VERSION_MINOR 1
#define GSL_RBBM_INT_MASK \
(RBBM_INT_CNTL__RDERR_INT_MASK | \
RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK)
/* Adreno MH arbiter config*/
#define ADRENO_CFG_MHARB \
(0x10 \
@@ -66,8 +71,7 @@
| (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
| (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
/* max msecs to wait for gpu to finish its operation(s) */
#define MAX_WAITGPU_SECS (HZ + HZ/2)
static const struct kgsl_functable adreno_functable;
static struct adreno_device device_3d0 = {
.dev = {
@@ -75,29 +79,24 @@ static struct adreno_device device_3d0 = {
.id = KGSL_DEVICE_3D0,
.ver_major = DRIVER_VERSION_MAJOR,
.ver_minor = DRIVER_VERSION_MINOR,
.mmu = {
.config = ADRENO_MMU_CONFIG,
.mh = {
.mharb = ADRENO_CFG_MHARB,
/* Remove 1k boundary check in z470 to avoid a GPU
* hang. Notice that this solution won't work if
* both EBI and SMI are used
*/
.mh_intf_cfg1 = 0x00032f07,
/* turn off memory protection unit by setting
acceptable physical address range to include
all pages. */
.mpu_base = 0x00000000,
.mpu_range = 0xFFFFF000,
.reg = {
.config = REG_MH_MMU_CONFIG,
.mpu_base = REG_MH_MMU_MPU_BASE,
.mpu_end = REG_MH_MMU_MPU_END,
.va_range = REG_MH_MMU_VA_RANGE,
.pt_page = REG_MH_MMU_PT_BASE,
.page_fault = REG_MH_MMU_PAGE_FAULT,
.tran_error = REG_MH_MMU_TRAN_ERROR,
.invalidate = REG_MH_MMU_INVALIDATE,
.interrupt_mask = REG_MH_INTERRUPT_MASK,
.interrupt_status = REG_MH_INTERRUPT_STATUS,
.interrupt_clear = REG_MH_INTERRUPT_CLEAR,
.axi_error = REG_MH_AXI_ERROR,
},
},
.mmu = {
.config = ADRENO_MMU_CONFIG,
},
.pwrctrl = {
.pwr_rail = PWR_RAIL_GRP_CLK,
.regulator_name = "fs_gfx3d",
.irq_name = KGSL_3D0_IRQ,
.src_clk_name = "grp_src_clk",
@@ -106,6 +105,14 @@ static struct adreno_device device_3d0 = {
.state = KGSL_STATE_INIT,
.active_cnt = 0,
.iomemname = KGSL_3D0_REG_MEMORY,
.ftbl = &adreno_functable,
#ifdef CONFIG_HAS_EARLYSUSPEND
.display_off = {
.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
.suspend = kgsl_early_suspend_driver,
.resume = kgsl_late_resume_driver,
},
#endif
},
.gmemspace = {
.gpu_base = 0,
@@ -113,12 +120,39 @@ static struct adreno_device device_3d0 = {
},
.pfp_fw = NULL,
.pm4_fw = NULL,
.mharb = ADRENO_CFG_MHARB,
};
static void __devinit adreno_getfunctable(struct kgsl_functable *ftbl);
/*
* This is the master list of all GPU cores that are supported by this
* driver.
*/
static int adreno_gmeminit(struct adreno_device *adreno_dev)
#define ANY_ID (~0)
static const struct {
enum adreno_gpurev gpurev;
unsigned int core, major, minor, patchid;
const char *pm4fw;
const char *pfpfw;
struct adreno_gpudev *gpudev;
} adreno_gpulist[] = {
{ ADRENO_REV_A200, 0, 2, ANY_ID, ANY_ID,
"yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev },
{ ADRENO_REV_A205, 0, 1, 0, ANY_ID,
"yamato_pm4.fw", "yamato_pfp.fw", &adreno_a2xx_gpudev },
{ ADRENO_REV_A220, 2, 1, ANY_ID, ANY_ID,
"leia_pm4_470.fw", "leia_pfp_470.fw", &adreno_a2xx_gpudev },
/*
* patchlevel 5 (8960v2) needs special pm4 firmware to work around
* a hardware problem.
*/
{ ADRENO_REV_A225, 2, 2, 0, 5,
"a225p5_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev },
{ ADRENO_REV_A225, 2, 2, ANY_ID, ANY_ID,
"a225_pm4.fw", "a225_pfp.fw", &adreno_a2xx_gpudev },
};
static void adreno_gmeminit(struct adreno_device *adreno_dev)
{
struct kgsl_device *device = &adreno_dev->dev;
union reg_rb_edram_info rb_edram_info;
@@ -137,90 +171,27 @@ static int adreno_gmeminit(struct adreno_device *adreno_dev)
rb_edram_info.val = 0;
rb_edram_info.f.edram_size = edram_value;
if (!adreno_is_a220(adreno_dev))
rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
rb_edram_info.f.edram_mapping_mode = 0; /* EDRAM_MAP_UPPER */
/* must be aligned to size */
rb_edram_info.f.edram_range = (adreno_dev->gmemspace.gpu_base >> 14);
adreno_regwrite(device, REG_RB_EDRAM_INFO, rb_edram_info.val);
return 0;
}
static int adreno_gmemclose(struct kgsl_device *device)
static irqreturn_t adreno_isr(int irq, void *data)
{
adreno_regwrite(device, REG_RB_EDRAM_INFO, 0x00000000);
irqreturn_t result;
struct kgsl_device *device = data;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
return 0;
}
static void adreno_rbbm_intrcallback(struct kgsl_device *device)
{
unsigned int status = 0;
unsigned int rderr = 0;
adreno_regread_isr(device, REG_RBBM_INT_STATUS, &status);
if (status & RBBM_INT_CNTL__RDERR_INT_MASK) {
union rbbm_read_error_u rerr;
adreno_regread_isr(device, REG_RBBM_READ_ERROR, &rderr);
rerr.val = rderr;
if (rerr.f.read_address == REG_CP_INT_STATUS &&
rerr.f.read_error &&
rerr.f.read_requester)
KGSL_DRV_WARN(device,
"rbbm read error interrupt: %08x\n", rderr);
else
KGSL_DRV_CRIT(device,
"rbbm read error interrupt: %08x\n", rderr);
} else if (status & RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK) {
KGSL_DRV_INFO(device, "rbbm display update interrupt\n");
} else if (status & RBBM_INT_CNTL__GUI_IDLE_INT_MASK) {
KGSL_DRV_INFO(device, "rbbm gui idle interrupt\n");
} else {
KGSL_CMD_WARN(device,
"bad bits in REG_CP_INT_STATUS %08x\n", status);
}
status &= GSL_RBBM_INT_MASK;
adreno_regwrite_isr(device, REG_RBBM_INT_ACK, status);
}
irqreturn_t adreno_isr(int irq, void *data)
{
irqreturn_t result = IRQ_NONE;
struct kgsl_device *device;
unsigned int status;
device = (struct kgsl_device *) data;
BUG_ON(device == NULL);
BUG_ON(device->regspace.sizebytes == 0);
BUG_ON(device->regspace.mmio_virt_base == 0);
adreno_regread_isr(device, REG_MASTER_INT_SIGNAL, &status);
if (status & MASTER_INT_SIGNAL__MH_INT_STAT) {
kgsl_mh_intrcallback(device);
result = IRQ_HANDLED;
}
if (status & MASTER_INT_SIGNAL__CP_INT_STAT) {
kgsl_cp_intrcallback(device);
result = IRQ_HANDLED;
}
if (status & MASTER_INT_SIGNAL__RBBM_INT_STAT) {
adreno_rbbm_intrcallback(device);
result = IRQ_HANDLED;
}
result = adreno_dev->gpudev->irq_handler(adreno_dev);
if (device->requested_state == KGSL_STATE_NONE) {
if (device->pwrctrl.nap_allowed == true) {
device->requested_state = KGSL_STATE_NAP;
queue_work(device->work_queue, &device->idle_check_ws);
} else if (device->pwrctrl.idle_pass == true) {
} else if (device->pwrscale.policy != NULL) {
queue_work(device->work_queue, &device->idle_check_ws);
}
}
@@ -231,7 +202,7 @@ irqreturn_t adreno_isr(int irq, void *data)
return result;
}
static int adreno_cleanup_pt(struct kgsl_device *device,
static void adreno_cleanup_pt(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
@@ -243,9 +214,7 @@ static int adreno_cleanup_pt(struct kgsl_device *device,
kgsl_mmu_unmap(pagetable, &device->memstore);
kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
return 0;
kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
}
static int adreno_setup_pt(struct kgsl_device *device,
@@ -255,12 +224,6 @@ static int adreno_setup_pt(struct kgsl_device *device,
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
BUG_ON(rb->buffer_desc.physaddr == 0);
BUG_ON(rb->memptrs_desc.physaddr == 0);
BUG_ON(device->memstore.physaddr == 0);
#ifdef CONFIG_MSM_KGSL_MMU
BUG_ON(device->mmu.dummyspace.physaddr == 0);
#endif
result = kgsl_mmu_map_global(pagetable, &rb->buffer_desc,
GSL_PT_PAGE_RV);
if (result)
@@ -276,7 +239,7 @@
if (result)
goto unmap_memptrs_desc;
result = kgsl_mmu_map_global(pagetable, &device->mmu.dummyspace,
result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
if (result)
goto unmap_memstore_desc;
@@ -296,7 +259,8 @@ error:
return result;
}
static int adreno_setstate(struct kgsl_device *device, uint32_t flags)
static void adreno_setstate(struct kgsl_device *device,
uint32_t flags)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int link[32];
@@ -304,38 +268,36 @@ static int adreno_setstate(struct kgsl_device *device, uint32_t flags)
int sizedwords = 0;
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
#ifndef CONFIG_MSM_KGSL_MMU
return 0;
#endif
/* if possible, set via command stream,
* otherwise set via direct register writes
*/
/* If possible, then set the state via the command stream to avoid
a CPU idle. Otherwise, use the default setstate which uses register
writes */
if (adreno_dev->drawctxt_active) {
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
/* wait for graphics pipe to be idle */
*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
/* set page table base */
*cmds++ = pm4_type0_packet(REG_MH_MMU_PT_BASE, 1);
*cmds++ = device->mmu.hwpagetable->base.gpuaddr;
*cmds++ = cp_type0_packet(MH_MMU_PT_BASE, 1);
*cmds++ = kgsl_pt_get_base_addr(
device->mmu.hwpagetable);
sizedwords += 4;
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
if (!(flags & KGSL_MMUFLAGS_PTUPDATE)) {
*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE,
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE,
1);
*cmds++ = 0x00000000;
sizedwords += 2;
}
*cmds++ = pm4_type0_packet(REG_MH_MMU_INVALIDATE, 1);
*cmds++ = cp_type0_packet(MH_MMU_INVALIDATE, 1);
*cmds++ = mh_mmu_invalidate;
sizedwords += 2;
}
if (flags & KGSL_MMUFLAGS_PTUPDATE &&
!adreno_is_a220(adreno_dev)) {
adreno_is_a20x(adreno_dev)) {
/* HW workaround: to resolve MMU page fault interrupts
* caused by the VGT. It prevents the CP PFP from filling
* the VGT DMA request fifo too early, thereby ensuring
@@ -348,34 +310,36 @@ static int adreno_setstate(struct kgsl_device *device, uint32_t flags)
* VGT DMA request fifo and prevent any further
* vertex/bin updates from occurring until the wait
* has finished. */
*cmds++ = pm4_type3_packet(PM4_SET_CONSTANT, 2);
*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);
*cmds++ = (0x4 << 16) |
(REG_PA_SU_SC_MODE_CNTL - 0x2000);
*cmds++ = 0; /* disable faceness generation */
*cmds++ = pm4_type3_packet(PM4_SET_BIN_BASE_OFFSET, 1);
*cmds++ = device->mmu.dummyspace.gpuaddr;
*cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
*cmds++ = cp_type3_packet(CP_SET_BIN_BASE_OFFSET, 1);
*cmds++ = device->mmu.setstate_memory.gpuaddr;
*cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
*cmds++ = 0; /* viz query info */
*cmds++ = 0x0003C004; /* draw indicator */
*cmds++ = 0; /* bin base */
*cmds++ = 3; /* bin size */
*cmds++ = device->mmu.dummyspace.gpuaddr; /* dma base */
*cmds++ =
device->mmu.setstate_memory.gpuaddr; /* dma base */
*cmds++ = 6; /* dma size */
*cmds++ = pm4_type3_packet(PM4_DRAW_INDX_BIN, 6);
*cmds++ = cp_type3_packet(CP_DRAW_INDX_BIN, 6);
*cmds++ = 0; /* viz query info */
*cmds++ = 0x0003C004; /* draw indicator */
*cmds++ = 0; /* bin base */
*cmds++ = 3; /* bin size */
/* dma base */
*cmds++ = device->mmu.dummyspace.gpuaddr;
*cmds++ = device->mmu.setstate_memory.gpuaddr;
*cmds++ = 6; /* dma size */
*cmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
*cmds++ = cp_type3_packet(CP_WAIT_FOR_IDLE, 1);
*cmds++ = 0x00000000;
sizedwords += 21;
}
if (flags & (KGSL_MMUFLAGS_PTUPDATE | KGSL_MMUFLAGS_TLBFLUSH)) {
*cmds++ = pm4_type3_packet(PM4_INVALIDATE_STATE, 1);
*cmds++ = cp_type3_packet(CP_INVALIDATE_STATE, 1);
*cmds++ = 0x7fff; /* invalidate all base pointers */
sizedwords += 2;
}
@@ -383,25 +347,13 @@ static int adreno_setstate(struct kgsl_device *device, uint32_t flags)
adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_PMODE,
&link[0], sizedwords);
} else {
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
adreno_regwrite(device, REG_MH_MMU_PT_BASE,
device->mmu.hwpagetable->base.gpuaddr);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
adreno_regwrite(device, REG_MH_MMU_INVALIDATE,
mh_mmu_invalidate);
}
kgsl_mmu_device_setstate(device, flags);
}
return 0;
}
static unsigned int
adreno_getchipid(struct kgsl_device *device)
{
/* XXX: drewis edit: only for 8x50 */
unsigned int chipid = 0;
unsigned int coreid, majorid, minorid, patchid, revid;
@@ -409,57 +361,64 @@ adreno_getchipid(struct kgsl_device *device)
adreno_regread(device, REG_RBBM_PERIPHID2, &majorid);
adreno_regread(device, REG_RBBM_PATCH_RELEASE, &revid);
chipid = (coreid & 0xF) << 24;
/*
* adreno 22x gpus are indicated by coreid 2,
* but REG_RBBM_PERIPHID1 always contains 0 for this field
*/
if (cpu_is_msm8960() || cpu_is_msm8x60() || cpu_is_msm8930())
chipid = 2 << 24;
else
chipid = (coreid & 0xF) << 24;
chipid |= ((majorid >> 4) & 0xF) << 16;
minorid = ((revid >> 0) & 0xFF);
patchid = 1;
patchid = ((revid >> 16) & 0xFF);
/* 8x50 returns 0 for patch release, but it should be 1 */
if (cpu_is_qsd8x50())
patchid = 1;
chipid |= (minorid << 8) | patchid;
return chipid;
}
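The packing is core[31:24], major[23:16], minor[15:8], patchid[7:0]; adreno_identify_gpu() below unpacks the same fields before matching them against adreno_gpulist. A worked example with an illustrative value:

/* Illustrative decode: 0x02020005 is core 2, major 2, minor 0, patchid 5,
 * which would match the A225 (8960v2) entry in adreno_gpulist. */
unsigned int chip_id = 0x02020005;
unsigned int core    = (chip_id >> 24) & 0xff;	/* 2 */
unsigned int major   = (chip_id >> 16) & 0xff;	/* 2 */
unsigned int minor   = (chip_id >> 8) & 0xff;	/* 0 */
unsigned int patchid = chip_id & 0xff;		/* 5 */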
/* all chipid fields are 8 bits wide so 256 won't occur in a real chipid */
#define DONT_CARE 256
static const struct {
unsigned int core;
unsigned int major;
unsigned int minor;
enum adreno_gpurev gpurev;
} gpurev_table[] = {
/* major and minor may be DONT_CARE, but core must not be */
{0, 2, DONT_CARE, ADRENO_REV_A200},
{0, 1, 0, ADRENO_REV_A205},
{2, 1, DONT_CARE, ADRENO_REV_A220},
{2, 2, DONT_CARE, ADRENO_REV_A225},
};
static inline bool _rev_match(unsigned int id, unsigned int entry)
{
return (entry == DONT_CARE || entry == id);
return (entry == ANY_ID || entry == id);
}
#undef DONT_CARE
enum adreno_gpurev adreno_get_rev(struct adreno_device *adreno_dev)
static void
adreno_identify_gpu(struct adreno_device *adreno_dev)
{
enum adreno_gpurev gpurev = ADRENO_REV_UNKNOWN;
unsigned int i, core, major, minor;
unsigned int i, core, major, minor, patchid;
adreno_dev->chip_id = adreno_getchipid(&adreno_dev->dev);
core = (adreno_dev->chip_id >> 24) & 0xff;
major = (adreno_dev->chip_id >> 16) & 0xff;
minor = (adreno_dev->chip_id >> 8) & 0xff;
patchid = (adreno_dev->chip_id & 0xff);
for (i = 0; i < ARRAY_SIZE(gpurev_table); i++) {
if (core == gpurev_table[i].core &&
_rev_match(major, gpurev_table[i].major) &&
_rev_match(minor, gpurev_table[i].minor)) {
gpurev = gpurev_table[i].gpurev;
for (i = 0; i < ARRAY_SIZE(adreno_gpulist); i++) {
if (core == adreno_gpulist[i].core &&
_rev_match(major, adreno_gpulist[i].major) &&
_rev_match(minor, adreno_gpulist[i].minor) &&
_rev_match(patchid, adreno_gpulist[i].patchid))
break;
}
}
return gpurev;
if (i == ARRAY_SIZE(adreno_gpulist)) {
adreno_dev->gpurev = ADRENO_REV_UNKNOWN;
return;
}
adreno_dev->gpurev = adreno_gpulist[i].gpurev;
adreno_dev->gpudev = adreno_gpulist[i].gpudev;
adreno_dev->pfp_fwfile = adreno_gpulist[i].pfpfw;
adreno_dev->pm4_fwfile = adreno_gpulist[i].pm4fw;
}
static int __devinit
@@ -473,9 +432,9 @@ adreno_probe(struct platform_device *pdev)
adreno_dev = ADRENO_DEVICE(device);
device->parentdev = &pdev->dev;
init_completion(&device->recovery_gate);
adreno_dev->wait_timeout = 10000; /* default value in milliseconds */
adreno_getfunctable(&device->ftbl);
init_completion(&device->recovery_gate);
status = adreno_ringbuffer_init(device);
if (status != 0)
@@ -487,6 +446,9 @@ adreno_probe(struct platform_device *pdev)
adreno_debugfs_init(device);
kgsl_pwrscale_init(device);
kgsl_pwrscale_attach_policy(device, ADRENO_DEFAULT_PWRSCALE_POLICY);
device->flags &= ~KGSL_FLAGS_SOFT_RESET;
return 0;
@@ -505,6 +467,9 @@ static int __devexit adreno_remove(struct platform_device *pdev)
device = (struct kgsl_device *)pdev->id_entry->driver_data;
adreno_dev = ADRENO_DEVICE(device);
kgsl_pwrscale_detach_policy(device);
kgsl_pwrscale_close(device);
adreno_ringbuffer_close(&adreno_dev->ringbuffer);
kgsl_device_platform_remove(device);
@@ -523,23 +488,38 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
/* Power up the device */
kgsl_pwrctrl_enable(device);
/* Identify the specific GPU */
adreno_identify_gpu(adreno_dev);
if (adreno_dev->gpurev == ADRENO_REV_UNKNOWN) {
KGSL_DRV_ERR(device, "Unknown chip ID %x\n",
adreno_dev->chip_id);
goto error_clk_off;
}
if (adreno_is_a20x(adreno_dev)) {
/*
* the MH_CLNT_INTF_CTRL_CONFIG registers aren't present
* on older gpus
*/
device->mh.mh_intf_cfg1 = 0;
device->mh.mh_intf_cfg2 = 0;
}
kgsl_mh_start(device);
if (kgsl_mmu_start(device))
goto error_clk_off;
adreno_dev->chip_id = adreno_getchipid(device);
/*We need to make sure all blocks are powered up and clocked before
*issuing a soft reset. The overrides will then be turned off (set to 0)
*/
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0xfffffffe);
if (adreno_dev->chip_id == CHIP_REV_251)
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x000000ff);
else
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xffffffff);
/* Only reset CP block if all blocks have previously been reset */
if (!(device->flags & KGSL_FLAGS_SOFT_RESET) ||
!adreno_is_a220(adreno_dev)) {
!adreno_is_a22x(adreno_dev)) {
adreno_regwrite(device, REG_RBBM_SOFT_RESET, 0xFFFFFFFF);
device->flags |= KGSL_FLAGS_SOFT_RESET;
} else
@@ -554,44 +534,39 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
adreno_regwrite(device, REG_RBBM_CNTL, 0x00004442);
adreno_regwrite(device, REG_MH_ARBITER_CONFIG,
adreno_dev->mharb);
if (!adreno_is_a220(adreno_dev)) {
adreno_regwrite(device,
REG_MH_CLNT_INTF_CTRL_CONFIG1, 0x00030f27);
adreno_regwrite(device,
REG_MH_CLNT_INTF_CTRL_CONFIG2, 0x00472747);
}
/* Remove 1k boundary check in z470 to avoid GPU hang.
Notice that this solution won't work if both EBI and SMI are used */
if (adreno_is_a220(adreno_dev)) {
adreno_regwrite(device, REG_MH_CLNT_INTF_CTRL_CONFIG1,
0x00032f07);
if (adreno_is_a225(adreno_dev)) {
/* Enable large instruction store for A225 */
adreno_regwrite(device, REG_SQ_FLOW_CONTROL, 0x18000000);
}
adreno_regwrite(device, REG_SQ_VS_PROGRAM, 0x00000000);
adreno_regwrite(device, REG_SQ_PS_PROGRAM, 0x00000000);
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
if (!adreno_is_a220(adreno_dev))
if (cpu_is_msm8960() || cpu_is_msm8930())
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0x200);
else
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE1, 0);
if (!adreno_is_a22x(adreno_dev))
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0);
else
adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80);
kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
kgsl_sharedmem_writel(&device->memstore,
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
init_reftimestamp);
adreno_regwrite(device, REG_RBBM_DEBUG, 0x000C0000);
adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000);
adreno_regwrite(device, REG_RBBM_INT_CNTL, GSL_RBBM_INT_MASK);
/* Make sure interrupts are disabled */
/* make sure SQ interrupts are disabled */
adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
adreno_regwrite(device, REG_CP_INT_CNTL, 0);
adreno_regwrite(device, REG_SQ_INT_CNTL, 0);
if (adreno_is_a220(adreno_dev))
if (adreno_is_a22x(adreno_dev))
adreno_dev->gmemspace.sizebytes = SZ_512K;
else
adreno_dev->gmemspace.sizebytes = SZ_256K;
@@ -608,9 +583,9 @@ static int adreno_start(struct kgsl_device *device, unsigned int init_ram)
error_irq_off:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_mmu_stop(device);
error_clk_off:
kgsl_pwrctrl_disable(device);
kgsl_mmu_stop(device);
return status;
}
@@ -618,19 +593,15 @@
static int adreno_stop(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
del_timer(&device->idle_timer);
adreno_regwrite(device, REG_RBBM_INT_CNTL, 0);
adreno_dev->drawctxt_active = NULL;
adreno_ringbuffer_stop(&adreno_dev->ringbuffer);
adreno_gmemclose(device);
kgsl_mmu_stop(device);
/* Disable the clocks before the power rail. */
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
del_timer_sync(&device->idle_timer);
/* Power down the device */
kgsl_pwrctrl_disable(device);
@@ -786,7 +757,7 @@ static int adreno_getproperty(struct kgsl_device *device,
devinfo.device_id = device->id+1;
devinfo.chip_id = adreno_dev->chip_id;
devinfo.mmu_enabled = kgsl_mmu_enabled();
devinfo.gpu_id = adreno_get_rev(adreno_dev);
devinfo.gpu_id = adreno_dev->gpurev;
devinfo.gmem_gpubaseaddr = adreno_dev->gmemspace.
gpu_base;
devinfo.gmem_sizebytes = adreno_dev->gmemspace.
@@ -829,16 +800,13 @@ static int adreno_getproperty(struct kgsl_device *device,
break;
case KGSL_PROP_MMU_ENABLE:
{
#ifdef CONFIG_MSM_KGSL_MMU
int mmuProp = 1;
#else
int mmuProp = 0;
#endif
int mmu_prop = kgsl_mmu_enabled();
if (sizebytes != sizeof(int)) {
status = -EINVAL;
break;
}
if (copy_to_user(value, &mmuProp, sizeof(mmuProp))) {
if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
status = -EFAULT;
break;
}
@@ -872,7 +840,9 @@ int adreno_idle(struct kgsl_device *device, unsigned int timeout)
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
unsigned int rbbm_status;
unsigned long wait_time = jiffies + MAX_WAITGPU_SECS;
unsigned long wait_timeout =
msecs_to_jiffies(adreno_dev->wait_timeout);
unsigned long wait_time = jiffies + wait_timeout;
kgsl_cffdump_regpoll(device->id, REG_RBBM_STATUS << 2,
0x00000000, 0x80000000);
@@ -892,7 +862,7 @@ retry:
}
/* now, wait for the GPU to finish its operations */
wait_time = jiffies + MAX_WAITGPU_SECS;
wait_time = jiffies + wait_timeout;
while (time_before(jiffies, wait_time)) {
adreno_regread(device, REG_RBBM_STATUS, &rbbm_status);
if (rbbm_status == 0x110)
@@ -902,7 +872,7 @@ retry:
err:
KGSL_DRV_ERR(device, "spun too long waiting for RB to idle\n");
if (!adreno_dump_and_recover(device)) {
wait_time = jiffies + MAX_WAITGPU_SECS;
wait_time = jiffies + wait_timeout;
goto retry;
}
return -ETIMEDOUT;
@@ -915,6 +885,7 @@ static unsigned int adreno_isidle(struct kgsl_device *device)
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
unsigned int rbbm_status;
WARN_ON(!(rb->flags & KGSL_FLAGS_STARTED));
if (rb->flags & KGSL_FLAGS_STARTED) {
/* Is the ring buffer is empty? */
GSL_RB_GET_READPTR(rb, &rb->rptr);
@@ -926,40 +897,20 @@ static unsigned int adreno_isidle(struct kgsl_device *device)
status = true;
}
} else {
KGSL_DRV_ERR(device, "ringbuffer not started\n");
BUG();
/* if the ringbuffer isn't started we are VERY idle */
status = true;
}
return status;
}
/******************************************************************/
/* Caller must hold the driver mutex. */
static int adreno_resume_context(struct kgsl_device *device)
{
int status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (device->pwrctrl.suspended_ctxt != NULL) {
adreno_drawctxt_switch(adreno_dev,
device->pwrctrl.suspended_ctxt, 0);
status = adreno_idle(device, 0);
}
return status;
}
/******************************************************************/
/* Caller must hold the device mutex. */
static int adreno_suspend_context(struct kgsl_device *device)
{
int status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
/* save ctxt ptr and switch to NULL ctxt */
device->pwrctrl.suspended_ctxt = adreno_dev->drawctxt_active;
if (device->pwrctrl.suspended_ctxt != NULL) {
/* switch to NULL ctxt */
if (adreno_dev->drawctxt_active != NULL) {
adreno_drawctxt_switch(adreno_dev, NULL, 0);
status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
}
@@ -993,12 +944,8 @@ uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
mutex_lock(&kgsl_driver.process_mutex);
list_for_each_entry(priv, &kgsl_driver.process_list, list) {
if (pt_base != 0
&& priv->pagetable
&& priv->pagetable->base.gpuaddr != pt_base) {
if (!kgsl_mmu_pt_equal(priv->pagetable, pt_base))
continue;
}
spin_lock(&priv->mem_lock);
entry = kgsl_sharedmem_find_region(priv, gpuaddr,
sizeof(unsigned int));
@@ -1025,42 +972,33 @@ uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
return result;
}
static void _adreno_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value)
{
unsigned int *reg;
BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
reg = (unsigned int *)(device->regspace.mmio_virt_base
+ (offsetwords << 2));
if (!in_interrupt())
kgsl_pre_hwaccess(device);
/*ensure this read finishes before the next one.
* i.e. act like normal readl() */
*value = __raw_readl(reg);
rmb();
}
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value)
{
kgsl_pre_hwaccess(device);
_adreno_regread(device, offsetwords, value);
}
void adreno_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
_adreno_regread(device, offsetwords, value);
}
static void _adreno_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value)
{
unsigned int *reg;
BUG_ON(offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes);
if (!in_interrupt())
kgsl_pre_hwaccess(device);
kgsl_cffdump_regwrite(device->id, offsetwords << 2, value);
reg = (unsigned int *)(device->regspace.mmio_virt_base
+ (offsetwords << 2));
@@ -1071,20 +1009,6 @@ static void _adreno_regwrite(struct kgsl_device *device,
__raw_writel(value, reg);
}
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value)
{
kgsl_pre_hwaccess(device);
_adreno_regwrite(device, offsetwords, value);
}
void adreno_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
_adreno_regwrite(device, offsetwords, value);
}
static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
unsigned int timestamp)
{
@@ -1102,7 +1026,7 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
kgsl_sharedmem_readl(&device->memstore, &ref_ts,
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts));
mb();
if (timestamp_cmp(ref_ts, timestamp)) {
if (timestamp_cmp(ref_ts, timestamp) >= 0) {
kgsl_sharedmem_writel(&device->memstore,
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
timestamp);
@@ -1121,7 +1045,7 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
/* submit a dummy packet so that even if all
* commands upto timestamp get executed we will still
* get an interrupt */
cmds[0] = pm4_type3_packet(PM4_NOP, 1);
cmds[0] = cp_type3_packet(CP_NOP, 1);
cmds[1] = 0;
adreno_ringbuffer_issuecmds(device, 0, &cmds[0], 2);
}
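The changed comparisons in this file (timestamp_cmp(...) >= 0 above, > 0 in adreno_waittimestamp() below) suggest timestamp_cmp() now returns a signed three-way result instead of a boolean. A hedged sketch of the wraparound-safe idea (the driver's real helper may pick a different threshold):

/* Sketch: three-way compare of 32-bit timestamps; a forward difference
 * of less than half the counter range means "a is newer than b". */
static inline int ts_cmp(unsigned int a, unsigned int b)
{
	if (a == b)
		return 0;
	return ((a - b) < 0x80000000u) ? 1 : -1;
}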
@@ -1132,15 +1056,18 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device,
}
/*
wait_io_event_interruptible_timeout checks for the exit condition before
wait_event_interruptible_timeout checks for the exit condition before
placing a process in wait q. For conditional interrupts we expect the
process to already be in its wait q when its exit condition checking
function is called.
*/
#define kgsl_wait_io_event_interruptible_timeout(wq, condition, timeout)\
#define kgsl_wait_event_interruptible_timeout(wq, condition, timeout, io)\
({ \
long __ret = timeout; \
__wait_io_event_interruptible_timeout(wq, condition, __ret); \
if (io) \
__wait_io_event_interruptible_timeout(wq, condition, __ret);\
else \
__wait_event_interruptible_timeout(wq, condition, __ret);\
__ret; \
})
@@ -1150,11 +1077,15 @@ static int adreno_waittimestamp(struct kgsl_device *device,
unsigned int msecs)
{
long status = 0;
uint io = 1;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (timestamp != adreno_dev->ringbuffer.timestamp &&
timestamp_cmp(timestamp,
adreno_dev->ringbuffer.timestamp)) {
/* Don't wait forever, set a max value for now */
if (msecs == -1)
msecs = adreno_dev->wait_timeout;
if (timestamp_cmp(timestamp, adreno_dev->ringbuffer.timestamp) > 0) {
KGSL_DRV_ERR(device, "Cannot wait for invalid ts: %x, "
"rb->timestamp: %x\n",
timestamp, adreno_dev->ringbuffer.timestamp);
@@ -1162,13 +1093,20 @@ static int adreno_waittimestamp(struct kgsl_device *device,
goto done;
}
if (!kgsl_check_timestamp(device, timestamp)) {
if (pwr->active_pwrlevel) {
int low_pwrlevel = pwr->num_pwrlevels -
KGSL_PWRLEVEL_LOW_OFFSET;
if (pwr->active_pwrlevel == low_pwrlevel)
io = 0;
}
mutex_unlock(&device->mutex);
/* We need to make sure that the process is placed in wait-q
* before its condition is called */
status = kgsl_wait_io_event_interruptible_timeout(
status = kgsl_wait_event_interruptible_timeout(
device->wait_queue,
kgsl_check_interrupt_timestamp(device,
timestamp), msecs_to_jiffies(msecs));
timestamp),
msecs_to_jiffies(msecs), io);
mutex_lock(&device->mutex);
if (status > 0)
@@ -1227,10 +1165,8 @@ static long adreno_ioctl(struct kgsl_device_private *dev_priv,
context = kgsl_find_context(dev_priv, binbase->drawctxt_id);
if (context) {
result = adreno_drawctxt_set_bin_base_offset(
dev_priv->device,
context,
binbase->offset);
adreno_drawctxt_set_bin_base_offset(
dev_priv->device, context, binbase->offset);
} else {
result = -EINVAL;
KGSL_DRV_ERR(dev_priv->device,
@@ -1299,33 +1235,36 @@ static void adreno_power_stats(struct kgsl_device *device,
REG_PERF_MODE_CNT | REG_PERF_STATE_ENABLE);
}
static void __devinit adreno_getfunctable(struct kgsl_functable *ftbl)
void adreno_irqctrl(struct kgsl_device *device, int state)
{
if (ftbl == NULL)
return;
ftbl->device_regread = adreno_regread;
ftbl->device_regwrite = adreno_regwrite;
ftbl->device_regread_isr = adreno_regread_isr;
ftbl->device_regwrite_isr = adreno_regwrite_isr;
ftbl->device_setstate = adreno_setstate;
ftbl->device_idle = adreno_idle;
ftbl->device_isidle = adreno_isidle;
ftbl->device_suspend_context = adreno_suspend_context;
ftbl->device_resume_context = adreno_resume_context;
ftbl->device_start = adreno_start;
ftbl->device_stop = adreno_stop;
ftbl->device_getproperty = adreno_getproperty;
ftbl->device_waittimestamp = adreno_waittimestamp;
ftbl->device_readtimestamp = adreno_readtimestamp;
ftbl->device_issueibcmds = adreno_ringbuffer_issueibcmds;
ftbl->device_drawctxt_create = adreno_drawctxt_create;
ftbl->device_drawctxt_destroy = adreno_drawctxt_destroy;
ftbl->device_ioctl = adreno_ioctl;
ftbl->device_setup_pt = adreno_setup_pt;
ftbl->device_cleanup_pt = adreno_cleanup_pt;
ftbl->device_power_stats = adreno_power_stats;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
adreno_dev->gpudev->irq_control(adreno_dev, state);
}
static const struct kgsl_functable adreno_functable = {
/* Mandatory functions */
.regread = adreno_regread,
.regwrite = adreno_regwrite,
.idle = adreno_idle,
.isidle = adreno_isidle,
.suspend_context = adreno_suspend_context,
.start = adreno_start,
.stop = adreno_stop,
.getproperty = adreno_getproperty,
.waittimestamp = adreno_waittimestamp,
.readtimestamp = adreno_readtimestamp,
.issueibcmds = adreno_ringbuffer_issueibcmds,
.ioctl = adreno_ioctl,
.setup_pt = adreno_setup_pt,
.cleanup_pt = adreno_cleanup_pt,
.power_stats = adreno_power_stats,
.irqctrl = adreno_irqctrl,
/* Optional functions */
.setstate = adreno_setstate,
.drawctxt_create = adreno_drawctxt_create,
.drawctxt_destroy = adreno_drawctxt_destroy,
};
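Instead of copying entries into a per-device table at probe time (the deleted adreno_getfunctable() above), the device now points at this single const table and the core dispatches through it. A sketch of the dispatch side, assuming wrappers of this shape in kgsl_device.h:

/* Sketch: core-side wrapper dispatching through the functable. */
static inline void kgsl_regread(struct kgsl_device *device,
	unsigned int offsetwords, unsigned int *value)
{
	device->ftbl->regread(device, offsetwords, value);
}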
static struct platform_device_id adreno_id_table[] = {
{ DEVICE_3D0_NAME, (kernel_ulong_t)&device_3d0.dev, },
{ },

@@ -1,34 +1,19 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_H
#define __ADRENO_H
#include "kgsl_device.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
@@ -47,34 +32,11 @@
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0xDEADBEEF
#define KGSL_CMD_IDENTIFIER 0xFEEDFACE
struct adreno_device {
struct kgsl_device dev; /* Must be first field in this struct */
unsigned int chip_id;
struct kgsl_memregion gmemspace;
struct adreno_context *drawctxt_active;
wait_queue_head_t ib1_wq;
unsigned int *pfp_fw;
size_t pfp_fw_size;
unsigned int *pm4_fw;
size_t pm4_fw_size;
struct adreno_ringbuffer ringbuffer;
unsigned int mharb;
};
int adreno_idle(struct kgsl_device *device, unsigned int timeout);
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value);
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value);
void adreno_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void adreno_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
unsigned int pt_base, unsigned int gpuaddr, unsigned int *size);
#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
#else
#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
#endif
enum adreno_gpurev {
ADRENO_REV_UNKNOWN = 0,
@@ -84,38 +46,84 @@ enum adreno_gpurev {
ADRENO_REV_A225 = 225,
};
enum adreno_gpurev adreno_get_rev(struct adreno_device *adreno_dev);
struct adreno_gpudev;
struct adreno_device {
struct kgsl_device dev; /* Must be first field in this struct */
unsigned int chip_id;
enum adreno_gpurev gpurev;
struct kgsl_memregion gmemspace;
struct adreno_context *drawctxt_active;
const char *pfp_fwfile;
unsigned int *pfp_fw;
size_t pfp_fw_size;
const char *pm4_fwfile;
unsigned int *pm4_fw;
size_t pm4_fw_size;
struct adreno_ringbuffer ringbuffer;
unsigned int mharb;
struct adreno_gpudev *gpudev;
unsigned int wait_timeout;
};
struct adreno_gpudev {
int (*ctxt_gpustate_shadow)(struct adreno_device *,
struct adreno_context *);
int (*ctxt_gmem_shadow)(struct adreno_device *,
struct adreno_context *);
void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
};
extern struct adreno_gpudev adreno_a2xx_gpudev;
int adreno_idle(struct kgsl_device *device, unsigned int timeout);
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value);
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value);
uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
unsigned int pt_base, unsigned int gpuaddr, unsigned int *size);
static inline int adreno_is_a200(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A200);
return (adreno_dev->gpurev == ADRENO_REV_A200);
}
static inline int adreno_is_a205(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A200);
return (adreno_dev->gpurev == ADRENO_REV_A200);
}
static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
{
enum adreno_gpurev rev = adreno_get_rev(adreno_dev);
return (rev == ADRENO_REV_A200 || rev == ADRENO_REV_A205);
return (adreno_dev->gpurev == ADRENO_REV_A200 ||
adreno_dev->gpurev == ADRENO_REV_A205);
}
static inline int adreno_is_a220(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A220);
return (adreno_dev->gpurev == ADRENO_REV_A220);
}
static inline int adreno_is_a225(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A225);
return (adreno_dev->gpurev == ADRENO_REV_A225);
}
static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
{
enum adreno_gpurev rev = adreno_get_rev(adreno_dev);
return (rev == ADRENO_REV_A220 || rev == ADRENO_REV_A225);
return (adreno_dev->gpurev == ADRENO_REV_A220 ||
adreno_dev->gpurev == ADRENO_REV_A225);
}
static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev <= ADRENO_REV_A225);
}
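Note that adreno_is_a2xx() leans on the numeric enum values: every revision defined so far is at most ADRENO_REV_A225 (225), so a single comparison covers the whole A2xx family, and caching gpurev in the struct turns all of these checks into a plain field read instead of a call to adreno_get_rev().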
#endif /*__ADRENO_H */

File diff suppressed because it is too large

@ -14,12 +14,13 @@
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "kgsl.h"
#include "adreno_postmortem.h"
#include "adreno.h"
#include "a200_reg.h"
#include "a2xx_reg.h"
unsigned int kgsl_cff_dump_enable;
int kgsl_pm_regs_enabled;
@ -130,7 +131,7 @@ static ssize_t kgsl_ib_dump_read(
if (!ppos || !device || !kgsl_ib_base)
return 0;
kgsl_regread(device, REG_MH_MMU_PT_BASE, &pt_base);
kgsl_regread(device, MH_MMU_PT_BASE, &pt_base);
base_addr = kgsl_sharedmem_convertaddr(device, pt_base, kgsl_ib_base,
&ib_memsize);
@ -395,8 +396,8 @@ static void kgsl_mh_reg_read_fill(struct kgsl_device *device, int i,
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, REG_MH_DEBUG_CTRL, i+j);
kgsl_regread(device, REG_MH_DEBUG_DATA, vals+j);
kgsl_regwrite(device, MH_DEBUG_CTRL, i+j);
kgsl_regread(device, MH_DEBUG_DATA, vals+j);
}
}
@ -420,6 +421,8 @@ static const struct file_operations kgsl_mh_debug_fops = {
void adreno_debugfs_init(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (!device->d_debugfs || IS_ERR(device->d_debugfs))
return;
@ -435,6 +438,8 @@ void adreno_debugfs_init(struct kgsl_device *device)
&kgsl_mh_debug_fops);
debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
&kgsl_cff_dump_enable_fops);
debugfs_create_u32("wait_timeout", 0644, device->d_debugfs,
&adreno_dev->wait_timeout);
/* Create post mortem control files */

@ -1,29 +1,13 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_DEBUGFS_H

File diff suppressed because it is too large

@ -1,36 +1,20 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_DRAWCTXT_H
#define __ADRENO_DRAWCTXT_H
#include "a200_reg.h"
#include "a220_reg.h"
#include "adreno_pm4types.h"
#include "a2xx_reg.h"
/* Flags */
@ -95,19 +79,73 @@ struct adreno_context {
struct gmem_shadow_t context_gmem_shadow;
};
int adreno_drawctxt_create(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
struct kgsl_context *context,
uint32_t flags);
int adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
uint32_t flags,
struct kgsl_context *context);
int adreno_drawctxt_destroy(struct kgsl_device *device,
void adreno_drawctxt_destroy(struct kgsl_device *device,
struct kgsl_context *context);
void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags);
int adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset);
/* GPU context switch helper functions */
void build_quad_vtxbuff(struct adreno_context *drawctxt,
struct gmem_shadow_t *shadow, unsigned int **incmd);
unsigned int uint2float(unsigned int);
static inline unsigned int virt2gpu(unsigned int *cmd,
struct kgsl_memdesc *memdesc)
{
return memdesc->gpuaddr + ((char *) cmd - (char *) memdesc->hostptr);
}
static inline void create_ib1(struct adreno_context *drawctxt,
unsigned int *cmd,
unsigned int *start,
unsigned int *end)
{
cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
cmd[1] = virt2gpu(start, &drawctxt->gpustate);
cmd[2] = end - start;
}
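virt2gpu() translates a CPU pointer inside a shared memdesc into its GPU address, and create_ib1() uses it to patch a three-dword indirect-buffer command. A hedged sketch of the pattern, assuming a command span already written into drawctxt->gpustate (the helper and its arguments are hypothetical):

/* Hypothetical: emit an IB1 pointing at [span_start, span_end), a run
 * of dwords previously built inside drawctxt->gpustate. */
static void emit_span_ib1(struct adreno_context *drawctxt,
			  unsigned int *span_start,
			  unsigned int *span_end)
{
	unsigned int ib[3];

	create_ib1(drawctxt, ib, span_start, span_end);
	/* ib[0] == CP_HDR_INDIRECT_BUFFER_PFD
	 * ib[1] == GPU address of span_start within gpustate
	 * ib[2] == span length in dwords */
}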
static inline unsigned int *reg_range(unsigned int *cmd, unsigned int start,
unsigned int end)
{
*cmd++ = CP_REG(start); /* h/w regs, start addr */
*cmd++ = end - start + 1; /* count */
return cmd;
}
static inline void calc_gmemsize(struct gmem_shadow_t *shadow, int gmem_size)
{
int w = 64, h = 64;
shadow->format = COLORX_8_8_8_8;
/* convert from bytes to 32-bit words */
gmem_size = (gmem_size + 3) / 4;
while ((w * h) < gmem_size) {
if (w < h)
w *= 2;
else
h *= 2;
}
shadow->pitch = shadow->width = w;
shadow->height = h;
shadow->gmem_pitch = shadow->pitch;
shadow->size = shadow->pitch * shadow->height * 4;
}
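As a worked example, a 256 KiB GMEM is 65536 words after the byte-to-word conversion; starting from 64x64, the loop doubles the shorter side through 64x128, 128x128, and 128x256, then settles at 256x256, so the shadow ends up 256 pixels wide and tall with a size of 256 * 256 * 4 = 256 KiB.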
#endif /* __ADRENO_DRAWCTXT_H */

@ -1,193 +1,193 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_PM4TYPES_H
#define __ADRENO_PM4TYPES_H
#define PM4_PKT_MASK 0xc0000000
#define CP_PKT_MASK 0xc0000000
#define PM4_TYPE0_PKT ((unsigned int)0 << 30)
#define PM4_TYPE1_PKT ((unsigned int)1 << 30)
#define PM4_TYPE2_PKT ((unsigned int)2 << 30)
#define PM4_TYPE3_PKT ((unsigned int)3 << 30)
#define CP_TYPE0_PKT ((unsigned int)0 << 30)
#define CP_TYPE1_PKT ((unsigned int)1 << 30)
#define CP_TYPE2_PKT ((unsigned int)2 << 30)
#define CP_TYPE3_PKT ((unsigned int)3 << 30)
/* type3 packets */
/* initialize CP's micro-engine */
#define PM4_ME_INIT 0x48
#define CP_ME_INIT 0x48
/* skip N 32-bit words to get to the next packet */
#define PM4_NOP 0x10
#define CP_NOP 0x10
/* indirect buffer dispatch. prefetch parser uses this packet type to determine
* whether to pre-fetch the IB
*/
#define PM4_INDIRECT_BUFFER 0x3f
#define CP_INDIRECT_BUFFER 0x3f
/* indirect buffer dispatch. same as IB, but init is pipelined */
#define PM4_INDIRECT_BUFFER_PFD 0x37
#define CP_INDIRECT_BUFFER_PFD 0x37
/* wait for the IDLE state of the engine */
#define PM4_WAIT_FOR_IDLE 0x26
#define CP_WAIT_FOR_IDLE 0x26
/* wait until a register or memory location is a specific value */
#define PM4_WAIT_REG_MEM 0x3c
#define CP_WAIT_REG_MEM 0x3c
/* wait until a register location is equal to a specific value */
#define PM4_WAIT_REG_EQ 0x52
#define CP_WAIT_REG_EQ 0x52
/* wait until a register location is >= a specific value */
#define PM4_WAT_REG_GTE 0x53
#define CP_WAT_REG_GTE 0x53
/* wait until a read completes */
#define PM4_WAIT_UNTIL_READ 0x5c
#define CP_WAIT_UNTIL_READ 0x5c
/* wait until all base/size writes from an IB_PFD packet have completed */
#define PM4_WAIT_IB_PFD_COMPLETE 0x5d
#define CP_WAIT_IB_PFD_COMPLETE 0x5d
/* register read/modify/write */
#define PM4_REG_RMW 0x21
#define CP_REG_RMW 0x21
/* reads register in chip and writes to memory */
#define PM4_REG_TO_MEM 0x3e
#define CP_REG_TO_MEM 0x3e
/* write N 32-bit words to memory */
#define PM4_MEM_WRITE 0x3d
#define CP_MEM_WRITE 0x3d
/* write CP_PROG_COUNTER value to memory */
#define PM4_MEM_WRITE_CNTR 0x4f
#define CP_MEM_WRITE_CNTR 0x4f
/* conditional execution of a sequence of packets */
#define PM4_COND_EXEC 0x44
#define CP_COND_EXEC 0x44
/* conditional write to memory or register */
#define PM4_COND_WRITE 0x45
#define CP_COND_WRITE 0x45
/* generate an event that creates a write to memory when completed */
#define PM4_EVENT_WRITE 0x46
#define CP_EVENT_WRITE 0x46
/* generate a VS|PS_done event */
#define PM4_EVENT_WRITE_SHD 0x58
#define CP_EVENT_WRITE_SHD 0x58
/* generate a cache flush done event */
#define PM4_EVENT_WRITE_CFL 0x59
#define CP_EVENT_WRITE_CFL 0x59
/* generate a z_pass done event */
#define PM4_EVENT_WRITE_ZPD 0x5b
#define CP_EVENT_WRITE_ZPD 0x5b
/* initiate fetch of index buffer and draw */
#define PM4_DRAW_INDX 0x22
#define CP_DRAW_INDX 0x22
/* draw using supplied indices in packet */
#define PM4_DRAW_INDX_2 0x36
#define CP_DRAW_INDX_2 0x36
/* initiate fetch of index buffer and binIDs and draw */
#define PM4_DRAW_INDX_BIN 0x34
#define CP_DRAW_INDX_BIN 0x34
/* initiate fetch of bin IDs and draw using supplied indices */
#define PM4_DRAW_INDX_2_BIN 0x35
#define CP_DRAW_INDX_2_BIN 0x35
/* begin/end initiator for viz query extent processing */
#define PM4_VIZ_QUERY 0x23
#define CP_VIZ_QUERY 0x23
/* fetch state sub-blocks and initiate shader code DMAs */
#define PM4_SET_STATE 0x25
#define CP_SET_STATE 0x25
/* load constant into chip and to memory */
#define PM4_SET_CONSTANT 0x2d
#define CP_SET_CONSTANT 0x2d
/* load sequencer instruction memory (pointer-based) */
#define PM4_IM_LOAD 0x27
#define CP_IM_LOAD 0x27
/* load sequencer instruction memory (code embedded in packet) */
#define PM4_IM_LOAD_IMMEDIATE 0x2b
#define CP_IM_LOAD_IMMEDIATE 0x2b
/* load constants from a location in memory */
#define PM4_LOAD_CONSTANT_CONTEXT 0x2e
#define CP_LOAD_CONSTANT_CONTEXT 0x2e
/* selective invalidation of state pointers */
#define PM4_INVALIDATE_STATE 0x3b
#define CP_INVALIDATE_STATE 0x3b
/* dynamically changes shader instruction memory partition */
#define PM4_SET_SHADER_BASES 0x4A
#define CP_SET_SHADER_BASES 0x4A
/* sets the 64-bit BIN_MASK register in the PFP */
#define PM4_SET_BIN_MASK 0x50
#define CP_SET_BIN_MASK 0x50
/* sets the 64-bit BIN_SELECT register in the PFP */
#define PM4_SET_BIN_SELECT 0x51
#define CP_SET_BIN_SELECT 0x51
/* updates the current context, if needed */
#define PM4_CONTEXT_UPDATE 0x5e
#define CP_CONTEXT_UPDATE 0x5e
/* generate interrupt from the command stream */
#define PM4_INTERRUPT 0x40
#define CP_INTERRUPT 0x40
/* copy sequencer instruction memory to system memory */
#define PM4_IM_STORE 0x2c
#define CP_IM_STORE 0x2c
/* program an offset that will be added to the BIN_BASE value of
* the 3D_DRAW_INDX_BIN packet */
#define PM4_SET_BIN_BASE_OFFSET 0x4B
/*
* for a20x
* program an offset that will be added to the BIN_BASE value of
* the 3D_DRAW_INDX_BIN packet
*/
#define CP_SET_BIN_BASE_OFFSET 0x4B
#define PM4_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
/*
* for a22x
* sets draw initiator flags register in PFP, gets bitwise-ORed into
* every draw initiator
*/
#define CP_SET_DRAW_INIT_FLAGS 0x4B
#define CP_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
/* packet header building macros */
#define pm4_type0_packet(regindx, cnt) \
(PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
#define cp_type0_packet(regindx, cnt) \
(CP_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
#define pm4_type0_packet_for_sameregister(regindx, cnt) \
((PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
#define cp_type0_packet_for_sameregister(regindx, cnt) \
((CP_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
((regindx) & 0x7FFF)))
#define pm4_type1_packet(reg0, reg1) \
(PM4_TYPE1_PKT | ((reg1) << 12) | (reg0))
#define cp_type1_packet(reg0, reg1) \
(CP_TYPE1_PKT | ((reg1) << 12) | (reg0))
#define pm4_type3_packet(opcode, cnt) \
(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
#define cp_type3_packet(opcode, cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
#define pm4_predicated_type3_packet(opcode, cnt) \
(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
#define cp_predicated_type3_packet(opcode, cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
#define pm4_nop_packet(cnt) \
(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (PM4_NOP << 8))
#define cp_nop_packet(cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8))
/* packet headers */
#define PM4_HDR_ME_INIT pm4_type3_packet(PM4_ME_INIT, 18)
#define PM4_HDR_INDIRECT_BUFFER_PFD pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)
#define PM4_HDR_INDIRECT_BUFFER pm4_type3_packet(PM4_INDIRECT_BUFFER, 2)
#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18)
#define CP_HDR_INDIRECT_BUFFER_PFD cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)
#define CP_HDR_INDIRECT_BUFFER cp_type3_packet(CP_INDIRECT_BUFFER, 2)
/* dword base address of the GFX decode space */
#define SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
/* gmem command buffer length */
#define CP_REG(reg) ((0x4 << 16) | (SUBBLOCK_OFFSET(reg)))
#endif /* __ADRENO_PM4TYPES_H */
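To make the renamed encodings concrete: with the definitions above, CP_HDR_ME_INIT expands to CP_TYPE3_PKT | ((18-1) << 16) | (CP_ME_INIT << 8) = 0xC0114800, and a small command can be packed like this (the register offset below is a hypothetical value in the 0x2000 decode space):

/* Sketch: pack a 3-dword SET_CONSTANT command with the macros above. */
static void pack_set_constant_sketch(unsigned int *cmds)
{
	*cmds++ = cp_type3_packet(CP_SET_CONSTANT, 2);	/* 0xC0012D00 */
	*cmds++ = CP_REG(0x2280);	/* hypothetical register offset */
	*cmds++ = 0x00000000;		/* value to load */
}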

@ -22,9 +22,10 @@
#include "adreno_debugfs.h"
#include "kgsl_cffdump.h"
#include "a200_reg.h"
#include "a2xx_reg.h"
#define INVALID_RB_CMD 0xaaaaaaaa
#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
struct pm_id_name {
uint32_t id;
@ -43,28 +44,28 @@ static const struct pm_id_name pm0_types[] = {
};
static const struct pm_id_name pm3_types[] = {
{PM4_COND_EXEC, "CND_EXEC"},
{PM4_CONTEXT_UPDATE, "CX__UPDT"},
{PM4_DRAW_INDX, "DRW_NDX_"},
{PM4_DRAW_INDX_BIN, "DRW_NDXB"},
{PM4_EVENT_WRITE, "EVENT_WT"},
{PM4_IM_LOAD, "IN__LOAD"},
{PM4_IM_LOAD_IMMEDIATE, "IM_LOADI"},
{PM4_IM_STORE, "IM_STORE"},
{PM4_INDIRECT_BUFFER, "IND_BUF_"},
{PM4_INDIRECT_BUFFER_PFD, "IND_BUFP"},
{PM4_INTERRUPT, "PM4_INTR"},
{PM4_INVALIDATE_STATE, "INV_STAT"},
{PM4_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
{PM4_ME_INIT, "ME__INIT"},
{PM4_NOP, "PM4__NOP"},
{PM4_REG_RMW, "REG__RMW"},
{PM4_REG_TO_MEM, "REG2_MEM"},
{PM4_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
{PM4_SET_CONSTANT, "ST_CONST"},
{PM4_SET_PROTECTED_MODE, "ST_PRT_M"},
{PM4_SET_SHADER_BASES, "ST_SHD_B"},
{PM4_WAIT_FOR_IDLE, "WAIT4IDL"},
{CP_COND_EXEC, "CND_EXEC"},
{CP_CONTEXT_UPDATE, "CX__UPDT"},
{CP_DRAW_INDX, "DRW_NDX_"},
{CP_DRAW_INDX_BIN, "DRW_NDXB"},
{CP_EVENT_WRITE, "EVENT_WT"},
{CP_IM_LOAD, "IN__LOAD"},
{CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
{CP_IM_STORE, "IM_STORE"},
{CP_INDIRECT_BUFFER, "IND_BUF_"},
{CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
{CP_INTERRUPT, "PM4_INTR"},
{CP_INVALIDATE_STATE, "INV_STAT"},
{CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
{CP_ME_INIT, "ME__INIT"},
{CP_NOP, "PM4__NOP"},
{CP_REG_RMW, "REG__RMW"},
{CP_REG_TO_MEM, "REG2_MEM"},
{CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
{CP_SET_CONSTANT, "ST_CONST"},
{CP_SET_PROTECTED_MODE, "ST_PRT_M"},
{CP_SET_SHADER_BASES, "ST_SHD_B"},
{CP_WAIT_FOR_IDLE, "WAIT4IDL"},
};
/* Offset address pairs: start, end of range to dump (inclusive) */
@ -174,14 +175,14 @@ static bool adreno_is_pm4_type(uint32_t word)
if (adreno_is_pm4_len(word) > 16)
return 0;
if ((word & (3<<30)) == PM4_TYPE0_PKT) {
if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return 1;
}
return 0;
}
if ((word & (3<<30)) == PM4_TYPE3_PKT) {
if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return 1;
@ -198,14 +199,14 @@ static const char *adreno_pm4_name(uint32_t word)
if (word == INVALID_RB_CMD)
return "--------";
if ((word & (3<<30)) == PM4_TYPE0_PKT) {
if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return pm0_types[i].name;
}
return "????????";
}
if ((word & (3<<30)) == PM4_TYPE3_PKT) {
if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return pm3_types[i].name;
@ -289,7 +290,7 @@ static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
for (i = 0; i+3 < ib1_size; ) {
value = ib1_addr[i++];
if (value == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
if (value == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib2_base = ib1_addr[i++];
uint32_t ib2_size = ib1_addr[i++];
@ -456,7 +457,7 @@ static int adreno_dump(struct kgsl_device *device)
unsigned int r1, r2, r3, rbbm_status;
unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat;
unsigned int cp_ib2_base, cp_ib2_bufsz;
unsigned int pt_base;
unsigned int pt_base, cur_pt_base;
unsigned int cp_rb_base, rb_count;
unsigned int cp_rb_wptr, cp_rb_rptr;
unsigned int i;
@ -536,7 +537,12 @@ static int adreno_dump(struct kgsl_device *device)
kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
KGSL_LOG_DUMP(device,
"CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
"\n", cp_rb_base, r2, r3);
" | rb_count = %08X\n", cp_rb_base, r2, r3, rb_count);
{
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
if (rb->sizedwords != rb_count)
rb_count = rb->sizedwords;
}
kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
@ -628,38 +634,37 @@ static int adreno_dump(struct kgsl_device *device)
"COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4"
" = %08X\n", r1, r2, r3);
kgsl_regread(device, REG_MH_AXI_ERROR, &r1);
kgsl_regread(device, MH_AXI_ERROR, &r1);
KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1);
kgsl_regread(device, REG_MH_MMU_PAGE_FAULT, &r1);
kgsl_regread(device, REG_MH_MMU_CONFIG, &r2);
kgsl_regread(device, REG_MH_MMU_MPU_BASE, &r3);
kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
kgsl_regread(device, MH_MMU_CONFIG, &r2);
kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
KGSL_LOG_DUMP(device,
"MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE ="
" %08X\n", r1, r2, r3);
kgsl_regread(device, REG_MH_MMU_MPU_END, &r1);
kgsl_regread(device, REG_MH_MMU_VA_RANGE, &r2);
kgsl_regread(device, REG_MH_MMU_PT_BASE, &pt_base);
kgsl_regread(device, MH_MMU_MPU_END, &r1);
kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
pt_base = kgsl_mmu_get_current_ptbase(device);
KGSL_LOG_DUMP(device,
" MPU_END = %08X | VA_RANGE = %08X | PT_BASE ="
" %08X\n", r1, r2, pt_base);
cur_pt_base = pt_base;
KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", KGSL_PAGETABLE_SIZE);
kgsl_regread(device, REG_MH_MMU_TRAN_ERROR, &r1);
kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1);
kgsl_regread(device, REG_MH_INTERRUPT_MASK, &r1);
kgsl_regread(device, REG_MH_INTERRUPT_STATUS, &r2);
kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
KGSL_LOG_DUMP(device,
"MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
if (device->ftbl.device_readtimestamp != NULL) {
ts_processed = device->ftbl.device_readtimestamp(
device, KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
}
ts_processed = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
cp_rb_rptr);
@ -676,21 +681,21 @@ static int adreno_dump(struct kgsl_device *device)
KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
cp_rb_base, rb_count<<2, num_item);
rb_vaddr = (const uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
cp_rb_base, &rb_memsize);
rb_vaddr = (const uint32_t *)kgsl_sharedmem_convertaddr(device,
cur_pt_base, cp_rb_base, &rb_memsize);
if (!rb_vaddr) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"Can't fetch vaddr for CP_RB_BASE\n");
goto error_vfree;
}
read_idx = (int)cp_rb_rptr - 64;
read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (read_idx < 0)
read_idx += rb_count;
write_idx = (int)cp_rb_wptr + 16;
if (write_idx > rb_count)
write_idx -= rb_count;
num_item += 64+16;
num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16;
if (num_item > rb_count)
num_item = rb_count;
if (write_idx >= read_idx)
@ -706,20 +711,27 @@ static int adreno_dump(struct kgsl_device *device)
i = 0;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
if (this_cmd == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
if (this_cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib_addr = rb_copy[read_idx++];
uint32_t ib_size = rb_copy[read_idx++];
dump_ib1(device, pt_base, (read_idx-3)<<2, ib_addr,
dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
ib_size, &ib_list, 0);
for (; i < ib_list.count; ++i)
dump_ib(device, "IB2:", pt_base,
dump_ib(device, "IB2:", cur_pt_base,
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
} else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
/* Set cur_pt_base to the new pagetable base */
cur_pt_base = rb_copy[read_idx++];
}
}
read_idx = (int)cp_rb_rptr - 64;
/* Restore cur_pt_base back to the pt_base of
the process in whose context the GPU hung */
cur_pt_base = pt_base;
read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (read_idx < 0)
read_idx += rb_count;
KGSL_LOG_DUMP(device,
@ -728,30 +740,31 @@ static int adreno_dump(struct kgsl_device *device)
adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
if (adreno_ib_dump_enabled()) {
for (read_idx = 64; read_idx >= 0; --read_idx) {
for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY;
read_idx >= 0; --read_idx) {
uint32_t this_cmd = rb_copy[read_idx];
if (this_cmd == pm4_type3_packet(
PM4_INDIRECT_BUFFER_PFD, 2)) {
if (this_cmd == cp_type3_packet(
CP_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib_addr = rb_copy[read_idx+1];
uint32_t ib_size = rb_copy[read_idx+2];
if (cp_ib1_bufsz && cp_ib1_base == ib_addr) {
if (ib_size && cp_ib1_base == ib_addr) {
KGSL_LOG_DUMP(device,
"IB1: base:%8.8X "
"count:%d\n", ib_addr, ib_size);
dump_ib(device, "IB1: ", pt_base,
dump_ib(device, "IB1: ", cur_pt_base,
read_idx<<2, ib_addr, ib_size,
1);
}
}
}
for (i = 0; i < ib_list.count; ++i) {
if (cp_ib2_bufsz && cp_ib2_base == ib_list.bases[i]) {
uint32_t ib_size = ib_list.sizes[i];
uint32_t ib_offset = ib_list.offsets[i];
uint32_t ib_size = ib_list.sizes[i];
uint32_t ib_offset = ib_list.offsets[i];
if (ib_size && cp_ib2_base == ib_list.bases[i]) {
KGSL_LOG_DUMP(device,
"IB2: base:%8.8X count:%d\n",
cp_ib2_base, ib_size);
dump_ib(device, "IB2: ", pt_base, ib_offset,
dump_ib(device, "IB2: ", cur_pt_base, ib_offset,
ib_list.bases[i], ib_size, 1);
}
}
@ -802,7 +815,7 @@ int adreno_postmortem_dump(struct kgsl_device *device, int manual)
}
/* Disable the idle timer so we don't get interrupted */
del_timer(&device->idle_timer);
del_timer_sync(&device->idle_timer);
/* Turn off napping to make sure we have the clocks' full
attention through the following process */

@ -1,29 +1,13 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/

@ -16,14 +16,15 @@
#include <linux/log2.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "a200_reg.h"
#include "a2xx_reg.h"
#define VALID_STATUS_COUNT_MAX 10
#define GSL_RB_NOP_SIZEDWORDS 2
/* protected mode error checking below register address 0x800
* note: if CP_INTERRUPT packet is used then checking needs
@ -31,17 +32,6 @@
*/
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2
#define GSL_CP_INT_MASK \
(CP_INT_CNTL__SW_INT_MASK | \
CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
CP_INT_CNTL__OPCODE_ERROR_MASK | \
CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
CP_INT_CNTL__IB_ERROR_MASK | \
CP_INT_CNTL__IB2_INT_MASK | \
CP_INT_CNTL__IB1_INT_MASK | \
CP_INT_CNTL__RB_INT_MASK)
/* Firmware file names
* Legacy names must remain, but macro names are replaced to
* match the current kgsl model.
@ -52,102 +42,17 @@
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
/* functions */
void kgsl_cp_intrcallback(struct kgsl_device *device)
{
unsigned int status = 0, num_reads = 0, master_status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
adreno_regread_isr(device, REG_MASTER_INT_SIGNAL, &master_status);
while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
adreno_regread_isr(device, REG_CP_INT_STATUS, &status);
adreno_regread_isr(device, REG_MASTER_INT_SIGNAL,
&master_status);
num_reads++;
}
if (num_reads > 1)
KGSL_DRV_WARN(device,
"Looped %d times to read REG_CP_INT_STATUS\n",
num_reads);
if (!status) {
if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
/* This indicates that we could not read CP_INT_STAT.
* As a precaution just wake up processes so
* they can check their timestamps. Since we
* did not ack any interrupts, this interrupt will
* be generated again */
KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
wake_up_interruptible_all(&device->wait_queue);
} else
KGSL_DRV_WARN(device, "Spurious interrput detected\n");
return;
}
if (status & CP_INT_CNTL__RB_INT_MASK) {
/* signal intr completion event */
unsigned int enableflag = 0;
kgsl_sharedmem_writel(&rb->device->memstore,
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
enableflag);
wmb();
KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
}
if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer TO packet in IB interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer opcode error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer protected mode error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer reserved bit error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__IB_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer IB error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__SW_INT_MASK)
KGSL_CMD_INFO(rb->device, "ringbuffer software interrupt\n");
if (status & CP_INT_CNTL__IB2_INT_MASK)
KGSL_CMD_INFO(rb->device, "ringbuffer ib2 interrupt\n");
if (status & (~GSL_CP_INT_MASK))
KGSL_CMD_WARN(rb->device,
"bad bits in REG_CP_INT_STATUS %08x\n", status);
/* only ack bits we understand */
status &= GSL_CP_INT_MASK;
adreno_regwrite_isr(device, REG_CP_INT_ACK, status);
if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
wake_up_interruptible_all(&device->wait_queue);
atomic_notifier_call_chain(&(device->ts_notifier_list),
device->id,
NULL);
}
}
#define A225_PFP_FW "a225_pfp.fw"
#define A225_PM4_FW "a225_pm4.fw"
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
BUG_ON(rb->wptr == 0);
/* Let the pwrscale policy know that new commands have
been submitted. */
kgsl_pwrscale_busy(rb->device);
/*synchronize memory before informing the hardware of the
*new commands.
*/
@ -156,7 +61,7 @@ static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}
static int
static void
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
int wptr_ahead)
{
@ -173,7 +78,7 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));
GSL_RB_WRITE(cmds, cmds_gpu, cp_nop_packet(nopcount));
/* Make sure that rptr is not 0 before submitting
* commands at the end of ringbuffer. We do not
@ -197,8 +102,6 @@ adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
freecmds = rb->rptr - rb->wptr;
} while ((freecmds != 0) && (freecmds <= numcmds));
return 0;
}
@ -206,7 +109,6 @@ static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
unsigned int numcmds)
{
unsigned int *ptr = NULL;
int status = 0;
BUG_ON(numcmds >= rb->sizedwords);
@ -217,22 +119,20 @@ static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
/* reserve dwords for nop packet */
if ((rb->wptr + numcmds) > (rb->sizedwords -
GSL_RB_NOP_SIZEDWORDS))
status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
adreno_ringbuffer_waitspace(rb, numcmds, 1);
} else {
/* wptr behind rptr */
if ((rb->wptr + numcmds) >= rb->rptr)
status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
adreno_ringbuffer_waitspace(rb, numcmds, 0);
/* check for remaining space */
/* reserve dwords for nop packet */
if ((rb->wptr + numcmds) > (rb->sizedwords -
GSL_RB_NOP_SIZEDWORDS))
status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
adreno_ringbuffer_waitspace(rb, numcmds, 1);
}
if (status == 0) {
ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rb->wptr += numcmds;
}
ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rb->wptr += numcmds;
return ptr;
}
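With waitspace() now void and allocspace() unconditionally returning a window, callers follow a simple reserve/fill/submit pattern. A hedged sketch (the NOP padding is illustrative; real call sites emit real commands):

/* Sketch: reserve 3 dwords, fill them, then kick the CP. */
static void rb_emit_nops_sketch(struct adreno_ringbuffer *rb)
{
	unsigned int *cmds = adreno_ringbuffer_allocspace(rb, 3);

	*cmds++ = cp_nop_packet(2);	/* NOP header + 2 payload dwords */
	*cmds++ = 0;
	*cmds++ = 0;
	adreno_ringbuffer_submit(rb);	/* wmb(), then write REG_CP_RB_WPTR */
}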
@ -266,19 +166,15 @@ static int _load_firmware(struct kgsl_device *device, const char *fwfile,
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const char *fwfile;
int i, ret = 0;
if (adreno_is_a220(adreno_dev))
fwfile = A220_PM4_470_FW;
else
fwfile = A200_PM4_FW;
if (adreno_dev->pm4_fw == NULL) {
int len;
unsigned int *ptr;
void *ptr;
ret = _load_firmware(device, adreno_dev->pm4_fwfile,
&ptr, &len);
ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
if (ret)
goto err;
@ -309,19 +205,14 @@ err:
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const char *fwfile;
int i, ret = 0;
if (adreno_is_a220(adreno_dev))
fwfile = A220_PFP_470_FW;
else
fwfile = A200_PFP_FW;
if (adreno_dev->pfp_fw == NULL) {
int len;
unsigned int *ptr;
void *ptr;
ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
ret = _load_firmware(device, adreno_dev->pfp_fwfile,
&ptr, &len);
if (ret)
goto err;
@ -441,7 +332,7 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
cmds = adreno_ringbuffer_allocspace(rb, 19);
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
GSL_RB_WRITE(cmds, cmds_gpu, CP_HDR_ME_INIT);
/* All fields present (bits 9:0) */
GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
/* Disable/Enable Real-Time Stream processing (present but ignored) */
@ -450,21 +341,21 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
/* Vertex and Pixel Shader Start Addresses in instructions
* (3 DWORDS per instruction) */
@ -489,25 +380,20 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
/* idle device to validate ME INIT */
status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
adreno_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
if (status == 0)
rb->flags |= KGSL_FLAGS_STARTED;
return status;
}
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
if (rb->flags & KGSL_FLAGS_STARTED) {
adreno_regwrite(rb->device, REG_CP_INT_CNTL, 0);
/* ME_HALT */
adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
rb->flags &= ~KGSL_FLAGS_STARTED;
}
return 0;
}
int adreno_ringbuffer_init(struct kgsl_device *device)
@ -525,7 +411,8 @@ int adreno_ringbuffer_init(struct kgsl_device *device)
rb->sizedwords = KGSL_RB_SIZE >> 2;
/* allocate memory for ringbuffer */
status = kgsl_allocate_contig(&rb->buffer_desc, (rb->sizedwords << 2));
status = kgsl_allocate_contiguous(&rb->buffer_desc,
(rb->sizedwords << 2));
if (status != 0) {
adreno_ringbuffer_close(rb);
@ -535,7 +422,7 @@ int adreno_ringbuffer_init(struct kgsl_device *device)
/* allocate memory for polling and timestamps */
/* This really can be at 4 byte alignment boundary but for using MMU
* we need to make it at page boundary */
status = kgsl_allocate_contig(&rb->memptrs_desc,
status = kgsl_allocate_contiguous(&rb->memptrs_desc,
sizeof(struct kgsl_rbmemptrs));
if (status != 0) {
@ -549,7 +436,7 @@ int adreno_ringbuffer_init(struct kgsl_device *device)
return 0;
}
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
@ -563,8 +450,6 @@ int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
adreno_dev->pm4_fw = NULL;
memset(rb, 0, sizeof(struct adreno_ringbuffer));
return 0;
}
static uint32_t
@ -590,13 +475,13 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
+ sizeof(uint)*(rb->wptr-total_sizedwords);
if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_nop_packet(1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
}
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
}
@ -608,7 +493,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* re-enable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
cp_type3_packet(CP_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
}
@ -616,9 +501,9 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
timestamp = rb->timestamp;
/* start-of-pipeline and end-of-pipeline timestamps */
GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
(rb->device->memstore.gpuaddr +
@ -628,7 +513,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_COND_EXEC, 4));
cp_type3_packet(CP_COND_EXEC, 4));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
@ -637,7 +522,7 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
/* # of conditional command DWORDs */
GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_INTERRUPT, 1));
cp_type3_packet(CP_INTERRUPT, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
}
@ -701,13 +586,13 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);
*cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = CP_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
}
kgsl_setstate(device,
kgsl_pt_get_flags(device->mmu.hwpagetable,
kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
@ -751,13 +636,8 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
GSL_RB_GET_READPTR(rb, &rb->rptr);
/* drewis: still not sure where this struct was changed */
#if 0
retired_timestamp = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
#endif
retired_timestamp = device->ftbl.device_readtimestamp(
device, KGSL_TIMESTAMP_RETIRED);
KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
retired_timestamp);
/*
@ -786,9 +666,9 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
/* match the pattern found at the end of a command */
if ((val1 == 2 &&
val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
val2 == cp_type3_packet(CP_INTERRUPT, 1)
&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
(val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
(val1 == cp_type3_packet(CP_EVENT_WRITE, 3)
&& val2 == CACHE_FLUSH_TS &&
val3 == (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
@ -830,7 +710,7 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size));
if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
if (val1 == cp_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
KGSL_DRV_ERR(device,
"GPU recovery from hang not possible because "
"of hang in kgsl command\n");
@ -850,7 +730,7 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
BUG_ON(value != cp_type3_packet(CP_MEM_WRITE, 2));
kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
@ -873,14 +753,14 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
* commands can be executed */
if (value != cur_context) {
copy_rb_contents = 1;
temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
temp_rb_buffer[temp_idx++] =
KGSL_CMD_IDENTIFIER;
temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
temp_rb_buffer[temp_idx++] = cp_nop_packet(1);
temp_rb_buffer[temp_idx++] =
KGSL_CONTEXT_TO_MEM_IDENTIFIER;
temp_rb_buffer[temp_idx++] =
pm4_type3_packet(PM4_MEM_WRITE, 2);
cp_type3_packet(CP_MEM_WRITE, 2);
temp_rb_buffer[temp_idx++] = val1;
temp_rb_buffer[temp_idx++] = value;
} else {

@ -1,29 +1,14 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_RINGBUFFER_H
@ -77,9 +62,6 @@ struct adreno_ringbuffer {
uint32_t timestamp;
};
/* dword base address of the GFX decode space */
#define GSL_HAL_SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
#define GSL_RB_WRITE(ring, gpuaddr, data) \
do { \
writel_relaxed(data, ring); \
@ -135,9 +117,9 @@ int adreno_ringbuffer_init(struct kgsl_device *device);
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
unsigned int init_ram);
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
unsigned int flags,

@ -1,4 +1,5 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -20,6 +21,7 @@
#include <linux/android_pmem.h>
#include <linux/vmalloc.h>
#include <linux/pm_runtime.h>
#include <linux/genlock.h>
#include <linux/ashmem.h>
#include <linux/major.h>
@ -27,14 +29,78 @@
#include "kgsl.h"
#include "kgsl_debugfs.h"
#include "kgsl_cffdump.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#undef MODULE_PARAM_PREFIX
#define MODULE_PARAM_PREFIX "kgsl."
static int kgsl_pagetable_count = KGSL_PAGETABLE_COUNT;
static char *ksgl_mmu_type;
module_param_named(ptcount, kgsl_pagetable_count, int, 0);
MODULE_PARM_DESC(kgsl_pagetable_count,
"Minimum number of pagetables for KGSL to allocate at initialization time");
module_param_named(mmutype, ksgl_mmu_type, charp, 0);
MODULE_PARM_DESC(ksgl_mmu_type,
"Type of MMU to be used for graphics. Valid values are 'iommu' or 'gpummu' or 'nommu'");
#ifdef CONFIG_GENLOCK
/**
* kgsl_add_event - Add a new timestamp event for the KGSL device
* @device - KGSL device for the new event
* @ts - the timestamp to trigger the event on
* @cb - callback function to call when the timestamp expires
* @priv - private data for the specific event type
*
* @returns - 0 on success or error code on failure
*/
static int kgsl_add_event(struct kgsl_device *device, u32 ts,
void (*cb)(struct kgsl_device *, void *, u32), void *priv)
{
struct kgsl_event *event;
struct list_head *n;
unsigned int cur = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
if (cb == NULL)
return -EINVAL;
/* Check to see if the requested timestamp has already fired */
if (timestamp_cmp(cur, ts) >= 0) {
cb(device, priv, cur);
return 0;
}
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event == NULL)
return -ENOMEM;
event->timestamp = ts;
event->priv = priv;
event->func = cb;
/* Add the event in order to the list */
for (n = device->events.next ; n != &device->events; n = n->next) {
struct kgsl_event *e =
list_entry(n, struct kgsl_event, list);
if (timestamp_cmp(e->timestamp, ts) > 0) {
list_add(&event->list, n->prev);
break;
}
}
if (n == &device->events)
list_add_tail(&event->list, &device->events);
return 0;
}
#endif
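kgsl_add_event() keeps the list ordered by timestamp so that kgsl_timestamp_expired() further down can stop scanning at the first entry that has not yet retired. A hedged usage sketch, with a hypothetical callback and cookie:

#ifdef CONFIG_GENLOCK
/* Hypothetical consumer: free a cookie once timestamp 'ts' retires. */
static void sample_event_cb(struct kgsl_device *device, void *priv, u32 ts)
{
	kfree(priv);
}

static int queue_sample_event(struct kgsl_device *device, u32 ts, void *cookie)
{
	/* Runs the callback immediately if 'ts' has already retired. */
	return kgsl_add_event(device, ts, sample_event_cb, cookie);
}
#endif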
static inline struct kgsl_mem_entry *
kgsl_mem_entry_create(void)
@ -173,36 +239,41 @@ static void kgsl_memqueue_freememontimestamp(struct kgsl_device *device,
list_add_tail(&entry->list, &device->memqueue);
}
static void kgsl_memqueue_drain(struct kgsl_device *device)
static void kgsl_timestamp_expired(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
ts_expired_ws);
struct kgsl_mem_entry *entry, *entry_tmp;
struct kgsl_event *event, *event_tmp;
uint32_t ts_processed;
BUG_ON(!mutex_is_locked(&device->mutex));
mutex_lock(&device->mutex);
/* get current EOP timestamp */
ts_processed = device->ftbl.device_readtimestamp(
device,
KGSL_TIMESTAMP_RETIRED);
ts_processed = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
/* Flush the freememontimestamp queue */
list_for_each_entry_safe(entry, entry_tmp, &device->memqueue, list) {
KGSL_MEM_INFO(device,
"ts_processed %d ts_free %d gpuaddr %x)\n",
ts_processed, entry->free_timestamp,
entry->memdesc.gpuaddr);
if (!timestamp_cmp(ts_processed, entry->free_timestamp))
if (timestamp_cmp(ts_processed, entry->free_timestamp) < 0)
break;
list_del(&entry->list);
kgsl_mem_entry_put(entry);
}
}
static void kgsl_memqueue_drain_unlocked(struct kgsl_device *device)
{
mutex_lock(&device->mutex);
kgsl_check_suspended(device);
kgsl_memqueue_drain(device);
/* Process expired events */
list_for_each_entry_safe(event, event_tmp, &device->events, list) {
if (timestamp_cmp(ts_processed, event->timestamp) < 0)
break;
if (event->func)
event->func(device, event->priv, ts_processed);
list_del(&event->list);
kfree(event);
}
mutex_unlock(&device->mutex);
}
@ -280,43 +351,19 @@ EXPORT_SYMBOL(kgsl_unregister_ts_notifier);
int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp)
{
unsigned int ts_processed;
BUG_ON(device->ftbl.device_readtimestamp == NULL);
ts_processed = device->ftbl.device_readtimestamp(
device, KGSL_TIMESTAMP_RETIRED);
ts_processed = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
return timestamp_cmp(ts_processed, timestamp);
return (timestamp_cmp(ts_processed, timestamp) >= 0);
}
EXPORT_SYMBOL(kgsl_check_timestamp);
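The comparisons here change from boolean tests to signed ones (>= 0, < 0), which implies timestamp_cmp() is now a three-way, wraparound-aware compare rather than a yes/no check. Its definition sits outside this hunk, so purely as an assumption, a sketch of the usual kernel idiom it would follow:

/* Assumed shape of timestamp_cmp() (definition not in this hunk):
 * signed subtraction keeps the compare correct when the 32-bit
 * timestamp counter wraps, in the style of time_after(). */
static inline int timestamp_cmp_sketch(unsigned int a, unsigned int b)
{
	return (int)(a - b);	/* >0: a newer, 0: equal, <0: a older */
}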
int kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
int status = -ENXIO;
if (flags && device->ftbl.device_setstate) {
status = device->ftbl.device_setstate(device, flags);
} else
status = 0;
return status;
}
EXPORT_SYMBOL(kgsl_setstate);
int kgsl_idle(struct kgsl_device *device, unsigned int timeout)
{
int status = -ENXIO;
if (device->ftbl.device_idle)
status = device->ftbl.device_idle(device, timeout);
return status;
}
EXPORT_SYMBOL(kgsl_idle);
static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
{
int status = -EINVAL;
unsigned int nap_allowed_saved;
struct kgsl_pwrscale_policy *policy_saved;
if (!device)
return -EINVAL;
@ -326,6 +373,8 @@ static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
mutex_lock(&device->mutex);
nap_allowed_saved = device->pwrctrl.nap_allowed;
device->pwrctrl.nap_allowed = false;
policy_saved = device->pwrscale.policy;
device->pwrscale.policy = NULL;
device->requested_state = KGSL_STATE_SUSPEND;
/* Make sure no user process is waiting for a timestamp *
* before suspending */
@ -335,19 +384,19 @@ static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
mutex_lock(&device->mutex);
}
/* Don't let the timer wake us during suspended sleep. */
del_timer(&device->idle_timer);
del_timer_sync(&device->idle_timer);
switch (device->state) {
case KGSL_STATE_INIT:
break;
case KGSL_STATE_ACTIVE:
/* Wait for the device to become idle */
device->ftbl.device_idle(device, KGSL_TIMEOUT_DEFAULT);
device->ftbl->idle(device, KGSL_TIMEOUT_DEFAULT);
case KGSL_STATE_NAP:
case KGSL_STATE_SLEEP:
/* Get the completion ready to be waited upon. */
INIT_COMPLETION(device->hwaccess_gate);
device->ftbl.device_suspend_context(device);
device->ftbl.device_stop(device);
device->ftbl->suspend_context(device);
device->ftbl->stop(device);
device->state = KGSL_STATE_SUSPEND;
KGSL_PWR_WARN(device, "state -> SUSPEND, device %d\n",
device->id);
@ -359,6 +408,7 @@ static int kgsl_suspend_device(struct kgsl_device *device, pm_message_t state)
}
device->requested_state = KGSL_STATE_NONE;
device->pwrctrl.nap_allowed = nap_allowed_saved;
device->pwrscale.policy = policy_saved;
status = 0;
end:
@ -378,7 +428,8 @@ static int kgsl_resume_device(struct kgsl_device *device)
mutex_lock(&device->mutex);
if (device->state == KGSL_STATE_SUSPEND) {
device->requested_state = KGSL_STATE_ACTIVE;
status = device->ftbl.device_start(device, 0);
kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
status = device->ftbl->start(device, 0);
if (status == 0) {
device->state = KGSL_STATE_ACTIVE;
KGSL_PWR_WARN(device,
@ -391,13 +442,13 @@ static int kgsl_resume_device(struct kgsl_device *device)
device->state = KGSL_STATE_INIT;
goto end;
}
status = device->ftbl.device_resume_context(device);
complete_all(&device->hwaccess_gate);
}
device->requested_state = KGSL_STATE_NONE;
end:
mutex_unlock(&device->mutex);
kgsl_check_idle(device);
KGSL_PWR_WARN(device, "resume end\n");
return status;
}
@ -434,6 +485,16 @@ const struct dev_pm_ops kgsl_pm_ops = {
};
EXPORT_SYMBOL(kgsl_pm_ops);
void kgsl_early_suspend_driver(struct early_suspend *h)
{
struct kgsl_device *device = container_of(h,
struct kgsl_device, display_off);
mutex_lock(&device->mutex);
kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_NOMINAL);
mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_early_suspend_driver);
int kgsl_suspend_driver(struct platform_device *pdev,
pm_message_t state)
{
@ -449,6 +510,16 @@ int kgsl_resume_driver(struct platform_device *pdev)
}
EXPORT_SYMBOL(kgsl_resume_driver);
void kgsl_late_resume_driver(struct early_suspend *h)
{
struct kgsl_device *device = container_of(h,
struct kgsl_device, display_off);
mutex_lock(&device->mutex);
kgsl_pwrctrl_pwrlevel_change(device, KGSL_PWRLEVEL_TURBO);
mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_late_resume_driver);
/* file operations */
static struct kgsl_process_private *
kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
@ -477,15 +548,11 @@ kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
INIT_LIST_HEAD(&private->mem_list);
#ifdef CONFIG_MSM_KGSL_MMU
if (kgsl_mmu_enabled())
{
unsigned long pt_name;
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
pt_name = task_tgid_nr(current);
#else
pt_name = KGSL_MMU_GLOBAL_PT;
#endif
private->pagetable = kgsl_mmu_getpagetable(pt_name);
if (private->pagetable == NULL) {
kfree(private);
@ -493,7 +560,6 @@ kgsl_get_process_private(struct kgsl_device_private *cur_dev_priv)
goto out;
}
}
#endif
list_add(&private->list, &kgsl_driver.process_list);
@ -559,7 +625,7 @@ static int kgsl_release(struct inode *inodep, struct file *filep)
break;
if (context->dev_priv == dev_priv) {
device->ftbl.device_drawctxt_destroy(device, context);
device->ftbl->drawctxt_destroy(device, context);
kgsl_destroy_context(dev_priv, context);
}
@ -568,7 +634,7 @@ static int kgsl_release(struct inode *inodep, struct file *filep)
device->open_count--;
if (device->open_count == 0) {
result = device->ftbl.device_stop(device);
result = device->ftbl->stop(device);
device->state = KGSL_STATE_INIT;
KGSL_PWR_WARN(device, "state -> INIT, device %d\n", device->id);
}
@ -602,7 +668,6 @@ static int kgsl_open(struct inode *inodep, struct file *filep)
}
result = pm_runtime_get_sync(device->parentdev);
result = 0;
if (result < 0) {
KGSL_DRV_ERR(device,
"Runtime PM: Unable to wake up the device, rc = %d\n",
@ -633,7 +698,7 @@ static int kgsl_open(struct inode *inodep, struct file *filep)
kgsl_check_suspended(device);
if (device->open_count == 0) {
result = device->ftbl.device_start(device, true);
result = device->ftbl->start(device, true);
if (result) {
mutex_unlock(&device->mutex);
@ -648,7 +713,7 @@ static int kgsl_open(struct inode *inodep, struct file *filep)
KGSL_DRV_INFO(device, "Initialized %s: mmu=%s pagetable_count=%d\n",
device->name, kgsl_mmu_enabled() ? "on" : "off",
KGSL_PAGETABLE_COUNT);
kgsl_pagetable_count);
return result;
@ -746,7 +811,7 @@ static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv,
break;
}
default:
result = dev_priv->device->ftbl.device_getproperty(
result = dev_priv->device->ftbl->getproperty(
dev_priv->device, param->type,
param->value, param->sizebytes);
}
@ -767,16 +832,10 @@ static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private
dev_priv->device->active_cnt++;
/* Don't wait forever, set a max value for now */
if (param->timeout == -1)
param->timeout = 10 * MSEC_PER_SEC;
result = dev_priv->device->ftbl.device_waittimestamp(dev_priv->device,
result = dev_priv->device->ftbl->waittimestamp(dev_priv->device,
param->timestamp,
param->timeout);
kgsl_memqueue_drain(dev_priv->device);
/* Fire off any pending suspend operations that are in flight */
INIT_COMPLETION(dev_priv->device->suspend_gate);
@ -894,15 +953,7 @@ static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv,
goto free_ibdesc;
}
/* Let the pwrscale policy know that a new command buffer
is being issued */
kgsl_pwrscale_busy(dev_priv->device);
/* drewis: don't know what changed this...diff from cherry-pick
f3c1074d1539be20cecbb82f37705bd16058418e */
/* result = dev_priv->device->ftbl->issueibcmds(dev_priv,*/
result = dev_priv->device->ftbl.device_issueibcmds(dev_priv,
result = dev_priv->device->ftbl->issueibcmds(dev_priv,
context,
ibdesc,
param->numibs,
@ -939,8 +990,8 @@ static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private
struct kgsl_cmdstream_readtimestamp *param = data;
param->timestamp =
dev_priv->device->ftbl.device_readtimestamp(
dev_priv->device, param->type);
dev_priv->device->ftbl->readtimestamp(dev_priv->device,
param->type);
return 0;
}
@ -962,7 +1013,6 @@ static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private
if (entry) {
kgsl_memqueue_freememontimestamp(dev_priv->device, entry,
param->timestamp, param->type);
kgsl_memqueue_drain(dev_priv->device);
} else {
KGSL_DRV_ERR(dev_priv->device,
"invalid gpuaddr %08x\n", param->gpuaddr);
@ -986,10 +1036,10 @@ static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv,
goto done;
}
if (dev_priv->device->ftbl.device_drawctxt_create != NULL)
result = dev_priv->device->ftbl.device_drawctxt_create(dev_priv,
param->flags,
context);
if (dev_priv->device->ftbl->drawctxt_create)
result = dev_priv->device->ftbl->drawctxt_create(
dev_priv->device, dev_priv->process_priv->pagetable,
context, param->flags);
param->drawctxt_id = context->id;
@ -1014,9 +1064,9 @@ static long kgsl_ioctl_drawctxt_destroy(struct kgsl_device_private *dev_priv,
goto done;
}
result = dev_priv->device->ftbl.device_drawctxt_destroy(
dev_priv->device,
context);
if (dev_priv->device->ftbl->drawctxt_destroy)
dev_priv->device->ftbl->drawctxt_destroy(dev_priv->device,
context);
kgsl_destroy_context(dev_priv, context);
@ -1074,9 +1124,6 @@ kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
if (!kgsl_mmu_enabled())
return -ENODEV;
/* Make sure all pending freed memory is collected */
kgsl_memqueue_drain_unlocked(dev_priv->device);
if (!param->hostptr) {
KGSL_CORE_ERR("invalid hostptr %x\n", param->hostptr);
result = -EINVAL;
@ -1251,7 +1298,11 @@ static int kgsl_setup_phys_file(struct kgsl_mem_entry *entry,
entry->memdesc.size = size;
entry->memdesc.physaddr = phys + (offset & PAGE_MASK);
entry->memdesc.hostptr = (void *) (virt + (offset & PAGE_MASK));
entry->memdesc.ops = &kgsl_contig_ops;
ret = memdesc_sg_phys(&entry->memdesc,
phys + (offset & PAGE_MASK), size);
if (ret)
goto err;
return 0;
err:
@ -1261,6 +1312,60 @@ err:
return ret;
}
static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
void *addr, int size)
{
int i;
int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
unsigned long paddr = (unsigned long) addr;
memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist),
GFP_KERNEL);
if (memdesc->sg == NULL)
return -ENOMEM;
memdesc->sglen = sglen;
sg_init_table(memdesc->sg, sglen);
spin_lock(&current->mm->page_table_lock);
for (i = 0; i < sglen; i++, paddr += PAGE_SIZE) {
struct page *page;
pmd_t *ppmd;
pte_t *ppte;
pgd_t *ppgd = pgd_offset(current->mm, paddr);
if (pgd_none(*ppgd) || pgd_bad(*ppgd))
goto err;
ppmd = pmd_offset(ppgd, paddr);
if (pmd_none(*ppmd) || pmd_bad(*ppmd))
goto err;
ppte = pte_offset_map(ppmd, paddr);
if (ppte == NULL)
goto err;
page = pfn_to_page(pte_pfn(*ppte));
if (!page)
goto err;
sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
pte_unmap(ppte);
}
spin_unlock(&current->mm->page_table_lock);
return 0;
err:
spin_unlock(&current->mm->page_table_lock);
kfree(memdesc->sg);
memdesc->sg = NULL;
return -EINVAL;
}
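/*
* Illustration only: memdesc_sg_virt() above walks the calling process's
* page tables one PAGE_SIZE step at a time and records the backing struct
* page of each user page in a scatterlist. A minimal sketch of the walk
* for a single mapped user address uaddr in current->mm:
*
*    pgd_t *pgd = pgd_offset(current->mm, uaddr);
*    pmd_t *pmd = pmd_offset(pgd, uaddr);
*    pte_t *pte = pte_offset_map(pmd, uaddr);
*    struct page *page = pfn_to_page(pte_pfn(*pte));
*    pte_unmap(pte);
*
* The real function adds pgd/pmd/pte validity checks and holds
* page_table_lock across the whole walk.
*/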
static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
struct kgsl_pagetable *pagetable,
void *hostptr, unsigned int offset,
@ -1310,9 +1415,9 @@ static int kgsl_setup_hostptr(struct kgsl_mem_entry *entry,
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = size;
entry->memdesc.hostptr = hostptr + (offset & PAGE_MASK);
entry->memdesc.ops = &kgsl_userptr_ops;
return 0;
return memdesc_sg_virt(&entry->memdesc,
hostptr + (offset & PAGE_MASK), size);
}
#ifdef CONFIG_ASHMEM
@ -1360,11 +1465,13 @@ static int kgsl_setup_ashmem(struct kgsl_mem_entry *entry,
}
entry->file_ptr = filep;
entry->memdesc.pagetable = pagetable;
entry->memdesc.size = ALIGN(size, PAGE_SIZE);
entry->memdesc.hostptr = hostptr;
entry->memdesc.ops = &kgsl_userptr_ops;
ret = memdesc_sg_virt(&entry->memdesc, hostptr, size);
if (ret)
goto err;
return 0;
@ -1395,8 +1502,6 @@ static long kgsl_ioctl_map_user_mem(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -ENOMEM;
kgsl_memqueue_drain_unlocked(dev_priv->device);
if (_IOC_SIZE(cmd) == sizeof(struct kgsl_sharedmem_from_pmem))
memtype = KGSL_USER_MEM_TYPE_PMEM;
else
@ -1536,9 +1641,6 @@ kgsl_ioctl_gpumem_alloc(struct kgsl_device_private *dev_priv,
if (entry == NULL)
return -ENOMEM;
/* Make sure all pending freed memory is collected */
kgsl_memqueue_drain_unlocked(dev_priv->device);
result = kgsl_allocate_user(&entry->memdesc, private->pagetable,
param->size, param->flags);
@ -1586,6 +1688,114 @@ static long kgsl_ioctl_cff_user_event(struct kgsl_device_private *dev_priv,
return result;
}
#ifdef CONFIG_GENLOCK
struct kgsl_genlock_event_priv {
struct genlock_handle *handle;
struct genlock *lock;
};
/**
* kgsl_genlock_event_cb - Event callback for a genlock timestamp event
* @device - The KGSL device that expired the timestamp
* @priv - private data for the event
* @timestamp - the timestamp that triggered the event
*
* Release a genlock lock following the expiration of a timestamp
*/
static void kgsl_genlock_event_cb(struct kgsl_device *device,
void *priv, u32 timestamp)
{
struct kgsl_genlock_event_priv *ev = priv;
int ret;
ret = genlock_lock(ev->handle, GENLOCK_UNLOCK, 0, 0);
if (ret)
KGSL_CORE_ERR("Error while unlocking genlock: %d\n", ret);
genlock_put_handle(ev->handle);
kfree(ev);
}
/**
* kgsl_add_genlock_event - Create a new genlock event
* @device - KGSL device to create the event on
* @timestamp - Timestamp to trigger the event
* @data - User space buffer containing struct kgsl_timestamp_event_genlock
* @len - length of the userspace buffer
* @returns 0 on success or error code on error
*
* Attach to a genlock handle and register an event to release the
* genlock lock when the timestamp expires
*/
static int kgsl_add_genlock_event(struct kgsl_device *device,
u32 timestamp, void __user *data, int len)
{
struct kgsl_genlock_event_priv *event;
struct kgsl_timestamp_event_genlock priv;
int ret;
if (len != sizeof(priv))
return -EINVAL;
if (copy_from_user(&priv, data, sizeof(priv)))
return -EFAULT;
event = kzalloc(sizeof(*event), GFP_KERNEL);
if (event == NULL)
return -ENOMEM;
event->handle = genlock_get_handle_fd(priv.handle);
if (IS_ERR(event->handle)) {
int ret = PTR_ERR(event->handle);
kfree(event);
return ret;
}
ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event);
if (ret)
kfree(event);
return ret;
}
#else
static int kgsl_add_genlock_event(struct kgsl_device *device,
u32 timestamp, void __user *data, int len)
{
return -EINVAL;
}
#endif
/**
* kgsl_ioctl_timestamp_event - Register a new timestamp event from userspace
* @dev_priv - pointer to the private device structure
* @cmd - the ioctl cmd passed from kgsl_ioctl
* @data - the user data buffer from kgsl_ioctl
* @returns 0 on success or error code on failure
*/
static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
struct kgsl_timestamp_event *param = data;
int ret;
switch (param->type) {
case KGSL_TIMESTAMP_EVENT_GENLOCK:
ret = kgsl_add_genlock_event(dev_priv->device,
param->timestamp, param->priv, param->len);
break;
default:
ret = -EINVAL;
}
return ret;
}
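/*
* Usage sketch (illustration only, not part of this file): a userspace
* client fills in struct kgsl_timestamp_event and issues the ioctl; fd
* and ts are hypothetical values here.
*
*    struct kgsl_timestamp_event_genlock glock = { .handle = genlock_fd };
*    struct kgsl_timestamp_event event = {
*        .type = KGSL_TIMESTAMP_EVENT_GENLOCK,
*        .timestamp = ts,
*        .priv = &glock,
*        .len = sizeof(glock),
*    };
*    ioctl(fd, IOCTL_KGSL_TIMESTAMP_EVENT, &event);
*
* When timestamp ts retires, kgsl_genlock_event_cb() unlocks the genlock
* named by glock.handle.
*/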
typedef long (*kgsl_ioctl_func_t)(struct kgsl_device_private *,
unsigned int, void *);
@ -1627,6 +1837,8 @@ static const struct {
kgsl_ioctl_cff_syncmem, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_CFF_USER_EVENT,
kgsl_ioctl_cff_user_event, 0),
KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT,
kgsl_ioctl_timestamp_event, 1),
};
static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
@ -1676,7 +1888,13 @@ static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg)
func = kgsl_ioctl_funcs[nr].func;
lock = kgsl_ioctl_funcs[nr].lock;
} else {
func = dev_priv->device->ftbl.device_ioctl;
func = dev_priv->device->ftbl->ioctl;
if (!func) {
KGSL_DRV_INFO(dev_priv->device,
"invalid ioctl code %08x\n", cmd);
ret = -EINVAL;
goto done;
}
lock = 1;
}
@ -1749,7 +1967,7 @@ kgsl_gpumem_vm_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
struct kgsl_mem_entry *entry = vma->vm_private_data;
if (!entry->memdesc.ops->vmfault)
if (!entry->memdesc.ops || !entry->memdesc.ops->vmfault)
return VM_FAULT_SIGBUS;
return entry->memdesc.ops->vmfault(&entry->memdesc, vma, vmf);
@ -1773,7 +1991,7 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
unsigned long vma_offset = vma->vm_pgoff << PAGE_SHIFT;
struct kgsl_device_private *dev_priv = file->private_data;
struct kgsl_process_private *private = dev_priv->process_priv;
struct kgsl_mem_entry *entry;
struct kgsl_mem_entry *tmp, *entry = NULL;
struct kgsl_device *device = dev_priv->device;
/* Handle legacy behavior for memstore */
@ -1784,9 +2002,10 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
/* Find a chunk of GPU memory */
spin_lock(&private->mem_lock);
list_for_each_entry(entry, &private->mem_list, list) {
if (vma_offset == entry->memdesc.gpuaddr) {
kgsl_mem_entry_get(entry);
list_for_each_entry(tmp, &private->mem_list, list) {
if (vma_offset == tmp->memdesc.gpuaddr) {
kgsl_mem_entry_get(tmp);
entry = tmp;
break;
}
}
@ -1795,7 +2014,9 @@ static int kgsl_mmap(struct file *file, struct vm_area_struct *vma)
if (entry == NULL)
return -EINVAL;
if (!entry->memdesc.ops->vmflags || !entry->memdesc.ops->vmfault)
if (!entry->memdesc.ops ||
!entry->memdesc.ops->vmflags ||
!entry->memdesc.ops->vmfault)
return -EINVAL;
vma->vm_flags |= entry->memdesc.ops->vmflags(&entry->memdesc);
@ -1818,7 +2039,7 @@ static const struct file_operations kgsl_fops = {
struct kgsl_driver kgsl_driver = {
.process_mutex = __MUTEX_INITIALIZER(kgsl_driver.process_mutex),
.pt_mutex = __MUTEX_INITIALIZER(kgsl_driver.pt_mutex),
.ptlock = __SPIN_LOCK_UNLOCKED(kgsl_driver.ptlock),
.devlock = __MUTEX_INITIALIZER(kgsl_driver.devlock),
};
EXPORT_SYMBOL(kgsl_driver);
@ -1842,6 +2063,8 @@ void kgsl_unregister_device(struct kgsl_device *device)
kgsl_pwrctrl_uninit_sysfs(device);
wake_lock_destroy(&device->idle_wakelock);
pm_qos_remove_requirement(PM_QOS_CPU_DMA_LATENCY, "kgsl");
idr_destroy(&device->context_idr);
if (device->memstore.hostptr)
@ -1860,8 +2083,6 @@ void kgsl_unregister_device(struct kgsl_device *device)
mutex_lock(&kgsl_driver.devlock);
kgsl_driver.devp[minor] = NULL;
mutex_unlock(&kgsl_driver.devlock);
atomic_dec(&kgsl_driver.device_count);
}
EXPORT_SYMBOL(kgsl_unregister_device);
@ -1904,8 +2125,6 @@ kgsl_register_device(struct kgsl_device *device)
dev_set_drvdata(device->parentdev, device);
/* Generic device initialization */
atomic_inc(&kgsl_driver.device_count);
init_waitqueue_head(&device->wait_queue);
kgsl_cffdump_open(device->id);
@ -1921,22 +2140,25 @@ kgsl_register_device(struct kgsl_device *device)
goto err_devlist;
INIT_WORK(&device->idle_check_ws, kgsl_idle_check);
INIT_WORK(&device->ts_expired_ws, kgsl_timestamp_expired);
INIT_LIST_HEAD(&device->memqueue);
INIT_LIST_HEAD(&device->events);
ret = kgsl_mmu_init(device);
if (ret != 0)
goto err_dest_work_q;
ret = kgsl_allocate_contig(&device->memstore,
ret = kgsl_allocate_contiguous(&device->memstore,
sizeof(struct kgsl_devmemstore));
if (ret != 0)
goto err_close_mmu;
kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size);
wake_lock_init(&device->idle_wakelock, WAKE_LOCK_IDLE, device->name);
pm_qos_add_requirement(PM_QOS_CPU_DMA_LATENCY, "kgsl",
PM_QOS_DEFAULT_VALUE);
idr_init(&device->context_idr);
/* sysfs and debugfs initialization - failure here is non-fatal */
@ -2066,17 +2288,19 @@ EXPORT_SYMBOL(kgsl_device_platform_remove);
static int __devinit
kgsl_ptdata_init(void)
{
INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
return kgsl_ptpool_init(&kgsl_driver.ptpool, KGSL_PAGETABLE_SIZE,
kgsl_pagetable_count);
kgsl_driver.ptpool = kgsl_mmu_ptpool_init(KGSL_PAGETABLE_SIZE,
kgsl_pagetable_count);
if (!kgsl_driver.ptpool)
return -ENOMEM;
return 0;
}
static void kgsl_core_exit(void)
{
unregister_chrdev_region(kgsl_driver.major, KGSL_DEVICE_MAX);
kgsl_ptpool_destroy(&kgsl_driver.ptpool);
kgsl_mmu_ptpool_destroy(&kgsl_driver.ptpool);
kgsl_driver.ptpool = NULL;
device_unregister(&kgsl_driver.virtdev);
@ -2094,7 +2318,6 @@ static void kgsl_core_exit(void)
static int __init kgsl_core_init(void)
{
int result = 0;
/* alloc major and minor device numbers */
result = alloc_chrdev_region(&kgsl_driver.major, 0, KGSL_DEVICE_MAX,
KGSL_NAME);
@ -2148,14 +2371,17 @@ static int __init kgsl_core_init(void)
kgsl_sharedmem_init_sysfs();
kgsl_cffdump_init();
/* Generic device initialization */
atomic_set(&kgsl_driver.device_count, -1);
INIT_LIST_HEAD(&kgsl_driver.process_list);
result = kgsl_ptdata_init();
if (result)
goto err;
INIT_LIST_HEAD(&kgsl_driver.pagetable_list);
kgsl_mmu_set_mmutype(ksgl_mmu_type);
if (KGSL_MMU_TYPE_GPU == kgsl_mmu_get_mmutype()) {
result = kgsl_ptdata_init();
if (result)
goto err;
}
result = kgsl_drm_init(NULL);

@ -1,29 +1,13 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_H
@ -38,26 +22,8 @@
#include <linux/cdev.h>
#include <linux/regulator/consumer.h>
#include <linux/atomic.h>
#include "kgsl_device.h"
#include "kgsl_pwrctrl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl_cffdump.h"
#define KGSL_NAME "kgsl"
#define CHIP_REV_251 0x020501
/* Flags to control whether to flush or invalidate a cached memory range */
#define KGSL_CACHE_INV 0x00000000
#define KGSL_CACHE_CLEAN 0x00000001
#define KGSL_CACHE_FLUSH 0x00000002
#define KGSL_CACHE_USER_ADDR 0x00000010
#define KGSL_CACHE_VMALLOC_ADDR 0x00000020
/* cache coherency ops */
#define DRM_KGSL_GEM_CACHE_OP_TO_DEV 0x0001
#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV 0x0002
@ -74,13 +40,9 @@
#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
KGSL_PT_EXTRA_ENTRIES)
#ifdef CONFIG_MSM_KGSL_MMU
#define KGSL_PAGETABLE_SIZE \
ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
#else
#define KGSL_PAGETABLE_SIZE 0
#endif
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
@ -99,6 +61,8 @@ KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
#define KGSL_STATS_ADD(_size, _stat, _max) \
do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
struct kgsl_device;
struct kgsl_driver {
struct cdev cdev;
dev_t major;
@ -108,24 +72,21 @@ struct kgsl_driver {
/* Kobjects for storing pagetable and process statistics */
struct kobject *ptkobj;
struct kobject *prockobj;
atomic_t device_count;
struct kgsl_device *devp[KGSL_DEVICE_MAX];
uint32_t flags_debug;
/* Global list of open processes */
struct list_head process_list;
/* Global list of pagetables */
struct list_head pagetable_list;
/* Mutex for accessing the pagetable list */
struct mutex pt_mutex;
/* Spinlock for accessing the pagetable list */
spinlock_t ptlock;
/* Mutex for accessing the process list */
struct mutex process_mutex;
/* Mutex for protecting the device list */
struct mutex devlock;
struct kgsl_ptpool ptpool;
void *ptpool;
struct {
unsigned int vmalloc;
@ -143,6 +104,22 @@ extern struct kgsl_driver kgsl_driver;
#define KGSL_USER_MEMORY 1
#define KGSL_MAPPED_MEMORY 2
struct kgsl_pagetable;
struct kgsl_memdesc_ops;
/* shared memory allocation */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
void *hostptr;
unsigned int gpuaddr;
unsigned int physaddr;
unsigned int size;
unsigned int priv;
struct scatterlist *sg;
unsigned int sglen;
struct kgsl_memdesc_ops *ops;
};
struct kgsl_mem_entry {
struct kref refcount;
struct kgsl_memdesc memdesc;
@ -167,53 +144,14 @@ uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
struct kgsl_mem_entry *kgsl_sharedmem_find_region(
struct kgsl_process_private *private, unsigned int gpuaddr,
size_t size);
int kgsl_idle(struct kgsl_device *device, unsigned int timeout);
int kgsl_setstate(struct kgsl_device *device, uint32_t flags);
static inline void kgsl_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
device->ftbl.device_regread(device, offsetwords, value);
}
static inline void kgsl_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
device->ftbl.device_regwrite(device, offsetwords, value);
}
static inline void kgsl_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
device->ftbl.device_regread_isr(device, offsetwords, value);
}
static inline void kgsl_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
device->ftbl.device_regwrite_isr(device, offsetwords, value);
}
int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp);
int kgsl_register_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_unregister_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_device_platform_probe(struct kgsl_device *device,
irqreturn_t (*dev_isr) (int, void*));
void kgsl_device_platform_remove(struct kgsl_device *device);
extern const struct dev_pm_ops kgsl_pm_ops;
struct early_suspend;
int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);
void kgsl_early_suspend_driver(struct early_suspend *h);
void kgsl_late_resume_driver(struct early_suspend *h);
#ifdef CONFIG_MSM_KGSL_DRM
extern int kgsl_drm_init(struct platform_device *dev);
@ -240,22 +178,14 @@ static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
return 0;
}
static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
return kgsl_driver.devp[i];
}
return NULL;
}
static inline bool timestamp_cmp(unsigned int new, unsigned int old)
static inline int timestamp_cmp(unsigned int new, unsigned int old)
{
int ts_diff = new - old;
return (ts_diff >= 0) || (ts_diff < -20000);
if (ts_diff == 0)
return 0;
return ((ts_diff > 0) || (ts_diff < -20000)) ? 1 : -1;
}
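/*
* Worked example (illustration only): timestamps are 32-bit counters that
* wrap, so the comparison is made on the signed difference. With
* new = 5 and old = 0xfffffffb, ts_diff is +10 and timestamp_cmp()
* returns 1: a timestamp just past the wrap point still compares as
* newer. Differences more negative than the -20000 window are likewise
* treated as wraparound rather than as genuinely older timestamps.
*/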
static inline void
@ -270,21 +200,4 @@ kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
static inline int kgsl_create_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int ret = 0, i;
for (i = 0; list[i] != NULL; i++)
ret |= device_create_file(root, list[i]);
return ret;
}
static inline void kgsl_remove_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int i;
for (i = 0; list[i] != NULL; i++)
device_remove_file(root, list[i]);
}
#endif /* __KGSL_H */

@ -20,6 +20,7 @@
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/sched.h>
#include <mach/socinfo.h>
#include "kgsl.h"
#include "kgsl_cffdump.h"
@ -362,8 +363,10 @@ void kgsl_cffdump_open(enum kgsl_deviceid device_id)
/*TODO: move this to where we can report correct gmemsize*/
unsigned int va_base;
/* XXX: drewis edit: only for 8x50 */
va_base = 0x20000000;
if (cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_msm8930())
va_base = 0x40000000;
else
va_base = 0x20000000;
kgsl_cffdump_memory_base(device_id, va_base,
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, SZ_256K);
@ -523,8 +526,8 @@ static bool kgsl_cffdump_handle_type3(struct kgsl_device_private *dev_priv,
static uint size_stack[ADDRESS_STACK_SIZE];
switch (GET_PM4_TYPE3_OPCODE(hostaddr)) {
case PM4_INDIRECT_BUFFER_PFD:
case PM4_INDIRECT_BUFFER:
case CP_INDIRECT_BUFFER_PFD:
case CP_INDIRECT_BUFFER:
{
/* traverse indirect buffers */
int i;
@ -607,7 +610,6 @@ bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
if (!memdesc->physaddr) {
KGSL_CORE_ERR("no physaddr");
return true;
} else {
mb();
kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,

@ -1,29 +1,13 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/

@ -14,6 +14,7 @@
#include <linux/debugfs.h>
#include "kgsl.h"
#include "kgsl_device.h"
/* default log level is error for everything */
#define KGSL_LOG_LEVEL_DEFAULT 3

@ -1,29 +1,14 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_DEVICE_H
@ -31,7 +16,10 @@
#include <linux/idr.h>
#include <linux/wakelock.h>
#include <linux/pm_qos_params.h>
#include <linux/earlysuspend.h>
#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_pwrctrl.h"
#include "kgsl_log.h"
@ -71,55 +59,48 @@ struct kgsl_context;
struct kgsl_power_stats;
struct kgsl_functable {
void (*device_regread) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void (*device_regwrite) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
void (*device_regread_isr) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void (*device_regwrite_isr) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
int (*device_setstate) (struct kgsl_device *device, uint32_t flags);
int (*device_idle) (struct kgsl_device *device, unsigned int timeout);
unsigned int (*device_isidle) (struct kgsl_device *device);
int (*device_suspend_context) (struct kgsl_device *device);
int (*device_resume_context) (struct kgsl_device *device);
int (*device_start) (struct kgsl_device *device, unsigned int init_ram);
int (*device_stop) (struct kgsl_device *device);
int (*device_getproperty) (struct kgsl_device *device,
enum kgsl_property_type type,
void *value,
unsigned int sizebytes);
int (*device_waittimestamp) (struct kgsl_device *device,
unsigned int timestamp,
unsigned int msecs);
unsigned int (*device_readtimestamp) (
struct kgsl_device *device,
enum kgsl_timestamp_type type);
int (*device_issueibcmds) (struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
struct kgsl_ibdesc *ibdesc,
unsigned int sizedwords,
uint32_t *timestamp,
unsigned int flags);
int (*device_drawctxt_create) (struct kgsl_device_private *dev_priv,
uint32_t flags,
struct kgsl_context *context);
int (*device_drawctxt_destroy) (struct kgsl_device *device,
struct kgsl_context *context);
long (*device_ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
int (*device_setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
int (*device_cleanup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*device_power_stats)(struct kgsl_device *device,
/* Mandatory functions - these functions must be implemented
by the client device. The driver will not check for a NULL
pointer before calling the hook.
*/
void (*regread) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int *value);
void (*regwrite) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int value);
int (*idle) (struct kgsl_device *device, unsigned int timeout);
unsigned int (*isidle) (struct kgsl_device *device);
int (*suspend_context) (struct kgsl_device *device);
int (*start) (struct kgsl_device *device, unsigned int init_ram);
int (*stop) (struct kgsl_device *device);
int (*getproperty) (struct kgsl_device *device,
enum kgsl_property_type type, void *value,
unsigned int sizebytes);
int (*waittimestamp) (struct kgsl_device *device,
unsigned int timestamp, unsigned int msecs);
unsigned int (*readtimestamp) (struct kgsl_device *device,
enum kgsl_timestamp_type type);
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
unsigned int sizedwords, uint32_t *timestamp,
unsigned int flags);
int (*setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*cleanup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
void (*irqctrl)(struct kgsl_device *device, int state);
/* Optional functions - these functions are not mandatory. The
driver will check that the function pointer is not NULL before
calling the hook */
void (*setstate) (struct kgsl_device *device, uint32_t flags);
int (*drawctxt_create) (struct kgsl_device *device,
struct kgsl_pagetable *pagetable, struct kgsl_context *context,
uint32_t flags);
void (*drawctxt_destroy) (struct kgsl_device *device,
struct kgsl_context *context);
long (*ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
};
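/*
* Illustration only: a client GPU driver defines one static table and
* points device->ftbl at it; the names below are hypothetical. Mandatory
* hooks must all be non-NULL, optional hooks may be left out.
*
*    static const struct kgsl_functable example_functable = {
*        .regread = example_regread,
*        .regwrite = example_regwrite,
*        .idle = example_idle,
*        .isidle = example_isidle,
*        .suspend_context = example_suspend_context,
*        .start = example_start,
*        .stop = example_stop,
*        .getproperty = example_getproperty,
*        .waittimestamp = example_waittimestamp,
*        .readtimestamp = example_readtimestamp,
*        .issueibcmds = example_issueibcmds,
*        .setup_pt = example_setup_pt,
*        .cleanup_pt = example_cleanup_pt,
*        .power_stats = example_power_stats,
*        .irqctrl = example_irqctrl,
*        .drawctxt_create = example_drawctxt_create,
*        .drawctxt_destroy = example_drawctxt_destroy,
*    };
*/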
struct kgsl_memregion {
@ -129,6 +110,23 @@ struct kgsl_memregion {
unsigned int sizebytes;
};
/* MH register values */
struct kgsl_mh {
unsigned int mharb;
unsigned int mh_intf_cfg1;
unsigned int mh_intf_cfg2;
uint32_t mpu_base;
int mpu_range;
};
struct kgsl_event {
uint32_t timestamp;
void (*func)(struct kgsl_device *, void *, u32);
void *priv;
struct list_head list;
};
struct kgsl_device {
struct device *dev;
const char *name;
@ -140,9 +138,10 @@ struct kgsl_device {
struct kgsl_memdesc memstore;
const char *iomemname;
struct kgsl_mh mh;
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
struct kgsl_functable ftbl;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
struct kgsl_pwrctrl pwrctrl;
@ -163,6 +162,7 @@ struct kgsl_device {
struct completion recovery_gate;
struct dentry *d_debugfs;
struct idr context_idr;
struct early_suspend display_off;
/* Logging levels */
int cmd_log;
@ -173,6 +173,8 @@ struct kgsl_device {
struct wake_lock idle_wakelock;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
struct work_struct ts_expired_ws;
struct list_head events;
};
struct kgsl_context {
@ -215,12 +217,60 @@ struct kgsl_power_stats {
struct kgsl_device *kgsl_get_device(int dev_idx);
static inline void kgsl_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
device->ftbl->regread(device, offsetwords, value);
}
static inline void kgsl_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
device->ftbl->regwrite(device, offsetwords, value);
}
static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout)
{
return device->ftbl->idle(device, timeout);
}
static inline int kgsl_create_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int ret = 0, i;
for (i = 0; list[i] != NULL; i++)
ret |= device_create_file(root, list[i]);
return ret;
}
static inline void kgsl_remove_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int i;
for (i = 0; list[i] != NULL; i++)
device_remove_file(root, list[i]);
}
static inline struct kgsl_mmu *
kgsl_get_mmu(struct kgsl_device *device)
{
return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
}
static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
return kgsl_driver.devp[i];
}
return NULL;
}
static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
{
device->work_queue = create_workqueue(device->name);
@ -244,4 +294,16 @@ kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
return (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
}
int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp);
int kgsl_register_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_unregister_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_device_platform_probe(struct kgsl_device *device,
irqreturn_t (*dev_isr) (int, void*));
void kgsl_device_platform_remove(struct kgsl_device *device);
#endif /* __KGSL_DEVICE_H */

@ -293,7 +293,6 @@ kgsl_gem_alloc_memory(struct drm_gem_object *obj)
}
priv->memdesc.size = obj->size * priv->bufcount;
priv->memdesc.ops = &kgsl_contig_ops;
} else if (TYPE_IS_MEM(priv->type)) {
priv->memdesc.hostptr =

@ -0,0 +1,766 @@
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno_ringbuffer.h"
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}
static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n",
pool->static_entries);
}
static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}
static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}
static struct kobj_attribute attr_ptpool_entries = {
.attr = { .name = "ptpool_entries", .mode = 0444 },
.show = sysfs_show_ptpool_entries,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_min = {
.attr = { .name = "ptpool_min", .mode = 0444 },
.show = sysfs_show_ptpool_min,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_chunks = {
.attr = { .name = "ptpool_chunks", .mode = 0444 },
.show = sysfs_show_ptpool_chunks,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_ptsize = {
.attr = { .name = "ptpool_ptsize", .mode = 0444 },
.show = sysfs_show_ptpool_ptsize,
.store = NULL,
};
static struct attribute *ptpool_attrs[] = {
&attr_ptpool_entries.attr,
&attr_ptpool_min.attr,
&attr_ptpool_chunks.attr,
&attr_ptpool_ptsize.attr,
NULL,
};
static struct attribute_group ptpool_attr_group = {
.attrs = ptpool_attrs,
};
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
struct kgsl_ptpool_chunk *chunk;
size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
BUG_ON(count == 0);
if (get_order(size) >= MAX_ORDER) {
KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
return -EINVAL;
}
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
return -ENOMEM;
}
chunk->size = size;
chunk->count = count;
chunk->dynamic = dynamic;
chunk->data = dma_alloc_coherent(NULL, size,
&chunk->phys, GFP_KERNEL);
if (chunk->data == NULL) {
KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
goto err;
}
chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
if (chunk->bitmap == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
BITS_TO_LONGS(count) * 4);
goto err_dma;
}
list_add_tail(&chunk->list, &pool->list);
pool->chunks++;
pool->entries += count;
if (!dynamic)
pool->static_entries += count;
return 0;
err_dma:
dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
kfree(chunk);
return -ENOMEM;
}
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
struct kgsl_ptpool_chunk *chunk;
list_for_each_entry(chunk, &pool->list, list) {
int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
if (bit >= chunk->count)
continue;
set_bit(bit, chunk->bitmap);
*physaddr = chunk->phys + (bit * pool->ptsize);
return chunk->data + (bit * pool->ptsize);
}
return NULL;
}
/**
* kgsl_ptpool_add
* @pool: A pointer to a ptpool structure
* @count: Number of entries to add
*
* Add static entries to the pagetable pool.
*/
static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
int ret = 0;
BUG_ON(count == 0);
mutex_lock(&pool->lock);
/* Only 4MB can be allocated in one chunk, so larger allocations
need to be split into multiple sections */
while (count) {
int entries = ((count * pool->ptsize) > SZ_4M) ?
SZ_4M / pool->ptsize : count;
/* Add the entries as static, i.e. they don't ever stand
a chance of being removed */
ret = _kgsl_ptpool_add_entries(pool, entries, 0);
if (ret)
break;
count -= entries;
}
mutex_unlock(&pool->lock);
return ret;
}
/**
* kgsl_ptpool_alloc
* @pool: A pointer to a ptpool structure
* @physaddr: A pointer to store the physical address of the pagetable
*
* Allocate a pagetable from the pool. Returns the virtual address
* of the pagetable; the physical address is returned in physaddr.
*/
static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
unsigned int *physaddr)
{
void *addr = NULL;
int ret;
mutex_lock(&pool->lock);
addr = _kgsl_ptpool_get_entry(pool, physaddr);
if (addr)
goto done;
/* Add a chunk for 1 more pagetable and mark it as dynamic */
ret = _kgsl_ptpool_add_entries(pool, 1, 1);
if (ret)
goto done;
addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
mutex_unlock(&pool->lock);
return addr;
}
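/*
* Usage sketch (illustration only): callers receive both the kernel
* virtual address and the physical address of a pagetable:
*
*    unsigned int physaddr;
*    void *hostptr = kgsl_ptpool_alloc(pool, &physaddr);
*    if (hostptr == NULL)
*        return -ENOMEM;
*
* kgsl_gpummu_create_pagetable() below uses this pattern to fill in
* gpummu_pt->base.hostptr and gpummu_pt->base.physaddr.
*/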
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
list_del(&chunk->list);
if (chunk->data)
dma_free_coherent(NULL, chunk->size, chunk->data,
chunk->phys);
kfree(chunk->bitmap);
kfree(chunk);
}
/**
* kgsl_ptpool_free
* @pool: A pointer to a ptpool structure
* @addr: A pointer to the virtual address to free
*
* Free a pagetable allocated from the pool
*/
static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
struct kgsl_ptpool_chunk *chunk, *tmp;
if (pool == NULL || addr == NULL)
return;
mutex_lock(&pool->lock);
list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
if (addr >= chunk->data &&
addr < chunk->data + chunk->size) {
int bit = ((unsigned long) (addr - chunk->data)) /
pool->ptsize;
clear_bit(bit, chunk->bitmap);
memset(addr, 0, pool->ptsize);
if (chunk->dynamic &&
bitmap_empty(chunk->bitmap, chunk->count))
_kgsl_ptpool_rm_chunk(chunk);
break;
}
}
mutex_unlock(&pool->lock);
}
void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
struct kgsl_ptpool_chunk *chunk, *tmp;
if (pool == NULL)
return;
mutex_lock(&pool->lock);
list_for_each_entry_safe(chunk, tmp, &pool->list, list)
_kgsl_ptpool_rm_chunk(chunk);
mutex_unlock(&pool->lock);
kfree(pool);
}
/**
* kgsl_gpummu_ptpool_init
* @ptsize: The size of each pagetable
* @entries: The number of initial entries to add to the pool
*
* Initialize a pool and allocate an initial chunk of entries.
* Returns a pointer to the new pool or NULL on failure.
*/
void *kgsl_gpummu_ptpool_init(int ptsize, int entries)
{
struct kgsl_ptpool *pool;
int ret = 0;
BUG_ON(ptsize == 0);
pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
if (!pool) {
KGSL_CORE_ERR("Failed to allocate memory "
"for ptpool\n");
return NULL;
}
pool->ptsize = ptsize;
mutex_init(&pool->lock);
INIT_LIST_HEAD(&pool->list);
if (entries) {
ret = kgsl_ptpool_add(pool, entries);
if (ret)
goto err_ptpool_remove;
}
ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
if (ret) {
KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
"statistics: %d\n", ret);
goto err_ptpool_remove;
}
return (void *)pool;
err_ptpool_remove:
kgsl_gpummu_ptpool_destroy(pool);
return NULL;
}
int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
/* check pt before dereferencing it */
struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}
void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
mmu_specific_pt;
kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
gpummu_pt->base.hostptr);
kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
kfree(gpummu_pt->tlbflushfilter.base);
kfree(gpummu_pt);
}
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
return (va - va_base) >> PAGE_SHIFT;
}
static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
writel_relaxed(val, &baseptr[pte]);
}
static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
return readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
}
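/*
* Illustration only: each PTE is one 32-bit word holding a page-aligned
* physical address in the upper bits and status flags in the low bits
* (GSL_PT_PAGE_BITS_MASK). Mapping physical page 0x10234000 with
* protection flags protflags amounts to
*
*    kgsl_pt_map_set(pt, pte, 0x10234000 | protflags);
*
* and kgsl_pt_map_get() masks the flags back off with
* GSL_PT_PAGE_ADDR_MASK before returning the address.
*/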
static unsigned int kgsl_gpummu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id)
{
unsigned int result = 0;
struct kgsl_gpummu_pt *gpummu_pt;
if (pt == NULL)
return 0;
gpummu_pt = pt->priv;
spin_lock(&pt->lock);
if (gpummu_pt->tlb_flags & (1<<id)) {
result = KGSL_MMUFLAGS_TLBFLUSH;
gpummu_pt->tlb_flags &= ~(1<<id);
}
spin_unlock(&pt->lock);
return result;
}
static void kgsl_gpummu_pagefault(struct kgsl_device *device)
{
unsigned int reg;
unsigned int ptbase;
kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
KGSL_MEM_CRIT(device,
"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
reg & ~(PAGE_SIZE - 1),
kgsl_mmu_get_ptname_from_ptbase(ptbase),
reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}
static void *kgsl_gpummu_create_pagetable(void)
{
struct kgsl_gpummu_pt *gpummu_pt;
gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
GFP_KERNEL);
if (!gpummu_pt)
return NULL;
gpummu_pt->tlb_flags = 0;
gpummu_pt->last_superpte = 0;
gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
gpummu_pt->tlbflushfilter.base = (unsigned int *)
kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
if (!gpummu_pt->tlbflushfilter.base) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
gpummu_pt->tlbflushfilter.size);
goto err_free_gpummu;
}
GSL_TLBFLUSH_FILTER_RESET();
gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
kgsl_driver.ptpool,
&gpummu_pt->base.physaddr);
if (gpummu_pt->base.hostptr == NULL)
goto err_flushfilter;
/* ptpool allocations are from coherent memory, so update the
device statistics accordingly */
KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;
return (void *)gpummu_pt;
err_flushfilter:
kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
kfree(gpummu_pt);
return NULL;
}
static void kgsl_gpummu_default_setstate(struct kgsl_device *device,
uint32_t flags)
{
struct kgsl_gpummu_pt *gpummu_pt;
if (!kgsl_mmu_enabled())
return;
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
gpummu_pt = device->mmu.hwpagetable->priv;
kgsl_regwrite(device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/* Invalidate all and tc */
kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
}
}
static void kgsl_gpummu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_gpummu_pt *gpummu_pt;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* if the page table is not current, set up the MMU to use
* the newly specified page table
*/
if (mmu->hwpagetable != pagetable) {
mmu->hwpagetable = pagetable;
spin_lock(&mmu->hwpagetable->lock);
gpummu_pt = mmu->hwpagetable->priv;
gpummu_pt->tlb_flags &= ~(1<<device->id);
spin_unlock(&mmu->hwpagetable->lock);
/* call device specific set page table */
kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
}
static int kgsl_gpummu_init(struct kgsl_device *device)
{
/*
* initialize the device MMU
*
* call this with the global lock held
*/
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
mmu->device = device;
/* sub-client MMU lookups require address translation */
if ((mmu->config & ~0x1) > 0) {
/* make sure the virtual address range is a multiple of 64KB */
if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
KGSL_CORE_ERR("Invalid pagetable size requested "
"for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
return -EINVAL;
}
/* allocate memory used for completing r/w operations that
* cannot be mapped by the MMU
*/
status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
if (!status)
kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
}
dev_info(device->dev, "|%s| MMU type set for device is GPUMMU\n",
__func__);
return status;
}
static int kgsl_gpummu_start(struct kgsl_device *device)
{
/*
* initialize the device MMU
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_gpummu_pt *gpummu_pt;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
/* MMU not enabled */
if ((mmu->config & 0x1) == 0)
return 0;
/* setup MMU and sub-client behavior */
kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
/* idle device */
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
/* enable axi interrupts */
kgsl_regwrite(device, MH_INTERRUPT_MASK,
GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
* to complete transactions in case of an MMU fault. Note that
* we'll leave the bottom 32 bytes of the setstate_memory for other
* purposes (e.g. use it when dummy read cycles are needed
* for other blocks) */
kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
mmu->setstate_memory.physaddr + 32);
if (mmu->defaultpagetable == NULL)
mmu->defaultpagetable =
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
if (mmu->defaultpagetable == NULL)
return -ENOMEM;
mmu->hwpagetable = mmu->defaultpagetable;
gpummu_pt = mmu->hwpagetable->priv;
kgsl_regwrite(device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
kgsl_regwrite(device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
mmu->flags |= KGSL_FLAGS_STARTED;
return 0;
}
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc)
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
unsigned int range = memdesc->size;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions perturb the gpuaddr with an offset, so apply the
mask here to make sure we have the right address */
unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
numpages = (range >> PAGE_SHIFT);
if (range & (PAGE_SIZE - 1))
numpages++;
ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
ptelast = ptefirst + numpages;
superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
/* check if PTE exists */
if (!kgsl_pt_map_get(gpummu_pt, pte))
KGSL_CORE_ERR("pt entry %x is already "
"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
if (pte == superpte)
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
GSL_PT_SUPER_PTE);
}
/* Post all writes to the pagetable */
wmb();
return 0;
}
#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
static int
kgsl_gpummu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
unsigned int pte;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
struct scatterlist *s;
int flushtlb = 0;
int i;
pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
/* Flush the TLB if the first PTE isn't at the superpte boundary */
if (pte & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
unsigned int paddr = sg_phys(s);
unsigned int j;
/* Each sg entry might be multiple pages long */
for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
if (SUPERPTE_IS_DIRTY(pte))
flushtlb = 1;
kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
}
}
/* Flush the TLB if the last PTE isn't at the superpte boundary */
if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
wmb();
if (flushtlb) {
/*set all devices as needing flushing*/
gpummu_pt->tlb_flags = UINT_MAX;
GSL_TLBFLUSH_FILTER_RESET();
}
return 0;
}
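/*
* Illustration only: PTEs are grouped into superptes of GSL_PT_SUPER_PTE
* entries, and the TLB flush filter keeps one dirty bit per superpte.
* A map forces a TLB flush only when its range starts or ends off a
* superpte boundary, or when it reuses a superpte that a previous unmap
* marked dirty; maps that land entirely in clean, aligned superptes can
* skip the flush.
*/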
static int kgsl_gpummu_stop(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
mmu->flags &= ~KGSL_FLAGS_STARTED;
return 0;
}
static int kgsl_gpummu_close(struct kgsl_device *device)
{
/*
* close device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->setstate_memory.gpuaddr)
kgsl_sharedmem_free(&mmu->setstate_memory);
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
return 0;
}
static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_device *device)
{
unsigned int ptbase;
kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
return ptbase;
}
struct kgsl_mmu_ops gpummu_ops = {
.mmu_init = kgsl_gpummu_init,
.mmu_close = kgsl_gpummu_close,
.mmu_start = kgsl_gpummu_start,
.mmu_stop = kgsl_gpummu_stop,
.mmu_setstate = kgsl_gpummu_setstate,
.mmu_device_setstate = kgsl_gpummu_default_setstate,
.mmu_pagefault = kgsl_gpummu_pagefault,
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
};
struct kgsl_mmu_pt_ops gpummu_pt_ops = {
.mmu_map = kgsl_gpummu_map,
.mmu_unmap = kgsl_gpummu_unmap,
.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
.mmu_pt_equal = kgsl_gpummu_pt_equal,
.mmu_pt_get_flags = kgsl_gpummu_pt_get_flags,
};
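/*
* Illustration only: the generic MMU layer dispatches through these ops
* tables so the same core code can drive either the GPUMMU or the IOMMU
* backend, e.g. (hypothetical call site):
*
*    if (mmu->mmu_ops && mmu->mmu_ops->mmu_setstate)
*        mmu->mmu_ops->mmu_setstate(device, pagetable);
*/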

@ -0,0 +1,85 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_GPUMMU_H
#define __KGSL_GPUMMU_H
#define GSL_PT_PAGE_BITS_MASK 0x00000007
#define GSL_PT_PAGE_ADDR_MASK PAGE_MASK
#define GSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)gpummu_pt->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(gpummu_pt->tlbflushfilter.base,\
0, gpummu_pt->tlbflushfilter.size)
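/*
* Worked example (illustration only): the filter packs one dirty bit per
* superpte, eight to an unsigned char. For superpte 19:
*
*    byte index = 19 / 8 = 2, bit = 19 % 8 = 3
*
* so GSL_TLBFLUSH_FILTER_SETDIRTY(19) ORs 0x08 into the third byte of
* tlbflushfilter.base and GSL_TLBFLUSH_FILTER_ISDIRTY(19) tests that bit.
*/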
extern struct kgsl_mmu_ops gpummu_ops;
extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
struct kgsl_gpummu_pt {
struct kgsl_memdesc base;
unsigned int last_superpte;
unsigned int tlb_flags;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
};
struct kgsl_ptpool_chunk {
size_t size;
unsigned int count;
int dynamic;
void *data;
unsigned int phys;
unsigned long *bitmap;
struct list_head list;
};
struct kgsl_ptpool {
size_t ptsize;
struct mutex lock;
struct list_head list;
int entries;
int static_entries;
int chunks;
};
void *kgsl_gpummu_ptpool_init(int ptsize,
int entries);
void kgsl_gpummu_ptpool_destroy(void *ptpool);
static inline unsigned int kgsl_pt_get_base_addr(struct kgsl_pagetable *pt)
{
struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
return gpummu_pt->base.gpuaddr;
}
#endif /* __KGSL_GPUMMU_H */

@ -0,0 +1,333 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <mach/iommu.h>
#include <linux/msm_kgsl.h>
#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"
struct kgsl_iommu {
struct device *iommu_user_dev;
int iommu_user_dev_attached;
struct device *iommu_priv_dev;
int iommu_priv_dev_attached;
};
static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
struct iommu_domain *domain;
if (!pt || !pt_base)
return 0;
domain = pt->priv;
return (unsigned int)domain == pt_base;
}
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
struct iommu_domain *domain = mmu_specific_pt;
if (domain)
iommu_domain_free(domain);
}
void *kgsl_iommu_create_pagetable(void)
{
struct iommu_domain *domain = iommu_domain_alloc(0);
if (!domain)
KGSL_CORE_ERR("Failed to create iommu domain\n");
return domain;
}
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct iommu_domain *domain;
struct kgsl_iommu *iommu = mmu->priv;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
domain = mmu->hwpagetable->priv;
if (iommu->iommu_user_dev_attached) {
iommu_detach_device(domain, iommu->iommu_user_dev);
iommu->iommu_user_dev_attached = 0;
KGSL_MEM_INFO(mmu->device,
"iommu %p detached from user dev of MMU: %p\n",
domain, mmu);
}
if (iommu->iommu_priv_dev_attached) {
iommu_detach_device(domain, iommu->iommu_priv_dev);
iommu->iommu_priv_dev_attached = 0;
KGSL_MEM_INFO(mmu->device,
"iommu %p detached from priv dev of MMU: %p\n",
domain, mmu);
}
}
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct iommu_domain *domain;
int ret = 0;
struct kgsl_iommu *iommu = mmu->priv;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
domain = mmu->hwpagetable->priv;
if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) {
ret = iommu_attach_device(domain, iommu->iommu_user_dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
"Failed to attach device, err %d\n", ret);
goto done;
}
iommu->iommu_user_dev_attached = 1;
KGSL_MEM_INFO(mmu->device,
"iommu %p attached to user dev of MMU: %p\n",
domain, mmu);
}
if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) {
ret = iommu_attach_device(domain, iommu->iommu_priv_dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
"Failed to attach device, err %d\n", ret);
iommu_detach_device(domain, iommu->iommu_user_dev);
iommu->iommu_user_dev_attached = 0;
goto done;
}
iommu->iommu_priv_dev_attached = 1;
KGSL_MEM_INFO(mmu->device,
"iommu %p attached to priv dev of MMU: %p\n",
domain, mmu);
}
done:
return ret;
}
static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu,
struct kgsl_device *device)
{
int status = 0;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
if (pdata_dev->iommu_user_ctx_name)
iommu->iommu_user_dev = msm_iommu_get_ctx(
pdata_dev->iommu_user_ctx_name);
if (pdata_dev->iommu_priv_ctx_name)
iommu->iommu_priv_dev = msm_iommu_get_ctx(
pdata_dev->iommu_priv_ctx_name);
if (!iommu->iommu_user_dev) {
KGSL_CORE_ERR("Failed to get user iommu dev handle for "
"device %s\n",
pdata_dev->iommu_user_ctx_name);
status = -EINVAL;
}
return status;
}
static void kgsl_iommu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* if the requested page table is not current, set up the
 * mmu to use the newly specified page table
 */
if (mmu->hwpagetable != pagetable) {
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->hwpagetable = pagetable;
if (mmu->hwpagetable)
kgsl_attach_pagetable_iommu_domain(mmu);
}
}
}
static int kgsl_iommu_init(struct kgsl_device *device)
{
/*
* initialize device mmu
*
* call this with the global lock held
*/
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_iommu *iommu;
mmu->device = device;
iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
if (!iommu) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
sizeof(struct kgsl_iommu));
return -ENOMEM;
}
iommu->iommu_priv_dev_attached = 0;
iommu->iommu_user_dev_attached = 0;
status = kgsl_get_iommu_ctxt(iommu, device);
if (status) {
kfree(iommu);
iommu = NULL;
}
mmu->priv = iommu;
dev_info(device->dev, "|%s| MMU type set for device is IOMMU\n",
__func__);
return status;
}
static int kgsl_iommu_start(struct kgsl_device *device)
{
int status;
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
if (mmu->defaultpagetable == NULL)
mmu->defaultpagetable =
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
if (mmu->defaultpagetable == NULL)
return -ENOMEM;
mmu->hwpagetable = mmu->defaultpagetable;
status = kgsl_attach_pagetable_iommu_domain(mmu);
if (!status)
mmu->flags |= KGSL_FLAGS_STARTED;
return status;
}
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc)
{
int ret;
unsigned int range = memdesc->size;
struct iommu_domain *domain = (struct iommu_domain *)
mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions perturb the gpuaddr with an offset, so apply the
mask here to make sure we have the right address */
unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
if (range == 0 || gpuaddr == 0)
return 0;
ret = iommu_unmap_range(domain, gpuaddr, range);
if (ret)
KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
"with err: %d\n", domain, gpuaddr,
range, ret);
return 0;
}
static int
kgsl_iommu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
int ret;
unsigned int iommu_virt_addr;
struct iommu_domain *domain = mmu_specific_pt;
BUG_ON(NULL == domain);
iommu_virt_addr = memdesc->gpuaddr;
ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
memdesc->size, MSM_IOMMU_ATTR_NONCACHED);
if (ret) {
KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
"failed with err: %d\n", domain,
iommu_virt_addr, memdesc->sg, memdesc->size,
MSM_IOMMU_ATTR_NONCACHED, ret);
return ret;
}
return ret;
}
static int kgsl_iommu_stop(struct kgsl_device *device)
{
/*
* stop device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* detach iommu attachment */
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->flags &= ~KGSL_FLAGS_STARTED;
}
return 0;
}
static int kgsl_iommu_close(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
return 0;
}
static unsigned int
kgsl_iommu_get_current_ptbase(struct kgsl_device *device)
{
/* The current base is always the hwpagetable's domain, as we
 * do not use per-process pagetables right now for the IOMMU.
 * This will change when we switch to per-process pagetables.
 */
return (unsigned int)device->mmu.hwpagetable->priv;
}
struct kgsl_mmu_ops iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
.mmu_start = kgsl_iommu_start,
.mmu_stop = kgsl_iommu_stop,
.mmu_setstate = kgsl_iommu_setstate,
.mmu_device_setstate = NULL,
.mmu_pagefault = NULL,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
.mmu_map = kgsl_iommu_map,
.mmu_unmap = kgsl_iommu_unmap,
.mmu_create_pagetable = kgsl_iommu_create_pagetable,
.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
.mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_pt_get_flags = NULL,
};
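Because kgsl_iommu_get_current_ptbase returns the domain pointer itself, kgsl_iommu_pt_equal reduces to a pointer comparison. A hedged sketch of the kind of lookup the core layer can build from these two hooks (the list walk is illustrative; the real one lives in the generic MMU code):

/* Illustrative only: find the pagetable whose backend-specific base
 * matches what the hardware is currently using. */
static struct kgsl_pagetable *
example_find_pagetable(struct list_head *pt_list, unsigned int pt_base)
{
	struct kgsl_pagetable *pt;

	list_for_each_entry(pt, pt_list, list)
		if (pt->pt_ops->mmu_pt_equal(pt, pt_base))
			return pt;
	return NULL;
}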

@ -1,29 +1,13 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_LOG_H

File diff suppressed because it is too large

@ -1,34 +1,20 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_MMU_H
#define __KGSL_MMU_H
#include "kgsl_sharedmem.h"
#define KGSL_MMU_ALIGN_SHIFT 13
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
/* Identifier for the global page table */
/* Per process page tables will probably pass in the thread group
@ -36,10 +22,52 @@
#define KGSL_MMU_GLOBAL_PT 0
struct kgsl_device;
#define GSL_PT_SUPER_PTE 8
#define GSL_PT_PAGE_WV 0x00000001
#define GSL_PT_PAGE_RV 0x00000002
#define GSL_PT_PAGE_DIRTY 0x00000004
/* MMU registers - the register locations for all cores are the
same. The method for getting to those locations differs between
2D and 3D, but the 2D and 3D register functions do that magic
for us */
#define MH_MMU_CONFIG 0x0040
#define MH_MMU_VA_RANGE 0x0041
#define MH_MMU_PT_BASE 0x0042
#define MH_MMU_PAGE_FAULT 0x0043
#define MH_MMU_TRAN_ERROR 0x0044
#define MH_MMU_INVALIDATE 0x0045
#define MH_MMU_MPU_BASE 0x0046
#define MH_MMU_MPU_END 0x0047
#define MH_INTERRUPT_MASK 0x0A42
#define MH_INTERRUPT_STATUS 0x0A43
#define MH_INTERRUPT_CLEAR 0x0A44
#define MH_AXI_ERROR 0x0A45
#define MH_ARBITER_CONFIG 0x0A40
#define MH_DEBUG_CTRL 0x0A4E
#define MH_DEBUG_DATA 0x0A4F
#define MH_AXI_HALT_CONTROL 0x0A50
#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
/* MH_MMU_CONFIG bit definitions */
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
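Each *_CLNT_BEHAVIOR shift names a small per-client field inside MH_MMU_CONFIG, so a config word is assembled by OR-ing shifted field values together. A sketch of that composition; the enable bit and the behavior code used here are assumptions for demonstration, since the field encodings are hardware-defined and not spelled out in this header:

/* Illustrative: build an MMU config word. Bit 0 as "enable" and the
 * behavior value 3 are assumptions, not documented encodings. */
static unsigned int example_mh_mmu_config(void)
{
	unsigned int config = 0x00000001;	/* assumed MMU enable bit */

	config |= 3 << MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT;
	config |= 3 << MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT;
	return config;
}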
/* MMU Flags */
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
@ -48,43 +76,30 @@
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)pagetable->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(pagetable->tlbflushfilter.base,\
0, pagetable->tlbflushfilter.size)
#ifdef CONFIG_MSM_KGSL_MMU
#define KGSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
#else
#define KGSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
#endif
struct kgsl_device;
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
enum kgsl_mmutype {
KGSL_MMU_TYPE_GPU = 0,
KGSL_MMU_TYPE_IOMMU,
KGSL_MMU_TYPE_NONE
};
struct kgsl_pagetable {
spinlock_t lock;
unsigned int refcnt;
struct kgsl_memdesc base;
uint32_t va_base;
unsigned int va_range;
unsigned int last_superpte;
struct kref refcount;
unsigned int max_entries;
struct gen_pool *pool;
struct list_head list;
unsigned int name;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
unsigned int tlb_flags;
struct kobject *kobj;
struct {
@ -93,22 +108,36 @@ struct kgsl_pagetable {
unsigned int max_mapped;
unsigned int max_entries;
} stats;
const struct kgsl_mmu_pt_ops *pt_ops;
void *priv;
};
struct kgsl_mmu_reg {
struct kgsl_mmu_ops {
int (*mmu_init) (struct kgsl_device *device);
int (*mmu_close) (struct kgsl_device *device);
int (*mmu_start) (struct kgsl_device *device);
int (*mmu_stop) (struct kgsl_device *device);
void (*mmu_setstate) (struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*mmu_device_setstate) (struct kgsl_device *device,
uint32_t flags);
void (*mmu_pagefault) (struct kgsl_device *device);
unsigned int (*mmu_get_current_ptbase)
(struct kgsl_device *device);
};
uint32_t config;
uint32_t mpu_base;
uint32_t mpu_end;
uint32_t va_range;
uint32_t pt_page;
uint32_t page_fault;
uint32_t tran_error;
uint32_t invalidate;
uint32_t interrupt_mask;
uint32_t interrupt_status;
uint32_t interrupt_clear;
uint32_t axi_error;
struct kgsl_mmu_pt_ops {
int (*mmu_map) (void *mmu_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags);
int (*mmu_unmap) (void *mmu_pt,
struct kgsl_memdesc *memdesc);
void *(*mmu_create_pagetable) (void);
void (*mmu_destroy_pagetable) (void *pt);
int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
unsigned int pt_base);
unsigned int (*mmu_pt_get_flags) (struct kgsl_pagetable *pt,
enum kgsl_deviceid id);
};
struct kgsl_mmu {
@ -116,46 +145,27 @@ struct kgsl_mmu {
uint32_t flags;
struct kgsl_device *device;
unsigned int config;
uint32_t mpu_base;
int mpu_range;
struct kgsl_memdesc dummyspace;
struct kgsl_mmu_reg reg;
struct kgsl_memdesc setstate_memory;
/* current page table object being used by device mmu */
struct kgsl_pagetable *defaultpagetable;
struct kgsl_pagetable *hwpagetable;
const struct kgsl_mmu_ops *mmu_ops;
void *priv;
};
struct kgsl_ptpool_chunk {
size_t size;
unsigned int count;
int dynamic;
#include "kgsl_gpummu.h"
void *data;
unsigned int phys;
unsigned long *bitmap;
struct list_head list;
};
struct kgsl_ptpool {
size_t ptsize;
struct mutex lock;
struct list_head list;
int entries;
int static_entries;
int chunks;
};
extern struct kgsl_mmu_ops iommu_ops;
extern struct kgsl_mmu_pt_ops iommu_pt_ops;
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
#ifdef CONFIG_MSM_KGSL_MMU
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
void kgsl_mh_start(struct kgsl_device *device);
void kgsl_mh_intrcallback(struct kgsl_device *device);
int kgsl_mmu_init(struct kgsl_device *device);
int kgsl_mmu_start(struct kgsl_device *device);
int kgsl_mmu_stop(struct kgsl_device *device);
int kgsl_mmu_close(struct kgsl_device *device);
int kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags);
@ -163,105 +173,21 @@ int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, unsigned int protflags);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
void kgsl_ptpool_destroy(struct kgsl_ptpool *pool);
int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries);
void kgsl_mh_intrcallback(struct kgsl_device *device);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
void kgsl_setstate(struct kgsl_device *device, uint32_t flags);
void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags);
void kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pt);
int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id);
static inline int kgsl_mmu_enabled(void)
{
return 1;
}
#else
static inline int kgsl_mmu_enabled(void)
{
return 0;
}
static inline int kgsl_mmu_init(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_start(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_stop(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_close(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
return 0;
}
static inline int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
memdesc->gpuaddr = memdesc->physaddr;
return 0;
}
static inline int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
return 0;
}
static inline int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize,
int entries)
{
return 0;
}
static inline int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, unsigned int protflags)
{
/* gpuaddr remains the same as the one passed in */
return 0;
}
static inline void kgsl_ptpool_destroy(struct kgsl_ptpool *pool) { }
static inline void kgsl_mh_intrcallback(struct kgsl_device *device) { }
static inline void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) { }
static inline unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
return 0;
}
#endif
static inline unsigned int kgsl_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id)
{
unsigned int result = 0;
if (pt == NULL)
return 0;
spin_lock(&pt->lock);
if (pt->tlb_flags & (1 << id)) {
result = KGSL_MMUFLAGS_TLBFLUSH;
pt->tlb_flags &= ~(1<<id);
}
spin_unlock(&pt->lock);
return result;
}
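A caller folds the returned flag into its next setstate request so a pending TLB flush rides along with the page-table update. A hedged sketch of that call pattern, using names declared in this header plus device->id, which the pwrctrl code elsewhere in this commit also uses (the helper itself is illustrative):

/* Illustrative: request a page-table update, adding a TLB flush
 * only if this device still has one pending for pt. */
static void example_setstate(struct kgsl_device *device,
			     struct kgsl_pagetable *pt)
{
	uint32_t flags = KGSL_MMUFLAGS_PTUPDATE;

	flags |= kgsl_pt_get_flags(pt, device->id);
	kgsl_setstate(device, flags);
}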
void kgsl_mmu_ptpool_destroy(void *ptpool);
void *kgsl_mmu_ptpool_init(int ptsize, int entries);
int kgsl_mmu_enabled(void);
int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base);
void kgsl_mmu_set_mmutype(char *mmutype);
unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device);
enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
#endif /* __KGSL_MMU_H */

@ -1,4 +1,5 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -11,53 +12,22 @@
*
*/
#include <linux/interrupt.h>
#include <linux/err.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3
#define SWITCH_OFF 200
#define TZ_UPDATE_ID 0x01404000
#define TZ_RESET_ID 0x01403000
#ifdef CONFIG_MSM_SECURE_IO
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_entry(u32 cmd, u32 val)
{
register u32 r0 asm("r0") = cmd;
register u32 r1 asm("r1") = 0x0;
register u32 r2 asm("r2") = val;
__iowmb();
asm(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
);
return r0;
}
#else
static int __secure_tz_entry(u32 cmd, u32 val)
{
return 0;
}
#endif /* CONFIG_MSM_SECURE_IO */
/* Returns the requested update to our power level. *
* Either up/down (-1/1) a level, or stay the same (0). */
static inline int kgsl_pwrctrl_tz_update(u32 idle)
{
return __secure_tz_entry(TZ_UPDATE_ID, idle);
}
static inline void kgsl_pwrctrl_tz_reset(void)
{
__secure_tz_entry(TZ_RESET_ID, 0);
}
#define GPU_SWFI_LATENCY 3
#define UPDATE_BUSY_VAL 1000000
#define UPDATE_BUSY 50
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int new_level)
@ -67,16 +37,18 @@ void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
new_level >= pwr->thermal_pwrlevel &&
new_level != pwr->active_pwrlevel) {
pwr->active_pwrlevel = new_level;
if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
(device->state == KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
if (pwr->ebi1_clk)
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
KGSL_PWR_WARN(device, "pwr level changed to %d\n",
}
KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
pwr->active_pwrlevel);
}
}
@ -257,41 +229,20 @@ static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
device->pwrctrl.interval_timeout);
}
static int kgsl_pwrctrl_scaling_governor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
unsigned int reset = pwr->idle_pass;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
if (strncmp(temp, "ondemand", 8) == 0)
reset = 1;
else if (strncmp(temp, "performance", 11) == 0)
reset = 0;
mutex_lock(&device->mutex);
pwr->idle_pass = reset;
if (pwr->idle_pass == 0)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_scaling_governor_show(struct device *dev,
static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int ret;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (pwr->idle_pass)
return snprintf(buf, 10, "ondemand\n");
else
return snprintf(buf, 13, "performance\n");
struct kgsl_busy *b = &device->pwrctrl.busy;
ret = snprintf(buf, 17, "%7d %7d\n",
b->on_time_old, b->time_old);
if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
b->on_time_old = 0;
b->time_old = 0;
}
return ret;
}
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
@ -300,15 +251,15 @@ DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(scaling_governor, 0644, kgsl_pwrctrl_scaling_governor_show,
kgsl_pwrctrl_scaling_governor_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
NULL);
static const struct device_attribute *pwrctrl_attr_list[] = {
static struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
&dev_attr_max_gpuclk,
&dev_attr_pwrnap,
&dev_attr_idle_timer,
&dev_attr_scaling_governor,
&dev_attr_gpubusy,
NULL
};
@ -322,28 +273,29 @@ void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
static void kgsl_pwrctrl_idle_calc(struct kgsl_device *device)
/* Track the amount of time the GPU is on vs. the total system time. *
* Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
int val;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_power_stats stats;
device->ftbl.device_power_stats(device, &stats);
if (stats.total_time == 0)
return;
/* If the GPU has stayed in turbo mode for a while, *
* stop writing out values. */
if (pwr->active_pwrlevel)
pwr->no_switch_cnt = 0;
else if (pwr->no_switch_cnt > SWITCH_OFF)
return;
pwr->no_switch_cnt++;
val = kgsl_pwrctrl_tz_update(stats.total_time - stats.busy_time);
if (val)
kgsl_pwrctrl_pwrlevel_change(device,
pwr->active_pwrlevel + val);
struct kgsl_busy *b = &device->pwrctrl.busy;
int elapsed;
if (b->start.tv_sec == 0)
do_gettimeofday(&(b->start));
do_gettimeofday(&(b->stop));
elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
elapsed += b->stop.tv_usec - b->start.tv_usec;
b->time += elapsed;
if (on_time)
b->on_time += elapsed;
/* Update the output regularly and reset the counters. */
if ((b->time > UPDATE_BUSY_VAL) ||
!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
b->on_time_old = b->on_time;
b->time_old = b->time;
b->on_time = 0;
b->time = 0;
}
do_gettimeofday(&(b->start));
}
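The two numbers kgsl_pwrctrl_busy_time publishes (on_time_old and time_old) are microsecond counts for the last completed window, which is what the gpubusy sysfs file prints. A minimal user-space sketch that turns them into a utilization percentage; the device node name kgsl-3d0 is an assumption based on this driver, and error handling is elided:

/* Illustrative reader for the gpubusy sysfs file. */
#include <stdio.h>

int main(void)
{
	int on_time, total;
	FILE *f = fopen("/sys/class/kgsl/kgsl-3d0/gpubusy", "r");

	if (f && fscanf(f, "%d %d", &on_time, &total) == 2 && total > 0)
		printf("GPU busy: %d%%\n", 100 * on_time / total);
	if (f)
		fclose(f);
	return 0;
}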
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
@ -363,6 +315,7 @@ void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
kgsl_pwrctrl_busy_time(device, true);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
@ -381,6 +334,7 @@ void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_enable(pwr->grp_clks[i]);
kgsl_pwrctrl_busy_time(device, false);
}
}
}
@ -399,9 +353,6 @@ void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
clk_set_rate(pwr->ebi1_clk, 0);
clk_disable(pwr->ebi1_clk);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
0);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
@ -414,10 +365,6 @@ void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
}
}
@ -458,13 +405,18 @@ void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
KGSL_PWR_INFO(device,
"irq on, device %d\n", device->id);
enable_irq(pwr->interrupt_num);
device->ftbl->irqctrl(device, 1);
}
} else if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq off, device %d\n", device->id);
disable_irq(pwr->interrupt_num);
device->ftbl->irqctrl(device, 0);
if (in_interrupt())
disable_irq_nosync(pwr->interrupt_num);
else
disable_irq(pwr->interrupt_num);
}
}
}
@ -533,9 +485,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
pwr->power_flags = 0;
pwr->nap_allowed = pdata_pwr->nap_allowed;
/* drewis: the lines below were removed at some point before I cherry-picked the commit below */
pwr->idle_pass = pdata_pwr->idle_pass;
/* dc14311... msm: kgsl: Replace internal_power_rail API calls with regulator APIs */
pwr->interval_timeout = pdata_pwr->idle_timeout;
pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
if (IS_ERR(pwr->ebi1_clk))
@ -544,19 +493,6 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
if (pdata_dev->clk.bus_scale_table != NULL) {
pwr->pcl =
msm_bus_scale_register_client(pdata_dev->clk.
bus_scale_table);
if (!pwr->pcl) {
KGSL_PWR_ERR(device,
"msm_bus_scale_register_client failed: "
"id %d table %p", device->id,
pdata_dev->clk.bus_scale_table);
result = -EINVAL;
goto done;
}
}
/* acquire interrupt */
pwr->interrupt_num =
@ -568,6 +504,8 @@ int kgsl_pwrctrl_init(struct kgsl_device *device)
result = -EINVAL;
goto done;
}
register_early_suspend(&device->display_off);
return result;
clk_err:
@ -586,6 +524,8 @@ void kgsl_pwrctrl_close(struct kgsl_device *device)
KGSL_PWR_INFO(device, "close device %d\n", device->id);
unregister_early_suspend(&device->display_off);
if (pwr->interrupt_num > 0) {
if (pwr->have_irq) {
free_irq(pwr->interrupt_num, NULL);
@ -596,8 +536,6 @@ void kgsl_pwrctrl_close(struct kgsl_device *device)
clk_put(pwr->ebi1_clk);
if (pwr->pcl)
msm_bus_scale_unregister_client(pwr->pcl);
pwr->pcl = 0;
@ -622,15 +560,22 @@ void kgsl_idle_check(struct work_struct *work)
idle_check_ws);
mutex_lock(&device->mutex);
if ((device->pwrctrl.idle_pass) &&
(device->requested_state != KGSL_STATE_SLEEP))
kgsl_pwrctrl_idle_calc(device);
if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
if (kgsl_pwrctrl_sleep(device) != 0)
if (device->requested_state != KGSL_STATE_SLEEP)
kgsl_pwrscale_idle(device);
if (kgsl_pwrctrl_sleep(device) != 0) {
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
/* If the GPU has been too busy to sleep, make sure *
* that is accurately reflected in the % busy numbers. */
device->pwrctrl.busy.no_nap_cnt++;
if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
kgsl_pwrctrl_busy_time(device, true);
device->pwrctrl.busy.no_nap_cnt = 0;
}
}
} else if (device->state & (KGSL_STATE_HUNG |
KGSL_STATE_DUMP_AND_RECOVER)) {
device->requested_state = KGSL_STATE_NONE;
@ -684,11 +629,11 @@ int kgsl_pwrctrl_sleep(struct kgsl_device *device)
/* Work through the legal state transitions */
if (device->requested_state == KGSL_STATE_NAP) {
if (device->ftbl.device_isidle(device))
if (device->ftbl->isidle(device))
goto nap;
} else if (device->requested_state == KGSL_STATE_SLEEP) {
if (device->state == KGSL_STATE_NAP ||
device->ftbl.device_isidle(device))
device->ftbl->isidle(device))
goto sleep;
}
@ -702,9 +647,10 @@ sleep:
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
device->pwrctrl.no_switch_cnt = 0;
kgsl_pwrctrl_busy_time(device, false);
pwr->busy.start.tv_sec = 0;
device->pwrctrl.time = 0;
kgsl_pwrctrl_tz_reset();
goto clk_off;
nap:
@ -730,11 +676,9 @@ void kgsl_pwrctrl_wake(struct kgsl_device *device)
return;
if (device->state != KGSL_STATE_NAP) {
if (device->pwrctrl.idle_pass)
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.thermal_pwrlevel);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
/* Turn on the core clocks */
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

@ -1,53 +1,48 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_PWRCTRL_H
#define __KGSL_PWRCTRL_H
#include <mach/internal_power_rail.h>
/*****************************************************************************
** power flags
*****************************************************************************/
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3
#define KGSL_PWRFLAGS_ON 1
#define KGSL_PWRFLAGS_OFF 0
#define KGSL_DEFAULT_PWRLEVEL 1
#define KGSL_PWRLEVEL_TURBO 0
#define KGSL_PWRLEVEL_NOMINAL 1
#define KGSL_PWRLEVEL_LOW_OFFSET 2
#define KGSL_MAX_CLKS 5
struct platform_device;
struct kgsl_busy {
struct timeval start;
struct timeval stop;
int on_time;
int time;
int on_time_old;
int time_old;
unsigned int no_nap_cnt;
};
struct kgsl_pwrctrl {
int interrupt_num;
int have_irq;
unsigned int pwr_rail;
struct clk *ebi1_clk;
struct clk *grp_clks[KGSL_MAX_CLKS];
unsigned long power_flags;
@ -59,13 +54,11 @@ struct kgsl_pwrctrl {
struct regulator *gpu_reg;
uint32_t pcl;
unsigned int nap_allowed;
struct adreno_context *suspended_ctxt;
const char *regulator_name;
const char *irq_name;
const char *src_clk_name;
s64 time;
unsigned int no_switch_cnt;
unsigned int idle_pass;
struct kgsl_busy busy;
};
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state);

@ -1,4 +1,5 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -15,6 +16,7 @@
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
struct kgsl_pwrscale_attribute {
struct attribute attr;
@ -38,6 +40,12 @@ __ATTR(_name, _mode, _show, _store)
/* Master list of available policies */
static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
#ifdef CONFIG_MSM_SCM
&kgsl_pwrscale_policy_tz,
#endif
#ifdef CONFIG_MSM_SLEEP_STATS
&kgsl_pwrscale_policy_idlestats,
#endif
NULL
};
@ -141,9 +149,6 @@ static ssize_t policy_sysfs_store(struct kobject *kobj,
static void policy_sysfs_release(struct kobject *kobj)
{
struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
complete(&pwrscale->kobj_unregister);
}
static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
@ -181,12 +186,12 @@ static void pwrscale_sysfs_release(struct kobject *kobj)
{
}
static const struct sysfs_ops policy_sysfs_ops = {
static struct sysfs_ops policy_sysfs_ops = {
.show = policy_sysfs_show,
.store = policy_sysfs_store
};
static const struct sysfs_ops pwrscale_sysfs_ops = {
static struct sysfs_ops pwrscale_sysfs_ops = {
.show = pwrscale_sysfs_show,
.store = pwrscale_sysfs_store
};
@ -220,13 +225,17 @@ EXPORT_SYMBOL(kgsl_pwrscale_wake);
void kgsl_pwrscale_busy(struct kgsl_device *device)
{
if (device->pwrscale.policy && device->pwrscale.policy->busy)
device->pwrscale.policy->busy(device, &device->pwrscale);
if (!device->pwrscale.gpu_busy)
device->pwrscale.policy->busy(device,
&device->pwrscale);
device->pwrscale.gpu_busy = 1;
}
void kgsl_pwrscale_idle(struct kgsl_device *device)
{
if (device->pwrscale.policy && device->pwrscale.policy->idle)
device->pwrscale.policy->idle(device, &device->pwrscale);
device->pwrscale.gpu_busy = 0;
}
EXPORT_SYMBOL(kgsl_pwrscale_idle);
@ -236,12 +245,8 @@ int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
{
int ret;
init_completion(&pwrscale->kobj_unregister);
ret = kobject_init_and_add(&pwrscale->kobj,
&ktype_pwrscale_policy,
&device->pwrscale_kobj,
"%s", pwrscale->policy->name);
ret = kobject_add(&pwrscale->kobj, &device->pwrscale_kobj,
"%s", pwrscale->policy->name);
if (ret)
return ret;
@ -249,8 +254,8 @@ int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
ret = sysfs_create_group(&pwrscale->kobj, attr_group);
if (ret) {
kobject_del(&pwrscale->kobj);
kobject_put(&pwrscale->kobj);
wait_for_completion(&pwrscale->kobj_unregister);
}
return ret;
@ -261,16 +266,24 @@ void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
struct attribute_group *attr_group)
{
sysfs_remove_group(&pwrscale->kobj, attr_group);
kobject_del(&pwrscale->kobj);
kobject_put(&pwrscale->kobj);
wait_for_completion(&pwrscale->kobj_unregister);
}
static void _kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
if (device->pwrscale.policy != NULL) {
device->pwrscale.policy->close(device, &device->pwrscale);
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.thermal_pwrlevel);
}
device->pwrscale.policy = NULL;
}
void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
mutex_lock(&device->mutex);
if (device->pwrscale.policy != NULL)
device->pwrscale.policy->close(device, &device->pwrscale);
device->pwrscale.policy = NULL;
_kgsl_pwrscale_detach_policy(device);
mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
@ -278,16 +291,25 @@ EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
struct kgsl_pwrscale_policy *policy)
{
int ret;
if (device->pwrscale.policy != NULL)
kgsl_pwrscale_detach_policy(device);
int ret = 0;
mutex_lock(&device->mutex);
if (device->pwrscale.policy == policy)
goto done;
if (device->pwrscale.policy != NULL)
_kgsl_pwrscale_detach_policy(device);
device->pwrscale.policy = policy;
ret = device->pwrscale.policy->init(device, &device->pwrscale);
if (ret)
device->pwrscale.policy = NULL;
if (policy) {
ret = device->pwrscale.policy->init(device, &device->pwrscale);
if (ret)
device->pwrscale.policy = NULL;
}
done:
mutex_unlock(&device->mutex);
return ret;
@ -296,8 +318,16 @@ EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
int kgsl_pwrscale_init(struct kgsl_device *device)
{
return kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
&device->dev->kobj, "pwrscale");
int ret;
ret = kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
&device->dev->kobj, "pwrscale");
if (ret)
return ret;
kobject_init(&device->pwrscale.kobj, &ktype_pwrscale_policy);
return ret;
}
EXPORT_SYMBOL(kgsl_pwrscale_init);

@ -1,29 +1,13 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
@ -51,8 +35,8 @@ struct kgsl_pwrscale_policy {
struct kgsl_pwrscale {
struct kgsl_pwrscale_policy *policy;
struct kobject kobj;
struct completion kobj_unregister;
void *priv;
int gpu_busy;
};
struct kgsl_pwrscale_policy_attribute {
@ -68,8 +52,12 @@ struct kgsl_pwrscale_policy_attribute {
struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz;
extern struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats;
int kgsl_pwrscale_init(struct kgsl_device *device);
void kgsl_pwrscale_close(struct kgsl_device *device);
int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
struct kgsl_pwrscale_policy *policy);
void kgsl_pwrscale_detach_policy(struct kgsl_device *device);

@ -0,0 +1,221 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/idle_stats_device.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/cpumask.h>
#include <linux/tick.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#define MAX_CORES 4
struct _cpu_info {
spinlock_t lock;
struct notifier_block cpu_nb;
u64 start[MAX_CORES];
u64 end[MAX_CORES];
int curr_freq[MAX_CORES];
int max_freq[MAX_CORES];
};
struct idlestats_priv {
char name[32];
struct msm_idle_stats_device idledev;
struct kgsl_device *device;
struct msm_idle_pulse pulse;
struct _cpu_info cpu_info;
};
static int idlestats_cpufreq_notifier(
struct notifier_block *nb,
unsigned long val, void *data)
{
struct _cpu_info *cpu = container_of(nb,
struct _cpu_info, cpu_nb);
struct cpufreq_freqs *freq = data;
if (val != CPUFREQ_POSTCHANGE)
return 0;
spin_lock(&cpu->lock);
if (freq->cpu < num_possible_cpus())
cpu->curr_freq[freq->cpu] = freq->new / 1000;
spin_unlock(&cpu->lock);
return 0;
}
static void idlestats_get_sample(struct msm_idle_stats_device *idledev,
struct msm_idle_pulse *pulse)
{
struct kgsl_power_stats stats;
struct idlestats_priv *priv = container_of(idledev,
struct idlestats_priv, idledev);
struct kgsl_device *device = priv->device;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
mutex_lock(&device->mutex);
/* If the GPU is asleep, don't wake it up - assume that we
are idle */
if (!(device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))) {
device->ftbl->power_stats(device, &stats);
pulse->busy_start_time = pwr->time - stats.busy_time;
pulse->busy_interval = stats.busy_time;
} else {
pulse->busy_start_time = pwr->time;
pulse->busy_interval = 0;
}
pulse->wait_interval = 0;
mutex_unlock(&device->mutex);
}
static void idlestats_busy(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
int i, busy, nr_cpu = 1;
if (priv->pulse.busy_start_time != 0) {
priv->pulse.wait_interval = 0;
/* Calculate the total CPU busy time for this GPU pulse */
for (i = 0; i < num_possible_cpus(); i++) {
spin_lock(&priv->cpu_info.lock);
if (cpu_online(i)) {
priv->cpu_info.end[i] =
(u64)ktime_to_us(ktime_get()) -
get_cpu_idle_time_us(i, NULL);
busy = priv->cpu_info.end[i] -
priv->cpu_info.start[i];
/* Normalize the busy time by frequency */
busy = priv->cpu_info.curr_freq[i] *
(busy / priv->cpu_info.max_freq[i]);
priv->pulse.wait_interval += busy;
nr_cpu++;
}
spin_unlock(&priv->cpu_info.lock);
}
priv->pulse.wait_interval /= nr_cpu;
msm_idle_stats_idle_end(&priv->idledev, &priv->pulse);
}
priv->pulse.busy_start_time = ktime_to_us(ktime_get());
}
static void idlestats_idle(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
int i, nr_cpu;
struct kgsl_power_stats stats;
struct idlestats_priv *priv = pwrscale->priv;
/* This is called from within a mutex protected function, so
no additional locking required */
device->ftbl->power_stats(device, &stats);
/* If total_time is zero, then we don't have
any interesting statistics to store */
if (stats.total_time == 0) {
priv->pulse.busy_start_time = 0;
return;
}
priv->pulse.busy_interval = stats.busy_time;
nr_cpu = num_possible_cpus();
for (i = 0; i < nr_cpu; i++)
if (cpu_online(i))
priv->cpu_info.start[i] =
(u64)ktime_to_us(ktime_get()) -
get_cpu_idle_time_us(i, NULL);
msm_idle_stats_idle_start(&priv->idledev);
}
static void idlestats_sleep(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
priv->idledev.stats->event |= MSM_IDLE_STATS_EVENT_IDLE_TIMER_EXPIRED;
}
static int idlestats_init(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv;
struct cpufreq_policy cpu_policy;
int ret, i;
priv = pwrscale->priv = kzalloc(sizeof(struct idlestats_priv),
GFP_KERNEL);
if (pwrscale->priv == NULL)
return -ENOMEM;
snprintf(priv->name, sizeof(priv->name), "idle_stats_%s",
device->name);
priv->device = device;
priv->idledev.name = (const char *) priv->name;
priv->idledev.get_sample = idlestats_get_sample;
spin_lock_init(&priv->cpu_info.lock);
priv->cpu_info.cpu_nb.notifier_call =
idlestats_cpufreq_notifier;
ret = cpufreq_register_notifier(&priv->cpu_info.cpu_nb,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret)
goto err;
for (i = 0; i < num_possible_cpus(); i++) {
cpufreq_frequency_table_cpuinfo(&cpu_policy,
cpufreq_frequency_get_table(i));
priv->cpu_info.max_freq[i] = cpu_policy.max / 1000;
priv->cpu_info.curr_freq[i] = cpu_policy.max / 1000;
}
ret = msm_idle_stats_register_device(&priv->idledev);
err:
if (ret) {
kfree(pwrscale->priv);
pwrscale->priv = NULL;
}
return ret;
}
static void idlestats_close(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
if (pwrscale->priv == NULL)
return;
cpufreq_unregister_notifier(&priv->cpu_info.cpu_nb,
CPUFREQ_TRANSITION_NOTIFIER);
msm_idle_stats_deregister_device(&priv->idledev);
kfree(pwrscale->priv);
pwrscale->priv = NULL;
}
struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats = {
.name = "idlestats",
.init = idlestats_init,
.idle = idlestats_idle,
.busy = idlestats_busy,
.sleep = idlestats_sleep,
.close = idlestats_close
};

@ -0,0 +1,197 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include <mach/scm.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#define TZ_GOVERNOR_PERFORMANCE 0
#define TZ_GOVERNOR_ONDEMAND 1
struct tz_priv {
int governor;
unsigned int no_switch_cnt;
unsigned int skip_cnt;
};
#define SWITCH_OFF 200
#define SWITCH_OFF_RESET_TH 40
#define SKIP_COUNTER 500
#define TZ_RESET_ID 0x3
#define TZ_UPDATE_ID 0x4
#ifdef CONFIG_MSM_SCM
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_entry(u32 cmd, u32 val)
{
__iowmb();
return scm_call_atomic1(SCM_SVC_IO, cmd, val);
}
#else
static int __secure_tz_entry(u32 cmd, u32 val)
{
return 0;
}
#endif /* CONFIG_MSM_SCM */
static ssize_t tz_governor_show(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
char *buf)
{
struct tz_priv *priv = pwrscale->priv;
int ret;
if (priv->governor == TZ_GOVERNOR_ONDEMAND)
ret = snprintf(buf, 10, "ondemand\n");
else
ret = snprintf(buf, 13, "performance\n");
return ret;
}
static ssize_t tz_governor_store(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
const char *buf, size_t count)
{
char str[20];
struct tz_priv *priv = pwrscale->priv;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int ret;
ret = sscanf(buf, "%20s", str);
if (ret != 1)
return -EINVAL;
mutex_lock(&device->mutex);
if (!strncmp(str, "ondemand", 8))
priv->governor = TZ_GOVERNOR_ONDEMAND;
else if (!strncmp(str, "performance", 11))
priv->governor = TZ_GOVERNOR_PERFORMANCE;
if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
mutex_unlock(&device->mutex);
return count;
}
PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
static struct attribute *tz_attrs[] = {
&policy_attr_governor.attr,
NULL
};
static struct attribute_group tz_attr_group = {
.attrs = tz_attrs,
};
static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
struct tz_priv *priv = pwrscale->priv;
if (device->state != KGSL_STATE_NAP &&
priv->governor == TZ_GOVERNOR_ONDEMAND)
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.thermal_pwrlevel);
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct tz_priv *priv = pwrscale->priv;
struct kgsl_power_stats stats;
int val;
/* In "performance" mode the clock speed always stays
the same */
if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
return;
device->ftbl->power_stats(device, &stats);
if (stats.total_time == 0)
return;
/* If the GPU has stayed in turbo mode for a while, *
* stop writing out values. */
if (pwr->active_pwrlevel == 0) {
if (priv->no_switch_cnt > SWITCH_OFF) {
priv->skip_cnt++;
if (priv->skip_cnt > SKIP_COUNTER) {
priv->no_switch_cnt -= SWITCH_OFF_RESET_TH;
priv->skip_cnt = 0;
}
return;
}
priv->no_switch_cnt++;
} else {
priv->no_switch_cnt = 0;
}
val = __secure_tz_entry(TZ_UPDATE_ID,
stats.total_time - stats.busy_time);
if (val)
kgsl_pwrctrl_pwrlevel_change(device,
pwr->active_pwrlevel + val);
}
static void tz_sleep(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct tz_priv *priv = pwrscale->priv;
__secure_tz_entry(TZ_RESET_ID, 0);
priv->no_switch_cnt = 0;
}
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
struct tz_priv *priv;
/* Trustzone is only valid for some SOCs */
if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_msm8930()))
return -EINVAL;
priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
if (pwrscale->priv == NULL)
return -ENOMEM;
priv->governor = TZ_GOVERNOR_ONDEMAND;
kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
return 0;
}
static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
kfree(pwrscale->priv);
pwrscale->priv = NULL;
}
struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
.name = "trustzone",
.init = tz_init,
.idle = tz_idle,
.sleep = tz_sleep,
.wake = tz_wake,
.close = tz_close
};
EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
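The counters in tz_idle implement a simple hysteresis: once the GPU has sat at the turbo level for more than SWITCH_OFF (200) idle callbacks, the policy stops trapping into TrustZone, and only after SKIP_COUNTER (500) skipped callbacks does it wind no_switch_cnt back by SWITCH_OFF_RESET_TH (40) so sampling can resume. A standalone simulation of just that counter logic, with the same constants:

/* Simulates tz_idle's skip counters while the GPU stays at turbo. */
#include <stdio.h>

int main(void)
{
	unsigned int no_switch_cnt = 0, skip_cnt = 0, tz_calls = 0;
	int i;

	for (i = 0; i < 2000; i++) {		/* 2000 idle callbacks */
		if (no_switch_cnt > 200) {	/* SWITCH_OFF */
			if (++skip_cnt > 500) {	/* SKIP_COUNTER */
				no_switch_cnt -= 40;	/* SWITCH_OFF_RESET_TH */
				skip_cnt = 0;
			}
			continue;
		}
		no_switch_cnt++;
		tz_calls++;	/* here the driver would trap into TZ */
	}
	printf("TrustZone consulted %u of 2000 callbacks\n", tz_calls);
	return 0;
}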

@ -1,4 +1,5 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -11,11 +12,14 @@
*
*/
#include <linux/vmalloc.h>
#include <linux/memory_alloc.h>
#include <asm/cacheflush.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
#include "kgsl_device.h"
#include "adreno_ringbuffer.h"
static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
@ -166,7 +170,7 @@ DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
static const struct device_attribute *drv_attr_list[] = {
static struct device_attribute *drv_attr_list[] = {
&dev_attr_vmalloc,
&dev_attr_vmalloc_max,
&dev_attr_coherent,
@ -205,28 +209,21 @@ static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
break;
}
}
#endif
static unsigned long kgsl_vmalloc_physaddr(struct kgsl_memdesc *memdesc,
unsigned int offset)
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
unsigned int addr;
struct scatterlist *s;
int i;
if (offset > memdesc->size)
return 0;
addr = vmalloc_to_pfn(memdesc->hostptr + offset);
return addr << PAGE_SHIFT;
for_each_sg(sg, s, sglen, i) {
unsigned int paddr = sg_phys(s);
_outer_cache_range_op(op, paddr, s->length);
}
}
#ifdef CONFIG_OUTER_CACHE
static void kgsl_vmalloc_outer_cache(struct kgsl_memdesc *memdesc, int op)
#else
static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
{
void *vaddr = memdesc->hostptr;
for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
unsigned long paddr = page_to_phys(vmalloc_to_page(vaddr));
_outer_cache_range_op(op, paddr, PAGE_SIZE);
}
}
#endif
@ -261,6 +258,42 @@ static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
vfree(memdesc->hostptr);
}
static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
{
return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
}
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
unsigned long offset, pfn;
int ret;
offset = ((unsigned long) vmf->virtual_address - vma->vm_start) >>
PAGE_SHIFT;
pfn = (memdesc->physaddr >> PAGE_SHIFT) + offset;
ret = vm_insert_pfn(vma, (unsigned long) vmf->virtual_address, pfn);
if (ret == -ENOMEM || ret == -EAGAIN)
return VM_FAULT_OOM;
else if (ret == -EFAULT)
return VM_FAULT_SIGBUS;
return VM_FAULT_NOPAGE;
}
static void kgsl_ebimem_free(struct kgsl_memdesc *memdesc)
{
kgsl_driver.stats.coherent -= memdesc->size;
if (memdesc->hostptr)
iounmap(memdesc->hostptr);
free_contiguous_memory_by_paddr(memdesc->physaddr);
}
static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
kgsl_driver.stats.coherent -= memdesc->size;
@ -268,78 +301,24 @@ static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
memdesc->hostptr, memdesc->physaddr);
}
static unsigned long kgsl_contig_physaddr(struct kgsl_memdesc *memdesc,
unsigned int offset)
{
if (offset > memdesc->size)
return 0;
return memdesc->physaddr + offset;
}
#ifdef CONFIG_OUTER_CACHE
static void kgsl_contig_outer_cache(struct kgsl_memdesc *memdesc, int op)
{
_outer_cache_range_op(op, memdesc->physaddr, memdesc->size);
}
#endif
#ifdef CONFIG_OUTER_CACHE
static void kgsl_userptr_outer_cache(struct kgsl_memdesc *memdesc, int op)
{
void *vaddr = memdesc->hostptr;
for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
unsigned long paddr = kgsl_virtaddr_to_physaddr(vaddr);
if (paddr)
_outer_cache_range_op(op, paddr, PAGE_SIZE);
}
}
#endif
static unsigned long kgsl_userptr_physaddr(struct kgsl_memdesc *memdesc,
unsigned int offset)
{
return kgsl_virtaddr_to_physaddr(memdesc->hostptr + offset);
}
/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
.physaddr = kgsl_vmalloc_physaddr,
.free = kgsl_vmalloc_free,
.vmflags = kgsl_vmalloc_vmflags,
.vmfault = kgsl_vmalloc_vmfault,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_vmalloc_outer_cache,
#endif
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);
static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
.free = kgsl_ebimem_free,
.vmflags = kgsl_contiguous_vmflags,
.vmfault = kgsl_contiguous_vmfault,
};
static struct kgsl_memdesc_ops kgsl_coherent_ops = {
.physaddr = kgsl_contig_physaddr,
.free = kgsl_coherent_free,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_contig_outer_cache,
#endif
};
/* Global - also used by kgsl.c and kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_contig_ops = {
.physaddr = kgsl_contig_physaddr,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_contig_outer_cache
#endif
};
EXPORT_SYMBOL(kgsl_contig_ops);
/* Global - also used by kgsl.c */
struct kgsl_memdesc_ops kgsl_userptr_ops = {
.physaddr = kgsl_userptr_physaddr,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_userptr_outer_cache,
#endif
};
EXPORT_SYMBOL(kgsl_userptr_ops);
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
void *addr = memdesc->hostptr;
@ -357,8 +336,7 @@ void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
break;
}
if (memdesc->ops->outer_cache)
memdesc->ops->outer_cache(memdesc, op);
outer_cache_range_op_sg(memdesc->sg, memdesc->sglen, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);
@ -367,7 +345,9 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
void *ptr, size_t size, unsigned int protflags)
{
int result;
int order, ret = 0;
int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
int i;
memdesc->size = size;
memdesc->pagetable = pagetable;
@ -375,25 +355,44 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
memdesc->ops = &kgsl_vmalloc_ops;
memdesc->hostptr = (void *) ptr;
kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
result = kgsl_mmu_map(pagetable, memdesc, protflags);
if (result) {
kgsl_sharedmem_free(memdesc);
} else {
int order;
KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
order = get_order(size);
if (order < 16)
kgsl_driver.stats.histogram[order]++;
memdesc->sg = kmalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
if (memdesc->sg == NULL) {
ret = -ENOMEM;
goto done;
}
return result;
memdesc->sglen = sglen;
sg_init_table(memdesc->sg, sglen);
for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
struct page *page = vmalloc_to_page(ptr);
if (!page) {
ret = -EINVAL;
goto done;
}
sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
}
kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
ret = kgsl_mmu_map(pagetable, memdesc, protflags);
if (ret)
goto done;
KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
order = get_order(size);
if (order < 16)
kgsl_driver.stats.histogram[order]++;
done:
if (ret)
kgsl_sharedmem_free(memdesc);
return ret;
}
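The rewritten allocator above describes the vmalloc region with one scatterlist entry per page; the core of that construction, pulled out as a self-contained sketch (hypothetical helper name):

#include <linux/scatterlist.h>
#include <linux/vmalloc.h>

static int sg_from_vmalloc(struct scatterlist *sg, void *ptr, int npages)
{
	int i;

	sg_init_table(sg, npages);
	for (i = 0; i < npages; i++, ptr += PAGE_SIZE) {
		/* resolve the backing page of each vmalloc'd page */
		struct page *page = vmalloc_to_page(ptr);

		if (!page)
			return -EINVAL;
		sg_set_page(&sg[i], page, PAGE_SIZE, 0);
	}
	return 0;
}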
int
@ -446,24 +445,35 @@ EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);
int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
int result = 0;
size = ALIGN(size, PAGE_SIZE);
memdesc->size = size;
memdesc->ops = &kgsl_coherent_ops;
memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
GFP_KERNEL);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
return -ENOMEM;
result = -ENOMEM;
goto err;
}
memdesc->size = size;
memdesc->ops = &kgsl_coherent_ops;
result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
if (result)
goto err;
/* Record statistics */
KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
return 0;
err:
if (result)
kgsl_sharedmem_free(memdesc);
return result;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
@ -475,13 +485,86 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
if (memdesc->gpuaddr)
kgsl_mmu_unmap(memdesc->pagetable, memdesc);
if (memdesc->ops->free)
if (memdesc->ops && memdesc->ops->free)
memdesc->ops->free(memdesc);
kfree(memdesc->sg);
memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
static int
_kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
int result = 0;
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->ops = &kgsl_ebimem_ops;
memdesc->physaddr = allocate_contiguous_ebi_nomap(size, SZ_8K);
if (memdesc->physaddr == 0) {
KGSL_CORE_ERR("allocate_contiguous_ebi_nomap(%d) failed\n",
size);
return -ENOMEM;
}
result = memdesc_sg_phys(memdesc, memdesc->physaddr, size);
if (result)
goto err;
result = kgsl_mmu_map(pagetable, memdesc,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
if (result)
goto err;
KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
err:
if (result)
kgsl_sharedmem_free(memdesc);
return result;
}
int
kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, int flags)
{
size = ALIGN(size, PAGE_SIZE);
return _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem_user);
int
kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
int result;
size = ALIGN(size, 8192);
result = _kgsl_sharedmem_ebimem(memdesc, pagetable, size);
if (result)
return result;
memdesc->hostptr = ioremap(memdesc->physaddr, size);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR("ioremap failed\n");
kgsl_sharedmem_free(memdesc);
return -ENOMEM;
}
return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_ebimem);
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
uint32_t *dst,

@ -1,37 +1,27 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H
#include <linux/slab.h>
#include <linux/dma-mapping.h>
struct kgsl_pagetable;
/*
* Convert a page to a physical address
*/
#define phys_to_page(phys) (pfn_to_page(__phys_to_pfn(phys)))
struct kgsl_device;
struct kgsl_process_private;
@ -42,31 +32,14 @@ struct kgsl_process_private;
/** Set if the memdesc describes cached memory */
#define KGSL_MEMFLAGS_CACHED 0x00000001
struct kgsl_memdesc;
struct kgsl_memdesc_ops {
unsigned long (*physaddr)(struct kgsl_memdesc *, unsigned int);
void (*outer_cache)(struct kgsl_memdesc *, int);
int (*vmflags)(struct kgsl_memdesc *);
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
struct vm_fault *);
void (*free)(struct kgsl_memdesc *memdesc);
};
/* shared memory allocation */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
void *hostptr;
unsigned int gpuaddr;
unsigned int physaddr;
unsigned int size;
unsigned int priv;
struct kgsl_memdesc_ops *ops;
};
extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
extern struct kgsl_memdesc_ops kgsl_contig_ops;
extern struct kgsl_memdesc_ops kgsl_userptr_ops;
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size);
@ -77,6 +50,14 @@ int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, int flags);
int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size);
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
@ -99,18 +80,54 @@ void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);
static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
unsigned int physaddr, unsigned int size)
{
struct page *page = phys_to_page(physaddr);
memdesc->sg = kmalloc(sizeof(struct scatterlist) * 1, GFP_KERNEL);
if (memdesc->sg == NULL)
return -ENOMEM;
memdesc->sglen = 1;
sg_init_table(memdesc->sg, 1);
sg_set_page(&memdesc->sg[0], page, size, 0);
return 0;
}
static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
#ifdef CONFIG_MSM_KGSL_MMU
return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
#else
return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
#endif
}
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, unsigned int flags)
{
#ifdef CONFIG_MSM_KGSL_MMU
return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
#else
return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size, flags);
#endif
}
static inline int
kgsl_allocate_contig(struct kgsl_memdesc *memdesc, size_t size)
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
return kgsl_sharedmem_alloc_coherent(memdesc, size);
int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
#ifndef CONFIG_MSM_KGSL_MMU
if (!ret)
memdesc->gpuaddr = memdesc->physaddr;
#endif
return ret;
}
#endif /* __KGSL_SHAREDMEM_H */
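A sketch of how the helpers above compose in a caller; kgsl_allocate() picks the vmalloc or EBI path depending on CONFIG_MSM_KGSL_MMU as shown, and the pagetable, size, and function name here are assumptions:

static int example_alloc(struct kgsl_pagetable *pt)
{
	struct kgsl_memdesc desc = { 0 };
	int ret;

	ret = kgsl_allocate(&desc, pt, PAGE_SIZE);
	if (ret)
		return ret;

	/* ... use desc.gpuaddr on the GPU, desc.hostptr on the CPU ... */

	kgsl_sharedmem_free(&desc);
	return 0;
}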

@ -14,6 +14,7 @@
#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_sharedmem.h"
#include "z180.h"
#include "z180_reg.h"
@ -86,6 +87,11 @@
#define Z180_TIMESTAMP_EPSILON 20000
#define Z180_IDLE_COUNT_MAX 1000000
enum z180_cmdwindow_type {
Z180_CMDWINDOW_2D = 0x00000000,
Z180_CMDWINDOW_MMU = 0x00000002,
};
#define Z180_CMDWINDOW_TARGET_MASK 0x000000FF
#define Z180_CMDWINDOW_ADDR_MASK 0x00FFFF00
#define Z180_CMDWINDOW_TARGET_SHIFT 0
@ -102,17 +108,9 @@ static void z180_regread(struct kgsl_device *device,
static void z180_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
static int z180_cmdwindow_write(struct kgsl_device *device,
enum kgsl_cmdwindow_type target,
static void z180_cmdwindow_write(struct kgsl_device *device,
unsigned int addr,
unsigned int data);
static void z180_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
static void z180_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
static void __devinit z180_getfunctable(struct kgsl_functable *ftbl);
#define Z180_MMU_CONFIG \
(0x01 \
@ -128,35 +126,29 @@ static void __devinit z180_getfunctable(struct kgsl_functable *ftbl);
| (MMU_CONFIG << MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT) \
| (MMU_CONFIG << MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT))
static const struct kgsl_functable z180_functable;
static struct z180_device device_2d0 = {
.dev = {
.name = DEVICE_2D0_NAME,
.id = KGSL_DEVICE_2D0,
.ver_major = DRIVER_VERSION_MAJOR,
.ver_minor = DRIVER_VERSION_MINOR,
.mmu = {
.config = Z180_MMU_CONFIG,
.mh = {
.mharb = Z180_CFG_MHARB,
.mh_intf_cfg1 = 0x00032f07,
.mh_intf_cfg2 = 0x004b274f,
/* turn off memory protection unit by setting
acceptable physical address range to include
all pages. */
.mpu_base = 0x00000000,
.mpu_range = 0xFFFFF000,
.reg = {
.config = ADDR_MH_MMU_CONFIG,
.mpu_base = ADDR_MH_MMU_MPU_BASE,
.mpu_end = ADDR_MH_MMU_MPU_END,
.va_range = ADDR_MH_MMU_VA_RANGE,
.pt_page = ADDR_MH_MMU_PT_BASE,
.page_fault = ADDR_MH_MMU_PAGE_FAULT,
.tran_error = ADDR_MH_MMU_TRAN_ERROR,
.invalidate = ADDR_MH_MMU_INVALIDATE,
.interrupt_mask = ADDR_MH_INTERRUPT_MASK,
.interrupt_status = ADDR_MH_INTERRUPT_STATUS,
.interrupt_clear = ADDR_MH_INTERRUPT_CLEAR,
.axi_error = ADDR_MH_AXI_ERROR,
},
},
.mmu = {
.config = Z180_MMU_CONFIG,
},
.pwrctrl = {
.pwr_rail = PWR_RAIL_GRP_2D_CLK,
.regulator_name = "fs_gfx2d0",
.irq_name = KGSL_2D0_IRQ,
},
@ -164,6 +156,14 @@ static struct z180_device device_2d0 = {
.state = KGSL_STATE_INIT,
.active_cnt = 0,
.iomemname = KGSL_2D0_REG_MEMORY,
.ftbl = &z180_functable,
#ifdef CONFIG_HAS_EARLYSUSPEND
.display_off = {
.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
.suspend = kgsl_early_suspend_driver,
.resume = kgsl_late_resume_driver,
},
#endif
},
};
@ -173,29 +173,21 @@ static struct z180_device device_2d1 = {
.id = KGSL_DEVICE_2D1,
.ver_major = DRIVER_VERSION_MAJOR,
.ver_minor = DRIVER_VERSION_MINOR,
.mmu = {
.config = Z180_MMU_CONFIG,
.mh = {
.mharb = Z180_CFG_MHARB,
.mh_intf_cfg1 = 0x00032f07,
.mh_intf_cfg2 = 0x004b274f,
/* turn off memory protection unit by setting
acceptable physical address range to include
all pages. */
.mpu_base = 0x00000000,
.mpu_range = 0xFFFFF000,
.reg = {
.config = ADDR_MH_MMU_CONFIG,
.mpu_base = ADDR_MH_MMU_MPU_BASE,
.mpu_end = ADDR_MH_MMU_MPU_END,
.va_range = ADDR_MH_MMU_VA_RANGE,
.pt_page = ADDR_MH_MMU_PT_BASE,
.page_fault = ADDR_MH_MMU_PAGE_FAULT,
.tran_error = ADDR_MH_MMU_TRAN_ERROR,
.invalidate = ADDR_MH_MMU_INVALIDATE,
.interrupt_mask = ADDR_MH_INTERRUPT_MASK,
.interrupt_status = ADDR_MH_INTERRUPT_STATUS,
.interrupt_clear = ADDR_MH_INTERRUPT_CLEAR,
.axi_error = ADDR_MH_AXI_ERROR,
},
},
.mmu = {
.config = Z180_MMU_CONFIG,
},
.pwrctrl = {
.pwr_rail = PWR_RAIL_GRP_2D_CLK,
.regulator_name = "fs_gfx2d1",
.irq_name = KGSL_2D1_IRQ,
},
@ -203,6 +195,14 @@ static struct z180_device device_2d1 = {
.state = KGSL_STATE_INIT,
.active_cnt = 0,
.iomemname = KGSL_2D1_REG_MEMORY,
.ftbl = &z180_functable,
.display_off = {
#ifdef CONFIG_HAS_EARLYSUSPEND
.level = EARLY_SUSPEND_LEVEL_STOP_DRAWING,
.suspend = kgsl_early_suspend_driver,
.resume = kgsl_late_resume_driver,
#endif
},
},
};
@ -213,10 +213,10 @@ static irqreturn_t z180_isr(int irq, void *data)
struct kgsl_device *device = (struct kgsl_device *) data;
struct z180_device *z180_dev = Z180_DEVICE(device);
z180_regread_isr(device, ADDR_VGC_IRQSTATUS >> 2, &status);
z180_regread(device, ADDR_VGC_IRQSTATUS >> 2, &status);
if (status & GSL_VGC_INT_MASK) {
z180_regwrite_isr(device,
z180_regwrite(device,
ADDR_VGC_IRQSTATUS >> 2, status & GSL_VGC_INT_MASK);
result = IRQ_HANDLED;
@ -228,7 +228,7 @@ static irqreturn_t z180_isr(int irq, void *data)
if (status & REG_VGC_IRQSTATUS__G2D_MASK) {
int count;
z180_regread_isr(device,
z180_regread(device,
ADDR_VGC_IRQ_ACTIVE_CNT >> 2,
&count);
@ -236,6 +236,7 @@ static irqreturn_t z180_isr(int irq, void *data)
count &= 255;
z180_dev->timestamp += count;
queue_work(device->work_queue, &device->ts_expired_ws);
wake_up_interruptible(&device->wait_queue);
atomic_notifier_call_chain(
@ -255,18 +256,16 @@ static irqreturn_t z180_isr(int irq, void *data)
return result;
}
static int z180_cleanup_pt(struct kgsl_device *device,
static void z180_cleanup_pt(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct z180_device *z180_dev = Z180_DEVICE(device);
kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
kgsl_mmu_unmap(pagetable, &device->memstore);
kgsl_mmu_unmap(pagetable, &z180_dev->ringbuffer.cmdbufdesc);
return 0;
}
static int z180_setup_pt(struct kgsl_device *device,
@ -275,7 +274,7 @@ static int z180_setup_pt(struct kgsl_device *device,
int result = 0;
struct z180_device *z180_dev = Z180_DEVICE(device);
result = kgsl_mmu_map_global(pagetable, &device->mmu.dummyspace,
result = kgsl_mmu_map_global(pagetable, &device->mmu.setstate_memory,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
if (result)
@ -294,7 +293,7 @@ static int z180_setup_pt(struct kgsl_device *device,
return result;
error_unmap_dummy:
kgsl_mmu_unmap(pagetable, &device->mmu.dummyspace);
kgsl_mmu_unmap(pagetable, &device->mmu.setstate_memory);
error_unmap_memstore:
kgsl_mmu_unmap(pagetable, &device->memstore);
@ -339,10 +338,9 @@ static void addcmd(struct z180_ringbuffer *rb, unsigned int index,
*p++ = ADDR_VGV3_LAST << 24;
}
static int z180_cmdstream_start(struct kgsl_device *device)
static void z180_cmdstream_start(struct kgsl_device *device)
{
struct z180_device *z180_dev = Z180_DEVICE(device);
int result;
unsigned int cmd = VGV3_NEXTCMD_JUMP << VGV3_NEXTCMD_NEXTCMD_FSHIFT;
z180_dev->timestamp = 0;
@ -350,43 +348,22 @@ static int z180_cmdstream_start(struct kgsl_device *device)
addmarker(&z180_dev->ringbuffer, 0);
result = z180_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
ADDR_VGV3_MODE, 4);
if (result != 0)
return result;
z180_cmdwindow_write(device, ADDR_VGV3_MODE, 4);
result = z180_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
ADDR_VGV3_NEXTADDR,
z180_cmdwindow_write(device, ADDR_VGV3_NEXTADDR,
z180_dev->ringbuffer.cmdbufdesc.gpuaddr);
if (result != 0)
return result;
result = z180_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
ADDR_VGV3_NEXTCMD, cmd | 5);
if (result != 0)
return result;
z180_cmdwindow_write(device, ADDR_VGV3_NEXTCMD, cmd | 5);
result = z180_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
ADDR_VGV3_WRITEADDR, device->memstore.gpuaddr);
if (result != 0)
return result;
z180_cmdwindow_write(device, ADDR_VGV3_WRITEADDR,
device->memstore.gpuaddr);
cmd = (int)(((1) & VGV3_CONTROL_MARKADD_FMASK)
<< VGV3_CONTROL_MARKADD_FSHIFT);
result = z180_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
ADDR_VGV3_CONTROL, cmd);
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
if (result != 0)
return result;
result = z180_cmdwindow_write(device, KGSL_CMDWINDOW_2D,
ADDR_VGV3_CONTROL, 0);
if (result != 0)
return result;
return result;
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
}
static int room_in_rb(struct z180_device *device)
@ -403,7 +380,8 @@ static int z180_idle(struct kgsl_device *device, unsigned int timeout)
int status = 0;
struct z180_device *z180_dev = Z180_DEVICE(device);
if (z180_dev->current_timestamp > z180_dev->timestamp)
if (timestamp_cmp(z180_dev->current_timestamp,
z180_dev->timestamp) > 0)
status = z180_wait(device, z180_dev->current_timestamp,
timeout);
@ -413,30 +391,6 @@ static int z180_idle(struct kgsl_device *device, unsigned int timeout)
return status;
}
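The switch to timestamp_cmp() above guards the idle check against 32-bit timestamp wraparound; the helper itself is not part of this hunk, but a common wraparound-safe formulation is:

/* Sketch only -- the driver's real helper may differ. Returns >0 if a
 * is "after" b, <0 if before, 0 if equal, even across rollover. */
static inline int ts_cmp_sketch(unsigned int a, unsigned int b)
{
	return (int)(a - b);
}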
static int z180_setstate(struct kgsl_device *device, uint32_t flags)
{
#ifdef CONFIG_MSM_KGSL_MMU
unsigned int mh_mmu_invalidate = 0x00000003; /*invalidate all and tc */
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
z180_idle(device, KGSL_TIMEOUT_DEFAULT);
z180_regwrite(device, ADDR_MH_MMU_PT_BASE,
device->mmu.hwpagetable->base.gpuaddr);
z180_regwrite(device, ADDR_MH_MMU_VA_RANGE,
(device->mmu.hwpagetable->
va_base | (device->mmu.hwpagetable->
va_range >> 16)));
z180_regwrite(device, ADDR_MH_MMU_INVALIDATE,
mh_mmu_invalidate);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH)
z180_regwrite(device, ADDR_MH_MMU_INVALIDATE,
mh_mmu_invalidate);
#endif
return 0;
}
int
z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
@ -445,7 +399,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
uint32_t *timestamp,
unsigned int ctrl)
{
unsigned int result = 0;
long result = 0;
unsigned int ofs = PACKETSIZE_STATESTREAM * sizeof(unsigned int);
unsigned int cnt = 5;
unsigned int nextaddr = 0;
@ -460,7 +414,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
unsigned int sizedwords;
if (device->state & KGSL_STATE_HUNG) {
return -EINVAL;
result = -EINVAL;
goto error;
}
if (numibs != 1) {
@ -484,7 +438,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
cnt = PACKETSIZE_STATESTREAM;
ofs = 0;
}
z180_setstate(device, kgsl_pt_get_flags(device->mmu.hwpagetable,
kgsl_setstate(device, kgsl_mmu_pt_get_flags(device->mmu.hwpagetable,
device->id));
result = wait_event_interruptible_timeout(device->wait_queue,
@ -492,7 +446,7 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
msecs_to_jiffies(KGSL_TIMEOUT_DEFAULT));
if (result < 0) {
KGSL_CMD_ERR(device, "wait_event_interruptible_timeout "
"failed: %d\n", result);
"failed: %ld\n", result);
goto error;
}
result = 0;
@ -525,12 +479,10 @@ z180_cmdstream_issueibcmds(struct kgsl_device_private *dev_priv,
cmd = (int)(((2) & VGV3_CONTROL_MARKADD_FMASK)
<< VGV3_CONTROL_MARKADD_FSHIFT);
z180_cmdwindow_write(device,
KGSL_CMDWINDOW_2D, ADDR_VGV3_CONTROL, cmd);
z180_cmdwindow_write(device,
KGSL_CMDWINDOW_2D, ADDR_VGV3_CONTROL, 0);
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, cmd);
z180_cmdwindow_write(device, ADDR_VGV3_CONTROL, 0);
error:
return result;
return (int)result;
}
static int z180_ringbuffer_init(struct kgsl_device *device)
@ -538,9 +490,8 @@ static int z180_ringbuffer_init(struct kgsl_device *device)
struct z180_device *z180_dev = Z180_DEVICE(device);
memset(&z180_dev->ringbuffer, 0, sizeof(struct z180_ringbuffer));
z180_dev->ringbuffer.prevctx = Z180_INVALID_CONTEXT;
return kgsl_sharedmem_alloc_coherent(
&z180_dev->ringbuffer.cmdbufdesc,
Z180_RB_SIZE);
return kgsl_allocate_contiguous(&z180_dev->ringbuffer.cmdbufdesc,
Z180_RB_SIZE);
}
static void z180_ringbuffer_close(struct kgsl_device *device)
@ -559,8 +510,6 @@ static int __devinit z180_probe(struct platform_device *pdev)
device = (struct kgsl_device *)pdev->id_entry->driver_data;
device->parentdev = &pdev->dev;
z180_getfunctable(&device->ftbl);
z180_dev = Z180_DEVICE(device);
spin_lock_init(&z180_dev->cmdwin_lock);
@ -572,6 +521,8 @@ static int __devinit z180_probe(struct platform_device *pdev)
if (status)
goto error_close_ringbuffer;
kgsl_pwrscale_init(device);
return status;
error_close_ringbuffer:
@ -587,6 +538,7 @@ static int __devexit z180_remove(struct platform_device *pdev)
device = (struct kgsl_device *)pdev->id_entry->driver_data;
kgsl_pwrscale_close(device);
kgsl_device_platform_remove(device);
z180_ringbuffer_close(device);
@ -604,31 +556,24 @@ static int z180_start(struct kgsl_device *device, unsigned int init_ram)
kgsl_pwrctrl_enable(device);
/* Set up MH arbiter. MH offsets are considered to be dword
* based, therefore no down shift. */
z180_regwrite(device, ADDR_MH_ARBITER_CONFIG, Z180_CFG_MHARB);
/* Set interrupts to 0 to ensure a good state */
z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x0);
z180_regwrite(device, ADDR_MH_CLNT_INTF_CTRL_CONFIG1, 0x00030F27);
z180_regwrite(device, ADDR_MH_CLNT_INTF_CTRL_CONFIG2, 0x004B274F);
z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0x3);
kgsl_mh_start(device);
status = kgsl_mmu_start(device);
if (status)
goto error_clk_off;
status = z180_cmdstream_start(device);
if (status)
goto error_mmu_stop;
z180_cmdstream_start(device);
mod_timer(&device->idle_timer, jiffies + FIRST_TIMEOUT);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_IRQ_ON);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
return 0;
error_clk_off:
z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
kgsl_pwrctrl_disable(device);
error_mmu_stop:
kgsl_mmu_stop(device);
return status;
}
@ -636,7 +581,7 @@ static int z180_stop(struct kgsl_device *device)
{
z180_idle(device, KGSL_TIMEOUT_DEFAULT);
del_timer(&device->idle_timer);
del_timer_sync(&device->idle_timer);
kgsl_mmu_stop(device);
@ -680,16 +625,12 @@ static int z180_getproperty(struct kgsl_device *device,
break;
case KGSL_PROP_MMU_ENABLE:
{
#ifdef CONFIG_MSM_KGSL_MMU
int mmuProp = 1;
#else
int mmuProp = 0;
#endif
int mmu_prop = kgsl_mmu_enabled();
if (sizebytes != sizeof(int)) {
status = -EINVAL;
break;
}
if (copy_to_user(value, &mmuProp, sizeof(mmuProp))) {
if (copy_to_user(value, &mmu_prop, sizeof(mmu_prop))) {
status = -EFAULT;
break;
}
@ -706,22 +647,10 @@ static int z180_getproperty(struct kgsl_device *device,
static unsigned int z180_isidle(struct kgsl_device *device)
{
int status = false;
struct z180_device *z180_dev = Z180_DEVICE(device);
int timestamp = z180_dev->timestamp;
if (timestamp == z180_dev->current_timestamp)
status = true;
return status;
}
static int z180_resume_context(struct kgsl_device *device)
{
/* Context is in the pre-amble, automatically restored. */
return 0;
return (timestamp_cmp(z180_dev->timestamp,
z180_dev->current_timestamp) == 0) ? true : false;
}
static int z180_suspend_context(struct kgsl_device *device)
@ -800,7 +729,7 @@ static void _z180_regwrite_mmu(struct kgsl_device *device,
unsigned int cmdwinaddr;
unsigned long flags;
cmdwinaddr = ((KGSL_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) &
cmdwinaddr = ((Z180_CMDWINDOW_MMU << Z180_CMDWINDOW_TARGET_SHIFT) &
Z180_CMDWINDOW_TARGET_MASK);
cmdwinaddr |= ((offsetwords << Z180_CMDWINDOW_ADDR_SHIFT) &
Z180_CMDWINDOW_ADDR_MASK);
@ -815,91 +744,52 @@ static void _z180_regwrite_mmu(struct kgsl_device *device,
/* the rest of the code doesn't want to think about if it is writing mmu
* registers or normal registers so handle it here
*/
static void _z180_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value)
static void z180_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
if ((offsetwords >= ADDR_MH_ARBITER_CONFIG &&
offsetwords <= ADDR_MH_AXI_HALT_CONTROL) ||
(offsetwords >= ADDR_MH_MMU_CONFIG &&
offsetwords <= ADDR_MH_MMU_MPU_END)) {
if (!in_interrupt())
kgsl_pre_hwaccess(device);
if ((offsetwords >= MH_ARBITER_CONFIG &&
offsetwords <= MH_AXI_HALT_CONTROL) ||
(offsetwords >= MH_MMU_CONFIG &&
offsetwords <= MH_MMU_MPU_END)) {
_z180_regread_mmu(device, offsetwords, value);
} else {
_z180_regread_simple(device, offsetwords, value);
}
}
static void _z180_regwrite(struct kgsl_device *device, unsigned int offsetwords,
static void z180_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
if ((offsetwords >= ADDR_MH_ARBITER_CONFIG &&
offsetwords <= ADDR_MH_CLNT_INTF_CTRL_CONFIG2) ||
(offsetwords >= ADDR_MH_MMU_CONFIG &&
offsetwords <= ADDR_MH_MMU_MPU_END)) {
_z180_regwrite_mmu(device, offsetwords, value);
if (!in_interrupt())
kgsl_pre_hwaccess(device);
if ((offsetwords >= MH_ARBITER_CONFIG &&
offsetwords <= MH_CLNT_INTF_CTRL_CONFIG2) ||
(offsetwords >= MH_MMU_CONFIG &&
offsetwords <= MH_MMU_MPU_END)) {
_z180_regwrite_mmu(device, offsetwords, value);
} else {
_z180_regwrite_simple(device, offsetwords, value);
}
}
static void z180_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
kgsl_pre_hwaccess(device);
_z180_regread(device, offsetwords, value);
}
static void z180_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
_z180_regread(device, offsetwords, value);
}
static void z180_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
kgsl_pre_hwaccess(device);
_z180_regwrite(device, offsetwords, value);
}
static void z180_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
_z180_regwrite(device, offsetwords, value);
}
static int z180_cmdwindow_write(struct kgsl_device *device,
enum kgsl_cmdwindow_type target, unsigned int addr,
unsigned int data)
static void z180_cmdwindow_write(struct kgsl_device *device,
unsigned int addr, unsigned int data)
{
unsigned int cmdwinaddr;
unsigned int cmdstream;
if (target < KGSL_CMDWINDOW_MIN ||
target > KGSL_CMDWINDOW_MAX) {
KGSL_DRV_ERR(device, "invalid target\n");
return -EINVAL;
}
if (target == KGSL_CMDWINDOW_MMU)
cmdstream = ADDR_VGC_MMUCOMMANDSTREAM;
else
cmdstream = ADDR_VGC_COMMANDSTREAM;
cmdwinaddr = ((target << Z180_CMDWINDOW_TARGET_SHIFT) &
cmdwinaddr = ((Z180_CMDWINDOW_2D << Z180_CMDWINDOW_TARGET_SHIFT) &
Z180_CMDWINDOW_TARGET_MASK);
cmdwinaddr |= ((addr << Z180_CMDWINDOW_ADDR_SHIFT) &
Z180_CMDWINDOW_ADDR_MASK);
z180_regwrite(device, cmdstream >> 2, cmdwinaddr);
z180_regwrite(device, cmdstream >> 2, data);
return 0;
z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, cmdwinaddr);
z180_regwrite(device, ADDR_VGC_COMMANDSTREAM >> 2, data);
}
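The first word written to the command stream above packs a target id and a register offset using the masks defined earlier in this file; a standalone sketch of the packing (Z180_CMDWINDOW_ADDR_SHIFT is assumed to be defined alongside the masks shown):

static unsigned int pack_cmdwinaddr(unsigned int target, unsigned int addr)
{
	unsigned int word;

	word  = (target << Z180_CMDWINDOW_TARGET_SHIFT) &
		Z180_CMDWINDOW_TARGET_MASK;
	word |= (addr << Z180_CMDWINDOW_ADDR_SHIFT) &
		Z180_CMDWINDOW_ADDR_MASK;
	return word;
}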
static unsigned int z180_readtimestamp(struct kgsl_device *device,
@ -915,6 +805,11 @@ static int z180_waittimestamp(struct kgsl_device *device,
unsigned int msecs)
{
int status = -EINVAL;
/* Don't wait forever, set a max (10 sec) value for now */
if (msecs == -1)
msecs = 10 * MSEC_PER_SEC;
mutex_unlock(&device->mutex);
status = z180_wait(device, timestamp, msecs);
mutex_lock(&device->mutex);
@ -946,19 +841,7 @@ static int z180_wait(struct kgsl_device *device,
return status;
}
static long
z180_ioctl_cmdwindow_write(struct kgsl_device_private *dev_priv,
void *data)
{
struct kgsl_cmdwindow_write *param = data;
return z180_cmdwindow_write(dev_priv->device,
param->target,
param->addr,
param->data);
}
static int
static void
z180_drawctxt_destroy(struct kgsl_device *device,
struct kgsl_context *context)
{
@ -971,63 +854,62 @@ z180_drawctxt_destroy(struct kgsl_device *device,
device->mmu.hwpagetable = device->mmu.defaultpagetable;
kgsl_setstate(device, KGSL_MMUFLAGS_PTUPDATE);
}
return 0;
}
static long z180_ioctl(struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data)
{
int result = 0;
switch (cmd) {
case IOCTL_KGSL_CMDWINDOW_WRITE:
result = z180_ioctl_cmdwindow_write(dev_priv, data);
break;
default:
KGSL_DRV_INFO(dev_priv->device,
"invalid ioctl code %08x\n", cmd);
result = -EINVAL;
break;
}
return result;
}
static void z180_power_stats(struct kgsl_device *device,
struct kgsl_power_stats *stats)
{
stats->total_time = 0;
stats->busy_time = 0;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (pwr->time == 0) {
pwr->time = ktime_to_us(ktime_get());
stats->total_time = 0;
stats->busy_time = 0;
} else {
s64 tmp;
tmp = ktime_to_us(ktime_get());
stats->total_time = tmp - pwr->time;
stats->busy_time = tmp - pwr->time;
pwr->time = tmp;
}
}
static void __devinit z180_getfunctable(struct kgsl_functable *ftbl)
static void z180_irqctrl(struct kgsl_device *device, int state)
{
if (ftbl == NULL)
return;
ftbl->device_regread = z180_regread;
ftbl->device_regwrite = z180_regwrite;
ftbl->device_regread_isr = z180_regread_isr;
ftbl->device_regwrite_isr = z180_regwrite_isr;
ftbl->device_setstate = z180_setstate;
ftbl->device_idle = z180_idle;
ftbl->device_isidle = z180_isidle;
ftbl->device_suspend_context = z180_suspend_context;
ftbl->device_resume_context = z180_resume_context;
ftbl->device_start = z180_start;
ftbl->device_stop = z180_stop;
ftbl->device_getproperty = z180_getproperty;
ftbl->device_waittimestamp = z180_waittimestamp;
ftbl->device_readtimestamp = z180_readtimestamp;
ftbl->device_issueibcmds = z180_cmdstream_issueibcmds;
ftbl->device_drawctxt_create = NULL;
ftbl->device_drawctxt_destroy = z180_drawctxt_destroy;
ftbl->device_ioctl = z180_ioctl;
ftbl->device_setup_pt = z180_setup_pt;
ftbl->device_cleanup_pt = z180_cleanup_pt;
ftbl->device_power_stats = z180_power_stats;
/* Control interrupts for Z180 and the Z180 MMU */
if (state) {
z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 3);
z180_regwrite(device, MH_INTERRUPT_MASK, KGSL_MMU_INT_MASK);
} else {
z180_regwrite(device, (ADDR_VGC_IRQENABLE >> 2), 0);
z180_regwrite(device, MH_INTERRUPT_MASK, 0);
}
}
static const struct kgsl_functable z180_functable = {
/* Mandatory functions */
.regread = z180_regread,
.regwrite = z180_regwrite,
.idle = z180_idle,
.isidle = z180_isidle,
.suspend_context = z180_suspend_context,
.start = z180_start,
.stop = z180_stop,
.getproperty = z180_getproperty,
.waittimestamp = z180_waittimestamp,
.readtimestamp = z180_readtimestamp,
.issueibcmds = z180_cmdstream_issueibcmds,
.setup_pt = z180_setup_pt,
.cleanup_pt = z180_cleanup_pt,
.power_stats = z180_power_stats,
.irqctrl = z180_irqctrl,
/* Optional functions */
.drawctxt_create = NULL,
.drawctxt_destroy = z180_drawctxt_destroy,
.ioctl = NULL,
};
static struct platform_device_id z180_id_table[] = {
{ DEVICE_2D0_NAME, (kernel_ulong_t)&device_2d0.dev, },
{ DEVICE_2D1_NAME, (kernel_ulong_t)&device_2d1.dev, },

@ -1,34 +1,20 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __Z180_H
#define __Z180_H
#include "kgsl_device.h"
#define DEVICE_2D_NAME "kgsl-2d"
#define DEVICE_2D0_NAME "kgsl-2d0"
#define DEVICE_2D1_NAME "kgsl-2d1"

@ -1,29 +1,13 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __Z80_REG_H
@ -48,36 +32,8 @@
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
#define ADDR_MH_ARBITER_CONFIG 0x0A40
#define ADDR_MH_INTERRUPT_CLEAR 0x0A44
#define ADDR_MH_INTERRUPT_MASK 0x0A42
#define ADDR_MH_INTERRUPT_STATUS 0x0A43
#define ADDR_MH_AXI_ERROR 0x0A45
#define ADDR_MH_AXI_HALT_CONTROL 0x0A50
#define ADDR_MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define ADDR_MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
#define ADDR_MH_MMU_CONFIG 0x0040
#define ADDR_MH_MMU_INVALIDATE 0x0045
#define ADDR_MH_MMU_MPU_BASE 0x0046
#define ADDR_MH_MMU_MPU_END 0x0047
#define ADDR_MH_MMU_PT_BASE 0x0042
#define ADDR_MH_MMU_TRAN_ERROR 0x0044
#define ADDR_MH_MMU_VA_RANGE 0x0041
#define ADDR_VGC_MH_READ_ADDR 0x0510
#define ADDR_VGC_MH_DATA_ADDR 0x0518
#define ADDR_MH_MMU_PAGE_FAULT 0x0043
#define ADDR_VGC_COMMANDSTREAM 0x0000
#define ADDR_VGC_IRQENABLE 0x0438
#define ADDR_VGC_IRQSTATUS 0x0418

@ -12,15 +12,7 @@ obj-$(CONFIG_LKDTM) += lkdtm.o
obj-$(CONFIG_TIFM_CORE) += tifm_core.o
obj-$(CONFIG_TIFM_7XX1) += tifm_7xx1.o
obj-$(CONFIG_PHANTOM) += phantom.o
ifeq ($(CONFIG_ARCH_MSM7227),y)
obj-$(CONFIG_ANDROID_PMEM) += pmem_7x27.o
else
ifeq ($(CONFIG_ARCH_MSM7X30),y)
obj-$(CONFIG_ANDROID_PMEM) += pmem_7x30.o
else
obj-$(CONFIG_ANDROID_PMEM) += pmem.o
endif
endif
obj-$(CONFIG_ANDROID_PMEM) += pmem.o
obj-$(CONFIG_SGI_IOC4) += ioc4.o
obj-$(CONFIG_ENCLOSURE_SERVICES) += enclosure.o
obj-$(CONFIG_KERNEL_DEBUGGER_CORE) += kernel_debugger.o

File diff suppressed because it is too large

@ -20,7 +20,7 @@
#include <linux/proc_fs.h>
#include <linux/string.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include <asm/io.h>
#if defined(CONFIG_VERY_EARLY_CONSOLE)
#include <asm/mach/map.h>
@ -395,7 +395,7 @@ static ssize_t ram_console_read_old(struct file *file, char __user *buf,
return count;
}
static const struct file_operations ram_console_file_ops = {
static struct file_operations ram_console_file_ops = {
.owner = THIS_MODULE,
.read = ram_console_read_old,
};

include/drm/kgsl_drm.h (new file, 221 lines)

@ -0,0 +1,221 @@
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _KGSL_DRM_H_
#define _KGSL_DRM_H_
#include "drm.h"
#define DRM_KGSL_GEM_CREATE 0x00
#define DRM_KGSL_GEM_PREP 0x01
#define DRM_KGSL_GEM_SETMEMTYPE 0x02
#define DRM_KGSL_GEM_GETMEMTYPE 0x03
#define DRM_KGSL_GEM_MMAP 0x04
#define DRM_KGSL_GEM_ALLOC 0x05
#define DRM_KGSL_GEM_BIND_GPU 0x06
#define DRM_KGSL_GEM_UNBIND_GPU 0x07
#define DRM_KGSL_GEM_GET_BUFINFO 0x08
#define DRM_KGSL_GEM_SET_BUFCOUNT 0x09
#define DRM_KGSL_GEM_SET_ACTIVE 0x0A
#define DRM_KGSL_GEM_LOCK_HANDLE 0x0B
#define DRM_KGSL_GEM_UNLOCK_HANDLE 0x0C
#define DRM_KGSL_GEM_UNLOCK_ON_TS 0x0D
#define DRM_KGSL_GEM_CREATE_FD 0x0E
#define DRM_IOCTL_KGSL_GEM_CREATE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE, struct drm_kgsl_gem_create)
#define DRM_IOCTL_KGSL_GEM_PREP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_PREP, struct drm_kgsl_gem_prep)
#define DRM_IOCTL_KGSL_GEM_SETMEMTYPE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SETMEMTYPE, \
struct drm_kgsl_gem_memtype)
#define DRM_IOCTL_KGSL_GEM_GETMEMTYPE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GETMEMTYPE, \
struct drm_kgsl_gem_memtype)
#define DRM_IOCTL_KGSL_GEM_MMAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_MMAP, struct drm_kgsl_gem_mmap)
#define DRM_IOCTL_KGSL_GEM_ALLOC \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_ALLOC, struct drm_kgsl_gem_alloc)
#define DRM_IOCTL_KGSL_GEM_BIND_GPU \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_BIND_GPU, struct drm_kgsl_gem_bind_gpu)
#define DRM_IOCTL_KGSL_GEM_UNBIND_GPU \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNBIND_GPU, \
struct drm_kgsl_gem_bind_gpu)
#define DRM_IOCTL_KGSL_GEM_GET_BUFINFO \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GET_BUFINFO, \
struct drm_kgsl_gem_bufinfo)
#define DRM_IOCTL_KGSL_GEM_SET_BUFCOUNT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_BUFCOUNT, \
struct drm_kgsl_gem_bufcount)
#define DRM_IOCTL_KGSL_GEM_SET_ACTIVE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_ACTIVE, \
struct drm_kgsl_gem_active)
#define DRM_IOCTL_KGSL_GEM_LOCK_HANDLE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_LOCK_HANDLE, \
struct drm_kgsl_gem_lock_handles)
#define DRM_IOCTL_KGSL_GEM_UNLOCK_HANDLE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNLOCK_HANDLE, \
struct drm_kgsl_gem_unlock_handles)
#define DRM_IOCTL_KGSL_GEM_UNLOCK_ON_TS \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNLOCK_ON_TS, \
struct drm_kgsl_gem_unlock_on_ts)
#define DRM_IOCTL_KGSL_GEM_CREATE_FD \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE_FD, \
struct drm_kgsl_gem_create_fd)
/* Maximum number of sub buffers per GEM object */
#define DRM_KGSL_GEM_MAX_BUFFERS 2
/* Memory types - these define the source and caching policies
of the GEM memory chunk */
/* Legacy definitions left for compatibility */
#define DRM_KGSL_GEM_TYPE_EBI 0
#define DRM_KGSL_GEM_TYPE_SMI 1
#define DRM_KGSL_GEM_TYPE_KMEM 2
#define DRM_KGSL_GEM_TYPE_KMEM_NOCACHE 3
#define DRM_KGSL_GEM_TYPE_MEM_MASK 0xF
/* Contiguous memory (PMEM) */
#define DRM_KGSL_GEM_TYPE_PMEM 0x000100
/* PMEM memory types */
#define DRM_KGSL_GEM_PMEM_EBI 0x001000
#define DRM_KGSL_GEM_PMEM_SMI 0x002000
/* Standard paged memory */
#define DRM_KGSL_GEM_TYPE_MEM 0x010000
/* Caching controls */
#define DRM_KGSL_GEM_CACHE_NONE 0x000000
#define DRM_KGSL_GEM_CACHE_WCOMBINE 0x100000
#define DRM_KGSL_GEM_CACHE_WTHROUGH 0x200000
#define DRM_KGSL_GEM_CACHE_WBACK 0x400000
#define DRM_KGSL_GEM_CACHE_WBACKWA 0x800000
#define DRM_KGSL_GEM_CACHE_MASK 0xF00000
/* FD based objects */
#define DRM_KGSL_GEM_TYPE_FD_FBMEM 0x1000000
#define DRM_KGSL_GEM_TYPE_FD_MASK 0xF000000
/* Timestamp types */
#define DRM_KGSL_GEM_TS_3D 0x00000430
#define DRM_KGSL_GEM_TS_2D 0x00000180
struct drm_kgsl_gem_create {
uint32_t size;
uint32_t handle;
};
struct drm_kgsl_gem_prep {
uint32_t handle;
uint32_t phys;
uint64_t offset;
};
struct drm_kgsl_gem_memtype {
uint32_t handle;
uint32_t type;
};
struct drm_kgsl_gem_mmap {
uint32_t handle;
uint32_t size;
uint32_t hostptr;
uint64_t offset;
};
struct drm_kgsl_gem_alloc {
uint32_t handle;
uint64_t offset;
};
struct drm_kgsl_gem_bind_gpu {
uint32_t handle;
uint32_t gpuptr;
};
struct drm_kgsl_gem_bufinfo {
uint32_t handle;
uint32_t count;
uint32_t active;
uint32_t offset[DRM_KGSL_GEM_MAX_BUFFERS];
uint32_t gpuaddr[DRM_KGSL_GEM_MAX_BUFFERS];
};
struct drm_kgsl_gem_bufcount {
uint32_t handle;
uint32_t bufcount;
};
struct drm_kgsl_gem_active {
uint32_t handle;
uint32_t active;
};
struct drm_kgsl_gem_lock_handles {
uint32_t num_handles;
uint32_t *handle_list;
uint32_t pid;
uint32_t lock_id; /* Returned lock id used for unlocking */
};
struct drm_kgsl_gem_unlock_handles {
uint32_t lock_id;
};
struct drm_kgsl_gem_unlock_on_ts {
uint32_t lock_id;
uint32_t timestamp; /* This field is a hw generated ts */
uint32_t type; /* Which pipe to check for ts generation */
};
struct drm_kgsl_gem_create_fd {
uint32_t fd;
uint32_t handle;
};
#endif
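A hypothetical user-space sketch of the DRM_IOCTL_KGSL_GEM_CREATE path declared above; the device node name and error handling are assumptions, and this header (which pulls in drm.h) is assumed to be on the include path:

#include <fcntl.h>
#include <stdint.h>
#include <sys/ioctl.h>
#include <unistd.h>

int kgsl_gem_create_example(uint32_t size, uint32_t *handle)
{
	struct drm_kgsl_gem_create req = { .size = size, .handle = 0 };
	int fd = open("/dev/dri/card0", O_RDWR); /* assumed node */

	if (fd < 0)
		return -1;
	if (ioctl(fd, DRM_IOCTL_KGSL_GEM_CREATE, &req) < 0) {
		close(fd);
		return -1;
	}
	*handle = req.handle; /* kernel-assigned GEM handle */
	return fd;
}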

@ -16,6 +16,7 @@
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <linux/rbtree.h>
#include <linux/pfn.h>
struct mem_pool {
struct mutex pool_mutex;
@ -56,4 +57,3 @@ unsigned long memory_pool_node_len(void *vaddr);
int memory_pool_init(void);
#endif /* _LINUX_MEMALLOC_H */

@ -35,7 +35,7 @@
#define _MSM_KGSL_H
#define KGSL_VERSION_MAJOR 3
#define KGSL_VERSION_MINOR 7
#define KGSL_VERSION_MINOR 8
/*context flags */
#define KGSL_CONTEXT_SAVE_GMEM 1
@ -60,6 +60,9 @@
#define KGSL_MAX_PWRLEVELS 5
#define KGSL_CONVERT_TO_MBPS(val) \
((val)*1000*1000U)
/* device id */
enum kgsl_deviceid {
KGSL_DEVICE_3D0 = 0x00000000,
@ -170,7 +173,6 @@ struct kgsl_device_pwr_data {
int (*set_grp_async)(void);
unsigned int idle_timeout;
unsigned int nap_allowed;
unsigned int idle_pass;
};
struct kgsl_clk_data {
@ -183,6 +185,8 @@ struct kgsl_device_platform_data {
struct kgsl_clk_data clk;
/* imem_clk_name is for 3d only, not used in 2d devices */
struct kgsl_grp_clk_name imem_clk_name;
const char *iommu_user_ctx_name;
const char *iommu_priv_ctx_name;
};
#endif
@ -454,6 +458,30 @@ struct kgsl_cff_syncmem {
#define IOCTL_KGSL_CFF_SYNCMEM \
_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
/*
* A timestamp event allows the user space to register an action following an
* expired timestamp.
*/
struct kgsl_timestamp_event {
int type; /* Type of event (see list below) */
unsigned int timestamp; /* Timestamp to trigger event on */
unsigned int context_id; /* Context for the timestamp */
void *priv; /* Pointer to the event specific blob */
size_t len; /* Size of the event specific blob */
};
#define IOCTL_KGSL_TIMESTAMP_EVENT \
_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_timestamp_event)
/* A genlock timestamp event releases an existing lock on timestamp expire */
#define KGSL_TIMESTAMP_EVENT_GENLOCK 1
struct kgsl_timestamp_event_genlock {
int handle; /* Handle of the genlock lock to release */
};
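A hypothetical user-space sketch of arming a genlock release on timestamp expiry with the structures above; the file descriptors, ids, and function name are assumptions:

#include <sys/ioctl.h>

static int arm_genlock_event(int kgsl_fd, int lock_fd,
			     unsigned int ctx_id, unsigned int ts)
{
	struct kgsl_timestamp_event_genlock gl = { .handle = lock_fd };
	struct kgsl_timestamp_event ev = {
		.type = KGSL_TIMESTAMP_EVENT_GENLOCK,
		.timestamp = ts,	/* fires once this ts expires */
		.context_id = ctx_id,
		.priv = &gl,
		.len = sizeof(gl),
	};

	return ioctl(kgsl_fd, IOCTL_KGSL_TIMESTAMP_EVENT, &ev);
}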
#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,

@ -46,6 +46,8 @@
#define VDEC_IOCTL_GETDECATTRIBUTES _IOR(VDEC_IOCTL_MAGIC, 10, \
struct vdec_dec_attributes)
#define VDEC_IOCTL_GETVERSION _IOR(VDEC_IOCTL_MAGIC, 11, struct vdec_version)
#define VDEC_IOCTL_PERFORMANCE_CHANGE_REQ _IOW(VDEC_IOCTL_MAGIC, 14, \
unsigned int)
enum {
VDEC_FRAME_DECODE_OK,
@ -83,6 +85,13 @@ enum {
VDEC_COLOR_FORMAT_NV21_YAMOTO = 0x02
};
enum {
PERF_REQUEST_SET_MIN = 0,
PERF_REQUEST_LOWER,
PERF_REQUEST_RAISE,
PERF_REQUEST_SET_MAX
};
struct vdec_input_buf_info {
u32 offset;
u32 data;

@ -1,6 +1,8 @@
/* interface for the pm_qos_power infrastructure of the linux kernel.
*
* Mark Gross <mgross@linux.intel.com>
*
* Copyright (c) 2010, Code Aurora Forum. All rights reserved.
*/
#ifndef __PM_QOS_PARAMS_H__
#define __PM_QOS_PARAMS_H__
@ -57,3 +59,4 @@ int pm_qos_add_notifier(int qos, struct notifier_block *notifier);
int pm_qos_remove_notifier(int qos, struct notifier_block *notifier);
#endif /* __PM_QOS_PARAMS_H__ */

@ -12,7 +12,8 @@ lib-y := ctype.o string.o vsprintf.o cmdline.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o flex_array.o memcopy.o
is_single_threaded.o plist.o decompress.o flex_array.o memcopy.o \
memory_alloc.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o
@ -38,9 +39,10 @@ lib-$(CONFIG_RWSEM_GENERIC_SPINLOCK) += rwsem-spinlock.o
lib-$(CONFIG_RWSEM_XCHGADD_ALGORITHM) += rwsem.o
lib-$(CONFIG_GENERIC_FIND_FIRST_BIT) += find_next_bit.o
lib-$(CONFIG_GENERIC_FIND_NEXT_BIT) += find_next_bit.o
obj-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
lib-$(CONFIG_GENERIC_FIND_LAST_BIT) += find_last_bit.o
obj-$(CONFIG_GENERIC_HWEIGHT) += hweight.o
obj-$(CONFIG_LOCK_KERNEL) += kernel_lock.o
obj-$(CONFIG_PLIST) += plist.o
obj-$(CONFIG_DEBUG_PREEMPT) += smp_processor_id.o
obj-$(CONFIG_DEBUG_LIST) += list_debug.o
obj-$(CONFIG_DEBUG_OBJECTS) += debugobjects.o

@ -29,10 +29,9 @@
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>
#include <asm/cacheflush.h>
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_NAME_PREFIX ""
#define ASHMEM_NAME_PREFIX_LEN 0
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)
/*
@ -46,8 +45,6 @@ struct ashmem_area {
struct list_head unpinned_list; /* list of all ashmem areas */
struct file *file; /* the shmem-based backing file */
size_t size; /* size of the mapping, in bytes */
unsigned long vm_start; /* Start address of vm_area
* which maps this ashmem */
unsigned long prot_mask; /* allowed prot bits, as vm_flags */
};
@ -181,7 +178,7 @@ static int ashmem_open(struct inode *inode, struct file *file)
struct ashmem_area *asma;
int ret;
ret = generic_file_open(inode, file);
ret = nonseekable_open(inode, file);
if (unlikely(ret))
return ret;
@ -190,7 +187,6 @@ static int ashmem_open(struct inode *inode, struct file *file)
return -ENOMEM;
INIT_LIST_HEAD(&asma->unpinned_list);
memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
asma->prot_mask = PROT_MASK;
file->private_data = asma;
@ -214,67 +210,6 @@ static int ashmem_release(struct inode *ignored, struct file *file)
return 0;
}
static ssize_t ashmem_read(struct file *file, char __user *buf,
size_t len, loff_t *pos)
{
struct ashmem_area *asma = file->private_data;
int ret = 0;
mutex_lock(&ashmem_mutex);
/* If size is not set, or set to 0, always return EOF. */
if (asma->size == 0) {
goto out;
}
if (!asma->file) {
ret = -EBADF;
goto out;
}
ret = asma->file->f_op->read(asma->file, buf, len, pos);
if (ret < 0) {
goto out;
}
/** Update backing file pos, since f_ops->read() doesn't */
asma->file->f_pos = *pos;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
struct ashmem_area *asma = file->private_data;
int ret;
mutex_lock(&ashmem_mutex);
if (asma->size == 0) {
ret = -EINVAL;
goto out;
}
if (!asma->file) {
ret = -EBADF;
goto out;
}
ret = asma->file->f_op->llseek(asma->file, offset, origin);
if (ret < 0) {
goto out;
}
/** Copy f_pos from backing file, since f_ops->llseek() sets it */
file->f_pos = asma->file->f_pos;
out:
mutex_unlock(&ashmem_mutex);
return ret;
}
static inline unsigned long
calc_vm_may_flags(unsigned long prot)
{
@ -329,7 +264,6 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
vma->vm_file = asma->file;
}
vma->vm_flags |= VM_CAN_NONLINEAR;
asma->vm_start = vma->vm_start;
out:
mutex_unlock(&ashmem_mutex);
@ -351,7 +285,7 @@ out:
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
struct ashmem_range *range, *next;
@ -630,69 +564,6 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
return ret;
}
#ifdef CONFIG_OUTER_CACHE
static unsigned int virtaddr_to_physaddr(unsigned int virtaddr)
{
unsigned int physaddr = 0;
pgd_t *pgd_ptr = NULL;
pmd_t *pmd_ptr = NULL;
pte_t *pte_ptr = NULL, pte;
spin_lock(&current->mm->page_table_lock);
pgd_ptr = pgd_offset(current->mm, virtaddr);
if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
pr_err("Failed to convert virtaddr %x to pgd_ptr\n",
virtaddr);
goto done;
}
pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n",
(void *)pgd_ptr);
goto done;
}
pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
if (!pte_ptr) {
pr_err("Failed to convert pmd_ptr %p to pte_ptr\n",
(void *)pmd_ptr);
goto done;
}
pte = *pte_ptr;
physaddr = pte_pfn(pte);
pte_unmap(pte_ptr);
done:
spin_unlock(&current->mm->page_table_lock);
physaddr <<= PAGE_SHIFT;
return physaddr;
}
#endif
static int ashmem_cache_op(struct ashmem_area *asma,
void (*cache_func)(unsigned long vstart, unsigned long length,
unsigned long pstart))
{
#ifdef CONFIG_OUTER_CACHE
unsigned long vaddr;
#endif
mutex_lock(&ashmem_mutex);
#ifndef CONFIG_OUTER_CACHE
cache_func(asma->vm_start, asma->size, 0);
#else
for (vaddr = asma->vm_start; vaddr < asma->vm_start + asma->size;
vaddr += PAGE_SIZE) {
unsigned long physaddr;
physaddr = virtaddr_to_physaddr(vaddr);
if (!physaddr)
return -EINVAL;
cache_func(vaddr, PAGE_SIZE, physaddr);
}
#endif
mutex_unlock(&ashmem_mutex);
return 0;
}
static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
struct ashmem_area *asma = file->private_data;
@ -729,19 +600,10 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case ASHMEM_PURGE_ALL_CACHES:
ret = -EPERM;
if (capable(CAP_SYS_ADMIN)) {
ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL);
ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL);
ret = ashmem_shrink(0, GFP_KERNEL);
ashmem_shrink(ret, GFP_KERNEL);
}
break;
case ASHMEM_CACHE_FLUSH_RANGE:
ret = ashmem_cache_op(asma, &clean_and_invalidate_caches);
break;
case ASHMEM_CACHE_CLEAN_RANGE:
ret = ashmem_cache_op(asma, &clean_caches);
break;
case ASHMEM_CACHE_INV_RANGE:
ret = ashmem_cache_op(asma, &invalidate_caches);
break;
}
return ret;
@ -804,8 +666,6 @@ static struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,
.release = ashmem_release,
.read = ashmem_read,
.llseek = ashmem_llseek,
.mmap = ashmem_mmap,
.unlocked_ioctl = ashmem_ioctl,
.compat_ioctl = ashmem_ioctl,