Added the latest KGSL driver /dev/kgsl-3d0 for ICS HWA (Hardware Acceleration). (Credits to Securecrt and Rick_1995)

tytung 2012-05-01 13:12:22 +08:00
parent 0290a3be7c
commit c6de4393cf
77 changed files with 22054 additions and 69 deletions
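The new node is the userspace entry point: the ICS Adreno blobs open /dev/kgsl-3d0 and drive the GPU through the msm_kgsl.h ioctl interface. A minimal sanity-check sketch (hypothetical test program, not part of the commit) that just confirms the node came up after boot:

/* check_kgsl.c: verify the kgsl-3d0 node exists and is openable. */
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/dev/kgsl-3d0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kgsl-3d0");
		return 1;
	}
	printf("kgsl-3d0 present, 3D driver is up\n");
	close(fd);
	return 0;
}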


@ -185,6 +185,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l)
#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l)
#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
cpu_to_le16(v),__mem_pci(c)))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
cpu_to_le32(v),__mem_pci(c)))
#define writeb(v,c) __raw_writeb(v,__mem_pci(c))
#define writew(v,c) __raw_writew((__force __u16) \
cpu_to_le16(v),__mem_pci(c))
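This hunk backports the _relaxed MMIO accessors that the new KGSL code relies on. Unlike writel(), the relaxed variants skip the expensive memory barrier, so a driver can batch register writes and order the whole batch with one explicit barrier. A sketch (hypothetical register offsets):

/* Two back-to-back config writes with a single ordering point. */
writel_relaxed(cfg0, base + 0x00);
writel_relaxed(cfg1, base + 0x04);
wmb();	/* make both writes visible before kicking the device */
writel(go, base + 0x08);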


@ -59,6 +59,10 @@
#include <mach/htc_headset_mgr.h>
#include <mach/htc_headset_gpio.h>
#ifdef CONFIG_MSM_KGSL
#include <linux/msm_kgsl.h>
#endif
#include <mach/board-htcleo-microp.h>
#include "board-htcleo.h"
@ -790,19 +794,83 @@ static struct platform_device msm_kgsl_device =
.num_resources = ARRAY_SIZE(msm_kgsl_resources),
};
#ifdef CONFIG_MSM_KGSL
/* start kgsl */
static struct resource kgsl_3d0_resources[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0xA0000000,
.end = 0xA001ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_IRQ,
.start = INT_GRAPHICS,
.end = INT_GRAPHICS,
.flags = IORESOURCE_IRQ,
},
};
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwr_data = {
.pwrlevel = {
{
.gpu_freq = 0,
.bus_freq = 128000000,
},
},
.init_level = 0,
.num_levels = 1,
.set_grp_async = NULL,
.idle_timeout = HZ/5,
},
.clk = {
.name = {
.clk = "grp_clk",
},
},
.imem_clk_name = {
.clk = "imem_clk",
},
};
struct platform_device msm_kgsl_3d0 = {
.name = "kgsl-3d0",
.id = 0,
.num_resources = ARRAY_SIZE(kgsl_3d0_resources),
.resource = kgsl_3d0_resources,
.dev = {
.platform_data = &kgsl_3d0_pdata,
},
};
/* end kgsl */
#endif
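For context, a sketch of the consumer side (not part of this diff; the real probe lives in drivers/gpu/msm/): the driver looks these resources up by name with the standard platform API, which is why KGSL_3D0_REG_MEMORY and KGSL_3D0_IRQ above must match what it requests.

/* Hypothetical probe fragment consuming the board data above. */
static int kgsl_3d0_probe_sketch(struct platform_device *pdev)
{
	struct kgsl_device_platform_data *pdata = pdev->dev.platform_data;
	struct resource *regs = platform_get_resource_byname(pdev,
				IORESOURCE_MEM, KGSL_3D0_REG_MEMORY);
	int irq = platform_get_irq_byname(pdev, KGSL_3D0_IRQ);

	if (regs == NULL || irq < 0 || pdata == NULL)
		return -ENODEV;
	/* Map regs->start..regs->end (0xA0000000..0xA001ffff here),
	 * hook up INT_GRAPHICS, then apply pdata->pwr_data. */
	return 0;
}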
///////////////////////////////////////////////////////////////////////
// Memory
///////////////////////////////////////////////////////////////////////
static struct android_pmem_platform_data mdp_pmem_pdata = {
.name = "pmem",
.start = MSM_PMEM_MDP_BASE,
.size = MSM_PMEM_MDP_SIZE,
#ifdef CONFIG_MSM_KGSL
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
#else
.no_allocator = 0,
#endif
.cached = 1,
};
static struct android_pmem_platform_data android_pmem_adsp_pdata = {
.name = "pmem_adsp",
.start = MSM_PMEM_ADSP_BASE,
.size = MSM_PMEM_ADSP_SIZE,
#ifdef CONFIG_MSM_KGSL
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
#else
.no_allocator = 0,
#endif
.cached = 1,
};
@ -811,7 +879,11 @@ static struct android_pmem_platform_data android_pmem_venc_pdata = {
.name = "pmem_venc",
.start = MSM_PMEM_VENC_BASE,
.size = MSM_PMEM_VENC_SIZE,
#ifdef CONFIG_MSM_KGSL
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
#else
.no_allocator = 0,
#endif
.cached = 1,
};
@ -945,7 +1017,11 @@ static struct platform_device *devices[] __initdata =
&msm_device_i2c,
&ds2746_battery_pdev,
&htc_battery_pdev,
#ifdef CONFIG_MSM_KGSL
&msm_kgsl_3d0,
#else
&msm_kgsl_device,
#endif
&msm_camera_sensor_s5k3e2fx,
&htcleo_flashlight_device,
&qsd_device_spi,


@ -38,6 +38,12 @@
#define MSM_FB_BASE MSM_PMEM_SMI_BASE
#define MSM_FB_SIZE 0x00600000
#define MSM_PMEM_MDP_BASE 0x3B700000
#define MSM_PMEM_MDP_SIZE 0x02000000
#define MSM_PMEM_ADSP_BASE 0x3D700000
#define MSM_PMEM_ADSP_SIZE 0x02900000
#define MSM_GPU_PHYS_BASE (MSM_PMEM_SMI_BASE + MSM_FB_SIZE)
#define MSM_GPU_PHYS_SIZE 0x00800000
/* #define MSM_GPU_PHYS_SIZE 0x00300000 */
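A quick consistency check on the new layout, derived from the values above: the MDP and ADSP pmem regions are contiguous, and the GPU aperture sits directly after the framebuffer in SMI memory:

MSM_PMEM_MDP_BASE + MSM_PMEM_MDP_SIZE = 0x3B700000 + 0x02000000
                                      = 0x3D700000 = MSM_PMEM_ADSP_BASE
MSM_GPU_PHYS_BASE = MSM_PMEM_SMI_BASE + MSM_FB_SIZE (i.e. right after the 0x00600000 framebuffer)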
@ -54,8 +60,6 @@
#define MSM_PMEM_SF_SIZE 0x02000000
#define MSM_PMEM_ADSP_SIZE 0x02196000
/* MSM_RAM_CONSOLE uses the last 0x00040000 of EBI memory, defined in msm_iomap.h
#define MSM_RAM_CONSOLE_SIZE 0x00040000
#define MSM_RAM_CONSOLE_BASE (MSM_EBI1_BANK0_BASE + MSM_EBI1_BANK0_SIZE - MSM_RAM_CONSOLE_SIZE) //0x2FFC0000


@ -0,0 +1,134 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, version 2, in which case the provisions
* of the GPL version 2 are required INSTEAD OF the BSD license.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#ifndef _ARCH_ARM_MACH_MSM_BUS_H
#define _ARCH_ARM_MACH_MSM_BUS_H
#include <linux/types.h>
#include <linux/input.h>
/*
* Macros for clients to convert their data to ib and ab
* Ws : Time window over which to transfer the data in SECONDS
* Bs : Size of the data block in bytes
* Per : Recurrence period
* Tb : Throughput bandwidth to prevent stalling
* R : Ratio of actual bandwidth used to Tb
* Ib : Instantaneous bandwidth
* Ab : Arbitrated bandwidth
*
* IB_RECURRBLOCK and AB_RECURRBLOCK:
* These are used if the requirement is to transfer a
* recurring block of data over a known time window.
*
* IB_THROUGHPUTBW and AB_THROUGHPUTBW:
* These are used for CPU style masters. Here the requirement
* is to have minimum throughput bandwidth available to avoid
* stalling.
*/
#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
#define AB_RECURRBLOCK(Bs, Per) ((Per) == 0 ? 0 : ((Bs)/(Per)))
#define IB_THROUGHPUTBW(Tb) (Tb)
#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
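A worked example of the macros above (illustrative numbers only): a client that must move a 1 MB block within a 5 ms window, recurring every 16 ms, would request

Ib = IB_RECURRBLOCK(Ws, Bs)  = Bs/Ws  = 1048576 B / 0.005 s ~ 210 MB/s
Ab = AB_RECURRBLOCK(Bs, Per) = Bs/Per = 1048576 B / 0.016 s ~  66 MB/s

so the arbiter guarantees the average rate (Ab) while the instantaneous rate (Ib) covers the burst.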
struct msm_bus_vectors {
int src; /* Master */
int dst; /* Slave */
unsigned int ab; /* Arbitrated bandwidth */
unsigned int ib; /* Instantaneous bandwidth */
};
struct msm_bus_paths {
int num_paths;
struct msm_bus_vectors *vectors;
};
struct msm_bus_scale_pdata {
struct msm_bus_paths *usecase;
int num_usecases;
const char *name;
/*
* If the active_only flag is set to 1, the BW request is applied
* only when at least one CPU is active (powered on). If the flag
* is set to 0, then the BW request is always applied irrespective
* of the CPU state.
*/
unsigned int active_only;
};
/* Scaling APIs */
/*
* This function returns a handle to the client. This should be used to
* call msm_bus_scale_client_update_request.
* The function returns 0 if bus driver is unable to register a client
*/
#ifdef CONFIG_MSM_BUS_SCALING
uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
void msm_bus_scale_unregister_client(uint32_t cl);
/* AXI Port configuration APIs */
int msm_bus_axi_porthalt(int master_port);
int msm_bus_axi_portunhalt(int master_port);
#else
static inline uint32_t
msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
{
return 1;
}
static inline int
msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
{
return 0;
}
static inline void
msm_bus_scale_unregister_client(uint32_t cl)
{
}
static inline int msm_bus_axi_porthalt(int master_port)
{
return 0;
}
static inline int msm_bus_axi_portunhalt(int master_port)
{
return 0;
}
#endif
#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
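A sketch of the client side, using only the API declared above (the master/slave IDs and bandwidth numbers are hypothetical): build one vector per path, group vectors into usecases, register once, then switch usecases at runtime.

static struct msm_bus_vectors gfx_idle_vectors[] = {
	{ .src = 0, .dst = 1, .ab = 0, .ib = 0 },
};
static struct msm_bus_vectors gfx_busy_vectors[] = {
	{ .src = 0, .dst = 1, .ab = 66000000, .ib = 210000000 },
};
static struct msm_bus_paths gfx_usecases[] = {
	{ .num_paths = 1, .vectors = gfx_idle_vectors },
	{ .num_paths = 1, .vectors = gfx_busy_vectors },
};
static struct msm_bus_scale_pdata gfx_bus_pdata = {
	.usecase = gfx_usecases,
	.num_usecases = ARRAY_SIZE(gfx_usecases),
	.name = "grp3d",
	.active_only = 1,
};

/* init: cl == 0 means the bus driver could not register us */
uint32_t cl = msm_bus_scale_register_client(&gfx_bus_pdata);

/* runtime: the index selects an entry in usecase[] */
msm_bus_scale_client_update_request(cl, 1);	/* busy */
msm_bus_scale_client_update_request(cl, 0);	/* idle */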


@ -1 +1,2 @@
obj-y += drm/ vga/
obj-$(CONFIG_MSM_KGSL) += msm/

drivers/gpu/msm/Kconfig (new file, 105 lines)

@ -0,0 +1,105 @@
config MSM_KGSL
tristate "MSM 3D Graphics driver"
default n
depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
select GENERIC_ALLOCATOR
select FW_LOADER
---help---
3D graphics driver. Required to use hardware-accelerated
OpenGL ES 2.0 and 1.1.
config MSM_KGSL_CFF_DUMP
bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
default n
depends on MSM_KGSL
select RELAY
---help---
This is an analysis and diagnostic feature only. It should only be
turned on during KGSL GPU diagnostics; it slows down KGSL
performance significantly, hence *do not use in production builds*.
When enabled, CFF Dump is on at boot. It can be turned off at runtime
via 'echo 0 > /d/kgsl/cff_dump'. The log can be captured via
/d/kgsl-cff/cpu[0|1].
config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
bool "When selected will disable KGSL CFF Dump for context switches"
default n
depends on MSM_KGSL_CFF_DUMP
---help---
Dumping all the memory for every context switch can produce very
large log files; turn this feature on to reduce their size.
config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
bool "Disable human readable CP_STAT fields in post-mortem dump"
default n
depends on MSM_KGSL
---help---
For a more compact kernel log, the human-readable output of
CP_STAT can be turned off with this option.
config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
bool "Disable dumping current IB1 and IB2 in post-mortem dump"
default n
depends on MSM_KGSL
---help---
For a more compact kernel log, the IB1 and IB2 embedded dump
can be turned off with this option. Some IB dumps take up
so much space that other vital information gets cut from the
post-mortem dump.
config MSM_KGSL_PSTMRTMDMP_RB_HEX
bool "Use hex version for ring-buffer in post-mortem dump"
default n
depends on MSM_KGSL
---help---
Use the hex version for the ring-buffer in the post-mortem dump,
instead of the human-readable version.
config MSM_KGSL_2D
tristate "MSM 2D graphics driver. Required for OpenVG"
default y
depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
config MSM_KGSL_DRM
bool "Build a DRM interface for the MSM_KGSL driver"
depends on MSM_KGSL && DRM
config MSM_KGSL_MMU
bool "Enable the GPU MMU in the MSM_KGSL driver"
depends on MSM_KGSL && MMU && !MSM_KGSL_CFF_DUMP
default y
config KGSL_PER_PROCESS_PAGE_TABLE
bool "Enable Per Process page tables for the KGSL driver"
default n
depends on MSM_KGSL_MMU && !MSM_KGSL_DRM
---help---
The MMU will use per-process pagetables when enabled.
config MSM_KGSL_PAGE_TABLE_SIZE
hex "Size of pagetables"
default 0xFFF0000
depends on MSM_KGSL_MMU
---help---
Sets the pagetable size used by the MMU. The max value
is 0xFFF0000 or (256M - 64K).
config MSM_KGSL_PAGE_TABLE_COUNT
int "Minimum of concurrent pagetables to support"
default 8
depends on KGSL_PER_PROCESS_PAGE_TABLE
---help---
Specify the number of pagetables to allocate at init time.
This is the number of concurrent processes that are guaranteed
to run at any time. Additional processes can be created dynamically,
assuming there is enough contiguous memory to allocate the pagetable.
config MSM_KGSL_MMU_PAGE_FAULT
bool "Force the GPU MMU to page fault for unmapped regions"
default y
depends on MSM_KGSL_MMU
config MSM_KGSL_DISABLE_SHADOW_WRITES
bool "Disable register shadow writes for context switches"
default n
depends on MSM_KGSL
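Putting it together, a htcleo config that selects the new driver instead of the legacy one would carry a fragment along these lines (illustrative; exact options depend on the defconfig):

CONFIG_MSM_KGSL=y
CONFIG_MSM_KGSL_MMU=y
# CONFIG_MSM_KGSL_CFF_DUMP is not set
# CONFIG_MSM_KGSL_DRM is not set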

drivers/gpu/msm/Makefile (new file, 30 lines)

@ -0,0 +1,30 @@
ccflags-y := -Iinclude/drm
msm_kgsl_core-y = \
kgsl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
kgsl_pwrscale.o
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_MMU) += kgsl_mmu.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
adreno_postmortem.o \
adreno.o
msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
msm_z180-y += z180.o
msm_kgsl_core-objs = $(msm_kgsl_core-y)
msm_adreno-objs = $(msm_adreno-y)
msm_z180-objs = $(msm_z180-y)
obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o

drivers/gpu/msm/a200_reg.h (new file, 448 lines)

@ -0,0 +1,448 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __A200_REG_H
#define __A200_REG_H
enum VGT_EVENT_TYPE {
VS_DEALLOC = 0,
PS_DEALLOC = 1,
VS_DONE_TS = 2,
PS_DONE_TS = 3,
CACHE_FLUSH_TS = 4,
CONTEXT_DONE = 5,
CACHE_FLUSH = 6,
VIZQUERY_START = 7,
VIZQUERY_END = 8,
SC_WAIT_WC = 9,
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
PERFCOUNTER_START = 23,
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
};
enum COLORFORMATX {
COLORX_4_4_4_4 = 0,
COLORX_1_5_5_5 = 1,
COLORX_5_6_5 = 2,
COLORX_8 = 3,
COLORX_8_8 = 4,
COLORX_8_8_8_8 = 5,
COLORX_S8_8_8_8 = 6,
COLORX_16_FLOAT = 7,
COLORX_16_16_FLOAT = 8,
COLORX_16_16_16_16_FLOAT = 9,
COLORX_32_FLOAT = 10,
COLORX_32_32_FLOAT = 11,
COLORX_32_32_32_32_FLOAT = 12,
COLORX_2_3_3 = 13,
COLORX_8_8_8 = 14,
};
enum SURFACEFORMAT {
FMT_1_REVERSE = 0,
FMT_1 = 1,
FMT_8 = 2,
FMT_1_5_5_5 = 3,
FMT_5_6_5 = 4,
FMT_6_5_5 = 5,
FMT_8_8_8_8 = 6,
FMT_2_10_10_10 = 7,
FMT_8_A = 8,
FMT_8_B = 9,
FMT_8_8 = 10,
FMT_Cr_Y1_Cb_Y0 = 11,
FMT_Y1_Cr_Y0_Cb = 12,
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
FMT_10_11_11 = 16,
FMT_11_11_10 = 17,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
FMT_24_8 = 22,
FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
FMT_16_EXPAND = 27,
FMT_16_16_EXPAND = 28,
FMT_16_16_16_16_EXPAND = 29,
FMT_16_FLOAT = 30,
FMT_16_16_FLOAT = 31,
FMT_16_16_16_16_FLOAT = 32,
FMT_32 = 33,
FMT_32_32 = 34,
FMT_32_32_32_32 = 35,
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
FMT_32_AS_8 = 39,
FMT_32_AS_8_8 = 40,
FMT_16_MPEG = 41,
FMT_16_16_MPEG = 42,
FMT_8_INTERLACED = 43,
FMT_32_AS_8_INTERLACED = 44,
FMT_32_AS_8_8_INTERLACED = 45,
FMT_16_INTERLACED = 46,
FMT_16_MPEG_INTERLACED = 47,
FMT_16_16_MPEG_INTERLACED = 48,
FMT_DXN = 49,
FMT_8_8_8_8_AS_16_16_16_16 = 50,
FMT_DXT1_AS_16_16_16_16 = 51,
FMT_DXT2_3_AS_16_16_16_16 = 52,
FMT_DXT4_5_AS_16_16_16_16 = 53,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
FMT_10_11_11_AS_16_16_16_16 = 55,
FMT_11_11_10_AS_16_16_16_16 = 56,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
FMT_DXT3A_AS_1_1_1_1 = 61
};
#define REG_PERF_MODE_CNT 0x0
#define REG_PERF_STATE_RESET 0x0
#define REG_PERF_STATE_ENABLE 0x1
#define REG_PERF_STATE_FREEZE 0x2
#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
#define RB_EDRAM_INFO_UNUSED0_SIZE 8
#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
struct rb_edram_info_t {
unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
};
union reg_rb_edram_info {
unsigned int val;
struct rb_edram_info_t f;
};
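These unions give a typed view over a raw register word: read the register into .val, then pick the fields apart through .f. A sketch of the decode pattern (illustrative, using kgsl_regread as the rest of the driver does):

union reg_rb_edram_info edram;

kgsl_regread(device, REG_RB_EDRAM_INFO, &edram.val);
pr_info("edram: size=%u mapping=%u range=%u\n",
	edram.f.edram_size, edram.f.edram_mapping_mode,
	edram.f.edram_range);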
#define RBBM_READ_ERROR_UNUSED0_SIZE 2
#define RBBM_READ_ERROR_READ_ADDRESS_SIZE 15
#define RBBM_READ_ERROR_UNUSED1_SIZE 13
#define RBBM_READ_ERROR_READ_REQUESTER_SIZE 1
#define RBBM_READ_ERROR_READ_ERROR_SIZE 1
struct rbbm_read_error_t {
unsigned int unused0:RBBM_READ_ERROR_UNUSED0_SIZE;
unsigned int read_address:RBBM_READ_ERROR_READ_ADDRESS_SIZE;
unsigned int unused1:RBBM_READ_ERROR_UNUSED1_SIZE;
unsigned int read_requester:RBBM_READ_ERROR_READ_REQUESTER_SIZE;
unsigned int read_error:RBBM_READ_ERROR_READ_ERROR_SIZE;
};
union rbbm_read_error_u {
unsigned int val:32;
struct rbbm_read_error_t f;
};
#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
#define CP_RB_CNTL_UNUSED0_SIZE 2
#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
#define CP_RB_CNTL_UNUSED1_SIZE 2
#define CP_RB_CNTL_BUF_SWAP_SIZE 2
#define CP_RB_CNTL_UNUSED2_SIZE 2
#define CP_RB_CNTL_RB_POLL_EN_SIZE 1
#define CP_RB_CNTL_UNUSED3_SIZE 6
#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1
#define CP_RB_CNTL_UNUSED4_SIZE 3
#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
struct cp_rb_cntl_t {
unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
};
union reg_cp_rb_cntl {
unsigned int val:32;
struct cp_rb_cntl_t f;
};
#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004
#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL
#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L
#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L
#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L
#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L
#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L
#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L
#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L
#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L
#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L
#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L
#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L
#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L
#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L
#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L
#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L
#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L
#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L
#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L
#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
#define CP_INT_CNTL__SW_INT_MASK 0x00080000L
#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L
#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L
#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L
#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L
#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L
#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L
#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L
#define CP_INT_CNTL__RB_INT_MASK 0x80000000L
#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L
#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L
#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L
#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L
#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
#define REG_CP_CSQ_IB1_STAT 0x01FE
#define REG_CP_CSQ_IB2_STAT 0x01FF
#define REG_CP_CSQ_RB_STAT 0x01FD
#define REG_CP_DEBUG 0x01FC
#define REG_CP_IB1_BASE 0x0458
#define REG_CP_IB1_BUFSZ 0x0459
#define REG_CP_IB2_BASE 0x045A
#define REG_CP_IB2_BUFSZ 0x045B
#define REG_CP_INT_ACK 0x01F4
#define REG_CP_INT_CNTL 0x01F2
#define REG_CP_INT_STATUS 0x01F3
#define REG_CP_ME_CNTL 0x01F6
#define REG_CP_ME_RAM_DATA 0x01FA
#define REG_CP_ME_RAM_WADDR 0x01F8
#define REG_CP_ME_STATUS 0x01F7
#define REG_CP_PFP_UCODE_ADDR 0x00C0
#define REG_CP_PFP_UCODE_DATA 0x00C1
#define REG_CP_QUEUE_THRESHOLDS 0x01D5
#define REG_CP_RB_BASE 0x01C0
#define REG_CP_RB_CNTL 0x01C1
#define REG_CP_RB_RPTR 0x01C4
#define REG_CP_RB_RPTR_ADDR 0x01C3
#define REG_CP_RB_RPTR_WR 0x01C7
#define REG_CP_RB_WPTR 0x01C5
#define REG_CP_RB_WPTR_BASE 0x01C8
#define REG_CP_RB_WPTR_DELAY 0x01C6
#define REG_CP_STAT 0x047F
#define REG_CP_STATE_DEBUG_DATA 0x01ED
#define REG_CP_STATE_DEBUG_INDEX 0x01EC
#define REG_CP_ST_BASE 0x044D
#define REG_CP_ST_BUFSZ 0x044E
#define REG_CP_PERFMON_CNTL 0x0444
#define REG_CP_PERFCOUNTER_SELECT 0x0445
#define REG_CP_PERFCOUNTER_LO 0x0446
#define REG_CP_PERFCOUNTER_HI 0x0447
#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395
#define REG_RBBM_PERFCOUNTER1_HI 0x0398
#define REG_RBBM_PERFCOUNTER1_LO 0x0397
#define REG_MASTER_INT_SIGNAL 0x03B7
#define REG_MH_ARBITER_CONFIG 0x0A40
#define REG_MH_INTERRUPT_CLEAR 0x0A44
#define REG_MH_INTERRUPT_MASK 0x0A42
#define REG_MH_INTERRUPT_STATUS 0x0A43
#define REG_MH_MMU_CONFIG 0x0040
#define REG_MH_MMU_INVALIDATE 0x0045
#define REG_MH_MMU_MPU_BASE 0x0046
#define REG_MH_MMU_MPU_END 0x0047
#define REG_MH_MMU_PAGE_FAULT 0x0043
#define REG_MH_MMU_PT_BASE 0x0042
#define REG_MH_MMU_TRAN_ERROR 0x0044
#define REG_MH_MMU_VA_RANGE 0x0041
#define REG_MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define REG_MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
#define REG_PA_CL_VPORT_XSCALE 0x210F
#define REG_PA_CL_VPORT_ZOFFSET 0x2114
#define REG_PA_CL_VPORT_ZSCALE 0x2113
#define REG_PA_CL_CLIP_CNTL 0x2204
#define REG_PA_CL_VTE_CNTL 0x2206
#define REG_PA_SC_AA_MASK 0x2312
#define REG_PA_SC_LINE_CNTL 0x2300
#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E
#define REG_PA_SC_VIZ_QUERY 0x2293
#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44
#define REG_PA_SC_WINDOW_OFFSET 0x2080
#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082
#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
#define REG_PA_SU_FACE_DATA 0x0C86
#define REG_PA_SU_POINT_SIZE 0x2280
#define REG_PA_SU_LINE_CNTL 0x2282
#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
#define REG_PA_SU_SC_MODE_CNTL 0x2205
#define REG_PC_INDEX_OFFSET 0x2102
#define REG_RBBM_CNTL 0x003B
#define REG_RBBM_INT_ACK 0x03B6
#define REG_RBBM_INT_CNTL 0x03B4
#define REG_RBBM_INT_STATUS 0x03B5
#define REG_RBBM_PATCH_RELEASE 0x0001
#define REG_RBBM_PERIPHID1 0x03F9
#define REG_RBBM_PERIPHID2 0x03FA
#define REG_RBBM_DEBUG 0x039B
#define REG_RBBM_DEBUG_OUT 0x03A0
#define REG_RBBM_DEBUG_CNTL 0x03A1
#define REG_RBBM_PM_OVERRIDE1 0x039C
#define REG_RBBM_PM_OVERRIDE2 0x039D
#define REG_RBBM_READ_ERROR 0x03B3
#define REG_RBBM_SOFT_RESET 0x003C
#define REG_RBBM_STATUS 0x05D0
#define REG_RB_COLORCONTROL 0x2202
#define REG_RB_COLOR_DEST_MASK 0x2326
#define REG_RB_COLOR_MASK 0x2104
#define REG_RB_COPY_CONTROL 0x2318
#define REG_RB_DEPTHCONTROL 0x2200
#define REG_RB_EDRAM_INFO 0x0F02
#define REG_RB_MODECONTROL 0x2208
#define REG_RB_SURFACE_INFO 0x2000
#define REG_RB_SAMPLE_POS 0x220a
#define REG_SCRATCH_ADDR 0x01DD
#define REG_SCRATCH_REG0 0x0578
#define REG_SCRATCH_REG2 0x057A
#define REG_SCRATCH_UMSK 0x01DC
#define REG_SQ_CF_BOOLEANS 0x4900
#define REG_SQ_CF_LOOP 0x4908
#define REG_SQ_GPR_MANAGEMENT 0x0D00
#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
#define REG_SQ_INT_ACK 0x0D36
#define REG_SQ_INT_CNTL 0x0D34
#define REG_SQ_INT_STATUS 0x0D35
#define REG_SQ_PROGRAM_CNTL 0x2180
#define REG_SQ_PS_PROGRAM 0x21F6
#define REG_SQ_VS_PROGRAM 0x21F7
#define REG_SQ_WRAPPING_0 0x2183
#define REG_SQ_WRAPPING_1 0x2184
#define REG_VGT_ENHANCE 0x2294
#define REG_VGT_INDX_OFFSET 0x2102
#define REG_VGT_MAX_VTX_INDX 0x2100
#define REG_VGT_MIN_VTX_INDX 0x2101
#define REG_TP0_CHICKEN 0x0E1E
#define REG_TC_CNTL_STATUS 0x0E00
#define REG_PA_SC_AA_CONFIG 0x2301
#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
#define REG_SQ_INTERPOLATOR_CNTL 0x2182
#define REG_RB_DEPTH_INFO 0x2002
#define REG_COHER_DEST_BASE_0 0x2006
#define REG_RB_FOG_COLOR 0x2109
#define REG_RB_STENCILREFMASK_BF 0x210C
#define REG_PA_SC_LINE_STIPPLE 0x2283
#define REG_SQ_PS_CONST 0x2308
#define REG_RB_DEPTH_CLEAR 0x231D
#define REG_RB_SAMPLE_COUNT_CTL 0x2324
#define REG_SQ_CONSTANT_0 0x4000
#define REG_SQ_FETCH_0 0x4800
#define REG_MH_AXI_ERROR 0xA45
#define REG_MH_DEBUG_CTRL 0xA4E
#define REG_MH_DEBUG_DATA 0xA4F
#define REG_COHER_BASE_PM4 0xA2A
#define REG_COHER_STATUS_PM4 0xA2B
#define REG_COHER_SIZE_PM4 0xA29
#endif /* __A200_REG_H */


@ -0,0 +1,39 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __A205_REG_H
#define __A205_REG_H
#define REG_LEIA_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
#define REG_LEIA_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
#define REG_LEIA_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
#define REG_LEIA_GRAS_CONTROL 0x2210
#define REG_LEIA_VSC_BIN_SIZE 0x0C01
#define REG_LEIA_VSC_PIPE_DATA_LENGTH_7 0x0C1D
#endif /*__A205_REG_H */

drivers/gpu/msm/adreno.c (new file, 1364 lines): diff suppressed because it is too large

drivers/gpu/msm/adreno.h (new file, 121 lines)

@ -0,0 +1,121 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ADRENO_H
#define __ADRENO_H
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"
#define ADRENO_DEVICE(device) \
KGSL_CONTAINER_OF(device, struct adreno_device, dev)
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD 0x00000004
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0xDEADBEEF
#define KGSL_CMD_IDENTIFIER 0xFEEDFACE
struct adreno_device {
struct kgsl_device dev; /* Must be first field in this struct */
unsigned int chip_id;
struct kgsl_memregion gmemspace;
struct adreno_context *drawctxt_active;
wait_queue_head_t ib1_wq;
unsigned int *pfp_fw;
size_t pfp_fw_size;
unsigned int *pm4_fw;
size_t pm4_fw_size;
struct adreno_ringbuffer ringbuffer;
unsigned int mharb;
};
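ADRENO_DEVICE (defined above) is a container_of-style upcast, which is why dev must remain the first field of struct adreno_device. Typical use, sketched:

/* Recover the wrapping adreno_device from an embedded kgsl_device. */
static inline struct adreno_ringbuffer *example_rb(struct kgsl_device *device)
{
	struct adreno_device *adreno_dev = ADRENO_DEVICE(device);

	return &adreno_dev->ringbuffer;
}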
int adreno_idle(struct kgsl_device *device, unsigned int timeout);
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value);
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value);
void adreno_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void adreno_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
uint8_t *kgsl_sharedmem_convertaddr(struct kgsl_device *device,
unsigned int pt_base, unsigned int gpuaddr, unsigned int *size);
enum adreno_gpurev {
ADRENO_REV_UNKNOWN = 0,
ADRENO_REV_A200 = 200,
ADRENO_REV_A205 = 205,
ADRENO_REV_A220 = 220,
ADRENO_REV_A225 = 225,
};
enum adreno_gpurev adreno_get_rev(struct adreno_device *adreno_dev);
static inline int adreno_is_a200(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A200);
}
static inline int adreno_is_a205(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A205);
}
static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
{
enum adreno_gpurev rev = adreno_get_rev(adreno_dev);
return (rev == ADRENO_REV_A200 || rev == ADRENO_REV_A205);
}
static inline int adreno_is_a220(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A220);
}
static inline int adreno_is_a225(struct adreno_device *adreno_dev)
{
return (adreno_get_rev(adreno_dev) == ADRENO_REV_A225);
}
static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
{
enum adreno_gpurev rev = adreno_get_rev(adreno_dev);
return (rev == ADRENO_REV_A220 || rev == ADRENO_REV_A225);
}
#endif /*__ADRENO_H */


@ -0,0 +1,450 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include "kgsl.h"
#include "adreno_postmortem.h"
#include "adreno.h"
#include "a200_reg.h"
unsigned int kgsl_cff_dump_enable;
int kgsl_pm_regs_enabled;
static uint32_t kgsl_ib_base;
static uint32_t kgsl_ib_size;
static struct dentry *pm_d_debugfs;
static int pm_dump_set(void *data, u64 val)
{
struct kgsl_device *device = data;
if (val) {
mutex_lock(&device->mutex);
adreno_postmortem_dump(device, 1);
mutex_unlock(&device->mutex);
}
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops,
NULL,
pm_dump_set, "%llu\n");
static int pm_regs_enabled_set(void *data, u64 val)
{
kgsl_pm_regs_enabled = val ? 1 : 0;
return 0;
}
static int pm_regs_enabled_get(void *data, u64 *val)
{
*val = kgsl_pm_regs_enabled;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
pm_regs_enabled_get,
pm_regs_enabled_set, "%llu\n");
static int kgsl_cff_dump_enable_set(void *data, u64 val)
{
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
kgsl_cff_dump_enable = (val != 0);
return 0;
#else
return -EINVAL;
#endif
}
static int kgsl_cff_dump_enable_get(void *data, u64 *val)
{
*val = kgsl_cff_dump_enable;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
kgsl_cff_dump_enable_set, "%llu\n");
static int kgsl_dbgfs_open(struct inode *inode, struct file *file)
{
file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
file->private_data = inode->i_private;
return 0;
}
static int kgsl_dbgfs_release(struct inode *inode, struct file *file)
{
return 0;
}
static int kgsl_hex_dump(const char *prefix, int c, uint8_t *data,
int rowc, int linec, char __user *buff)
{
int ss;
/* Prefix of 20 chars max, 32 bytes per row, in groups of four - that's
* 8 groups at 8 chars per group plus a space, plus new-line, plus
* ending character */
char linebuf[20 + 64 + 1 + 1];
ss = snprintf(linebuf, sizeof(linebuf), prefix, c);
hex_dump_to_buffer(data, linec, rowc, 4, linebuf+ss,
sizeof(linebuf)-ss, 0);
strlcat(linebuf, "\n", sizeof(linebuf));
linebuf[sizeof(linebuf)-1] = 0;
ss = strlen(linebuf);
if (copy_to_user(buff, linebuf, ss+1))
return -EFAULT;
return ss;
}
static ssize_t kgsl_ib_dump_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
int i, count = kgsl_ib_size, remaining, pos = 0, tot = 0, ss;
struct kgsl_device *device = file->private_data;
const int rowc = 32;
unsigned int pt_base, ib_memsize;
uint8_t *base_addr;
char linebuf[80];
if (!ppos || !device || !kgsl_ib_base)
return 0;
kgsl_regread(device, REG_MH_MMU_PT_BASE, &pt_base);
base_addr = kgsl_sharedmem_convertaddr(device, pt_base, kgsl_ib_base,
&ib_memsize);
if (!base_addr)
return 0;
pr_info("%s ppos=%ld, buff_count=%d, count=%d\n", __func__, (long)*ppos,
buff_count, count);
ss = snprintf(linebuf, sizeof(linebuf), "IB: base=%08x(%08x"
"), size=%d, memsize=%d\n", kgsl_ib_base,
(uint32_t)base_addr, kgsl_ib_size, ib_memsize);
if (*ppos == 0) {
if (copy_to_user(buff, linebuf, ss+1))
return -EFAULT;
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
remaining = count;
for (i = 0; i < count; i += rowc) {
int linec = min(remaining, rowc);
remaining -= rowc;
ss = kgsl_hex_dump("IB: %05x: ", i, base_addr, rowc, linec,
buff);
if (ss < 0)
return ss;
if (pos >= *ppos) {
if (tot+ss >= buff_count) {
ss = copy_to_user(buff, "", 1);
return tot;
}
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
base_addr += linec;
}
return tot;
}
static ssize_t kgsl_ib_dump_write(
struct file *file,
const char __user *buff,
size_t count,
loff_t *ppos)
{
char local_buff[64];
if (count >= sizeof(local_buff))
return -EFAULT;
if (copy_from_user(local_buff, buff, count))
return -EFAULT;
local_buff[count] = 0; /* end of string */
sscanf(local_buff, "%x %d", &kgsl_ib_base, &kgsl_ib_size);
pr_info("%s: base=%08X size=%d\n", __func__, kgsl_ib_base,
kgsl_ib_size);
return count;
}
static const struct file_operations kgsl_ib_dump_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_ib_dump_read,
.write = kgsl_ib_dump_write,
};
static int kgsl_regread_nolock(struct kgsl_device *device,
unsigned int offsetwords, unsigned int *value)
{
unsigned int *reg;
if (offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes) {
KGSL_DRV_ERR(device, "invalid offset %d\n", offsetwords);
return -ERANGE;
}
reg = (unsigned int *)(device->regspace.mmio_virt_base
+ (offsetwords << 2));
*value = __raw_readl(reg);
return 0;
}
#define KGSL_ISTORE_START 0x5000
#define KGSL_ISTORE_LENGTH 0x600
static ssize_t kgsl_istore_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
int i, count = KGSL_ISTORE_LENGTH, remaining, pos = 0, tot = 0;
struct kgsl_device *device = file->private_data;
const int rowc = 8;
if (!ppos || !device)
return 0;
remaining = count;
for (i = 0; i < count; i += rowc) {
unsigned int vals[rowc];
int j, ss;
int linec = min(remaining, rowc);
remaining -= rowc;
if (pos >= *ppos) {
for (j = 0; j < linec; ++j)
kgsl_regread_nolock(device,
KGSL_ISTORE_START+i+j, vals+j);
} else
memset(vals, 0, sizeof(vals));
ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4,
linec*4, buff);
if (ss < 0)
return ss;
if (pos >= *ppos) {
if (tot+ss >= buff_count)
return tot;
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
}
return tot;
}
static const struct file_operations kgsl_istore_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_istore_read,
.llseek = default_llseek,
};
typedef void (*reg_read_init_t)(struct kgsl_device *device);
typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
unsigned int *vals, int linec);
static ssize_t kgsl_reg_read(struct kgsl_device *device, int count,
reg_read_init_t reg_read_init,
reg_read_fill_t reg_read_fill, const char *prefix, char __user *buff,
loff_t *ppos)
{
int i, remaining;
const int rowc = 8;
if (!ppos || *ppos || !device)
return 0;
mutex_lock(&device->mutex);
reg_read_init(device);
remaining = count;
for (i = 0; i < count; i += rowc) {
unsigned int vals[rowc];
int ss;
int linec = min(remaining, rowc);
remaining -= rowc;
reg_read_fill(device, i, vals, linec);
ss = kgsl_hex_dump(prefix, i, (uint8_t *)vals, rowc*4, linec*4,
buff);
if (ss < 0) {
mutex_unlock(&device->mutex);
return ss;
}
buff += ss;
*ppos += ss;
}
mutex_unlock(&device->mutex);
return *ppos;
}
static void kgsl_sx_reg_read_init(struct kgsl_device *device)
{
kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
}
static void kgsl_sx_reg_read_fill(struct kgsl_device *device, int i,
unsigned int *vals, int linec)
{
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
}
}
static ssize_t kgsl_sx_debug_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
struct kgsl_device *device = file->private_data;
return kgsl_reg_read(device, 0x1B, kgsl_sx_reg_read_init,
kgsl_sx_reg_read_fill, "SX: %02x: ", buff, ppos);
}
static const struct file_operations kgsl_sx_debug_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_sx_debug_read,
};
static void kgsl_cp_reg_read_init(struct kgsl_device *device)
{
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
}
static void kgsl_cp_reg_read_fill(struct kgsl_device *device, int i,
unsigned int *vals, int linec)
{
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
msleep(100);
}
}
static ssize_t kgsl_cp_debug_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
struct kgsl_device *device = file->private_data;
return kgsl_reg_read(device, 20, kgsl_cp_reg_read_init,
kgsl_cp_reg_read_fill,
"CP: %02x: ", buff, ppos);
}
static const struct file_operations kgsl_cp_debug_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_cp_debug_read,
};
static void kgsl_mh_reg_read_init(struct kgsl_device *device)
{
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
}
static void kgsl_mh_reg_read_fill(struct kgsl_device *device, int i,
unsigned int *vals, int linec)
{
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, REG_MH_DEBUG_CTRL, i+j);
kgsl_regread(device, REG_MH_DEBUG_DATA, vals+j);
}
}
static ssize_t kgsl_mh_debug_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
struct kgsl_device *device = file->private_data;
return kgsl_reg_read(device, 0x40, kgsl_mh_reg_read_init,
kgsl_mh_reg_read_fill,
"MH: %02x: ", buff, ppos);
}
static const struct file_operations kgsl_mh_debug_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_mh_debug_read,
};
void adreno_debugfs_init(struct kgsl_device *device)
{
if (!device->d_debugfs || IS_ERR(device->d_debugfs))
return;
debugfs_create_file("ib_dump", 0600, device->d_debugfs, device,
&kgsl_ib_dump_fops);
debugfs_create_file("istore", 0400, device->d_debugfs, device,
&kgsl_istore_fops);
debugfs_create_file("sx_debug", 0400, device->d_debugfs, device,
&kgsl_sx_debug_fops);
debugfs_create_file("cp_debug", 0400, device->d_debugfs, device,
&kgsl_cp_debug_fops);
debugfs_create_file("mh_debug", 0400, device->d_debugfs, device,
&kgsl_mh_debug_fops);
debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
&kgsl_cff_dump_enable_fops);
/* Create post mortem control files */
pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
if (IS_ERR(pm_d_debugfs))
return;
debugfs_create_file("dump", 0600, pm_d_debugfs, device,
&pm_dump_fops);
debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
&pm_regs_enabled_fops);
}
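With debugfs mounted, the files created above can be driven from a shell. A sketch, assuming the device directory ends up at /d/kgsl-3d0 (the actual path depends on where the core creates device->d_debugfs); the ib_dump write format is "<hex base> <size>" per kgsl_ib_dump_write above:

echo 1 > /d/kgsl-3d0/postmortem/regs_enabled
echo 1 > /d/kgsl-3d0/postmortem/dump
echo "a0000000 64" > /d/kgsl-3d0/ib_dump
cat /d/kgsl-3d0/ib_dump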


@ -0,0 +1,56 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ADRENO_DEBUGFS_H
#define __ADRENO_DEBUGFS_H
#ifdef CONFIG_DEBUG_FS
void adreno_debugfs_init(struct kgsl_device *device);
extern int kgsl_pm_regs_enabled;
static inline int kgsl_pmregs_enabled(void)
{
return kgsl_pm_regs_enabled;
}
#else
static inline void adreno_debugfs_init(struct kgsl_device *device)
{
}
static inline int kgsl_pmregs_enabled(void)
{
/* If debugfs is turned off, then always print registers */
return 1;
}
#endif
#endif /* __ADRENO_DEBUGFS_H */

File diff suppressed because it is too large


@ -0,0 +1,113 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ADRENO_DRAWCTXT_H
#define __ADRENO_DRAWCTXT_H
#include "a200_reg.h"
#include "a220_reg.h"
/* Flags */
#define CTXT_FLAGS_NOT_IN_USE 0x00000000
#define CTXT_FLAGS_IN_USE 0x00000001
/* state shadow memory allocated */
#define CTXT_FLAGS_STATE_SHADOW 0x00000010
/* gmem shadow memory allocated */
#define CTXT_FLAGS_GMEM_SHADOW 0x00000100
/* gmem must be copied to shadow */
#define CTXT_FLAGS_GMEM_SAVE 0x00000200
/* gmem can be restored from shadow */
#define CTXT_FLAGS_GMEM_RESTORE 0x00000400
/* shader must be copied to shadow */
#define CTXT_FLAGS_SHADER_SAVE 0x00002000
/* shader can be restored from shadow */
#define CTXT_FLAGS_SHADER_RESTORE 0x00004000
/* Context has caused a GPU hang */
#define CTXT_FLAGS_GPU_HANG 0x00008000
struct kgsl_device;
struct adreno_device;
struct kgsl_device_private;
struct kgsl_context;
/* draw context */
struct gmem_shadow_t {
struct kgsl_memdesc gmemshadow; /* Shadow buffer address */
/* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
* 256 rows. */
/* width & height must be a multiples of 32, in case tiled textures
* are used. */
enum COLORFORMATX format;
unsigned int size; /* Size of surface used to store GMEM */
unsigned int width; /* Width of surface used to store GMEM */
unsigned int height; /* Height of surface used to store GMEM */
unsigned int pitch; /* Pitch of surface used to store GMEM */
unsigned int gmem_pitch; /* Pitch value used for GMEM */
unsigned int *gmem_save_commands;
unsigned int *gmem_restore_commands;
unsigned int gmem_save[3];
unsigned int gmem_restore[3];
struct kgsl_memdesc quad_vertices;
struct kgsl_memdesc quad_texcoords;
};
struct adreno_context {
uint32_t flags;
struct kgsl_pagetable *pagetable;
struct kgsl_memdesc gpustate;
unsigned int reg_save[3];
unsigned int reg_restore[3];
unsigned int shader_save[3];
unsigned int shader_fixup[3];
unsigned int shader_restore[3];
unsigned int chicken_restore[3];
unsigned int bin_base_offset;
/* Information of the GMEM shadow that is created in context create */
struct gmem_shadow_t context_gmem_shadow;
};
int adreno_drawctxt_create(struct kgsl_device_private *dev_priv,
uint32_t flags,
struct kgsl_context *context);
int adreno_drawctxt_destroy(struct kgsl_device *device,
struct kgsl_context *context);
void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags);
int adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset);
#endif /* __ADRENO_DRAWCTXT_H */


@ -0,0 +1,193 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ADRENO_PM4TYPES_H
#define __ADRENO_PM4TYPES_H
#define PM4_PKT_MASK 0xc0000000
#define PM4_TYPE0_PKT ((unsigned int)0 << 30)
#define PM4_TYPE1_PKT ((unsigned int)1 << 30)
#define PM4_TYPE2_PKT ((unsigned int)2 << 30)
#define PM4_TYPE3_PKT ((unsigned int)3 << 30)
/* type3 packets */
/* initialize CP's micro-engine */
#define PM4_ME_INIT 0x48
/* skip N 32-bit words to get to the next packet */
#define PM4_NOP 0x10
/* indirect buffer dispatch. prefetch parser uses this packet type to determine
* whether to pre-fetch the IB
*/
#define PM4_INDIRECT_BUFFER 0x3f
/* indirect buffer dispatch. same as IB, but init is pipelined */
#define PM4_INDIRECT_BUFFER_PFD 0x37
/* wait for the IDLE state of the engine */
#define PM4_WAIT_FOR_IDLE 0x26
/* wait until a register or memory location is a specific value */
#define PM4_WAIT_REG_MEM 0x3c
/* wait until a register location is equal to a specific value */
#define PM4_WAIT_REG_EQ 0x52
/* wait until a register location is >= a specific value */
#define PM4_WAT_REG_GTE 0x53
/* wait until a read completes */
#define PM4_WAIT_UNTIL_READ 0x5c
/* wait until all base/size writes from an IB_PFD packet have completed */
#define PM4_WAIT_IB_PFD_COMPLETE 0x5d
/* register read/modify/write */
#define PM4_REG_RMW 0x21
/* reads register in chip and writes to memory */
#define PM4_REG_TO_MEM 0x3e
/* write N 32-bit words to memory */
#define PM4_MEM_WRITE 0x3d
/* write CP_PROG_COUNTER value to memory */
#define PM4_MEM_WRITE_CNTR 0x4f
/* conditional execution of a sequence of packets */
#define PM4_COND_EXEC 0x44
/* conditional write to memory or register */
#define PM4_COND_WRITE 0x45
/* generate an event that creates a write to memory when completed */
#define PM4_EVENT_WRITE 0x46
/* generate a VS|PS_done event */
#define PM4_EVENT_WRITE_SHD 0x58
/* generate a cache flush done event */
#define PM4_EVENT_WRITE_CFL 0x59
/* generate a z_pass done event */
#define PM4_EVENT_WRITE_ZPD 0x5b
/* initiate fetch of index buffer and draw */
#define PM4_DRAW_INDX 0x22
/* draw using supplied indices in packet */
#define PM4_DRAW_INDX_2 0x36
/* initiate fetch of index buffer and binIDs and draw */
#define PM4_DRAW_INDX_BIN 0x34
/* initiate fetch of bin IDs and draw using supplied indices */
#define PM4_DRAW_INDX_2_BIN 0x35
/* begin/end initiator for viz query extent processing */
#define PM4_VIZ_QUERY 0x23
/* fetch state sub-blocks and initiate shader code DMAs */
#define PM4_SET_STATE 0x25
/* load constant into chip and to memory */
#define PM4_SET_CONSTANT 0x2d
/* load sequencer instruction memory (pointer-based) */
#define PM4_IM_LOAD 0x27
/* load sequencer instruction memory (code embedded in packet) */
#define PM4_IM_LOAD_IMMEDIATE 0x2b
/* load constants from a location in memory */
#define PM4_LOAD_CONSTANT_CONTEXT 0x2e
/* selective invalidation of state pointers */
#define PM4_INVALIDATE_STATE 0x3b
/* dynamically changes shader instruction memory partition */
#define PM4_SET_SHADER_BASES 0x4A
/* sets the 64-bit BIN_MASK register in the PFP */
#define PM4_SET_BIN_MASK 0x50
/* sets the 64-bit BIN_SELECT register in the PFP */
#define PM4_SET_BIN_SELECT 0x51
/* updates the current context, if needed */
#define PM4_CONTEXT_UPDATE 0x5e
/* generate interrupt from the command stream */
#define PM4_INTERRUPT 0x40
/* copy sequencer instruction memory to system memory */
#define PM4_IM_STORE 0x2c
/* program an offset that will be added to the BIN_BASE value of
* the 3D_DRAW_INDX_BIN packet */
#define PM4_SET_BIN_BASE_OFFSET 0x4B
#define PM4_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
/* packet header building macros */
#define pm4_type0_packet(regindx, cnt) \
(PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
#define pm4_type0_packet_for_sameregister(regindx, cnt) \
(PM4_TYPE0_PKT | (((cnt)-1) << 16) | (1 << 15) | \
((regindx) & 0x7FFF))
#define pm4_type1_packet(reg0, reg1) \
(PM4_TYPE1_PKT | ((reg1) << 12) | (reg0))
#define pm4_type3_packet(opcode, cnt) \
(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
#define pm4_predicated_type3_packet(opcode, cnt) \
(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
#define pm4_nop_packet(cnt) \
(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (PM4_NOP << 8))
/* packet headers */
#define PM4_HDR_ME_INIT pm4_type3_packet(PM4_ME_INIT, 18)
#define PM4_HDR_INDIRECT_BUFFER_PFD pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)
#define PM4_HDR_INDIRECT_BUFFER pm4_type3_packet(PM4_INDIRECT_BUFFER, 2)
#endif /* __ADRENO_PM4TYPES_H */
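These macros pack the 2-bit packet type, the dword count, and the opcode or register index into a single header word. A sketch of how ringbuffer code builds packets with them (the cmds buffer is hypothetical; the NOP-plus-marker pattern mirrors KGSL_CMD_IDENTIFIER from adreno.h):

unsigned int cmds[4];

/* Type-3 WAIT_FOR_IDLE: header plus one (ignored) payload dword. */
cmds[0] = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
cmds[1] = 0x00000000;
/* NOP carrying one marker dword. */
cmds[2] = pm4_nop_packet(1);
cmds[3] = 0xFEEDFACE;	/* KGSL_CMD_IDENTIFIER */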


@ -0,0 +1,854 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/vmalloc.h>
#include "kgsl.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_postmortem.h"
#include "adreno_debugfs.h"
#include "kgsl_cffdump.h"
#include "a200_reg.h"
#define INVALID_RB_CMD 0xaaaaaaaa
struct pm_id_name {
uint32_t id;
char name[9];
};
static const struct pm_id_name pm0_types[] = {
{REG_PA_SC_AA_CONFIG, "RPASCAAC"},
{REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"},
{REG_SCRATCH_REG2, "RSCRTRG2"},
{REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"},
{REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"},
{REG_TC_CNTL_STATUS, "RTCCNTLS"},
{REG_TP0_CHICKEN, "RTP0CHCK"},
{REG_CP_TIMESTAMP, "CP_TM_ST"},
};
static const struct pm_id_name pm3_types[] = {
{PM4_COND_EXEC, "CND_EXEC"},
{PM4_CONTEXT_UPDATE, "CX__UPDT"},
{PM4_DRAW_INDX, "DRW_NDX_"},
{PM4_DRAW_INDX_BIN, "DRW_NDXB"},
{PM4_EVENT_WRITE, "EVENT_WT"},
{PM4_IM_LOAD, "IN__LOAD"},
{PM4_IM_LOAD_IMMEDIATE, "IM_LOADI"},
{PM4_IM_STORE, "IM_STORE"},
{PM4_INDIRECT_BUFFER, "IND_BUF_"},
{PM4_INDIRECT_BUFFER_PFD, "IND_BUFP"},
{PM4_INTERRUPT, "PM4_INTR"},
{PM4_INVALIDATE_STATE, "INV_STAT"},
{PM4_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
{PM4_ME_INIT, "ME__INIT"},
{PM4_NOP, "PM4__NOP"},
{PM4_REG_RMW, "REG__RMW"},
{PM4_REG_TO_MEM, "REG2_MEM"},
{PM4_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
{PM4_SET_CONSTANT, "ST_CONST"},
{PM4_SET_PROTECTED_MODE, "ST_PRT_M"},
{PM4_SET_SHADER_BASES, "ST_SHD_B"},
{PM4_WAIT_FOR_IDLE, "WAIT4IDL"},
};
/* Offset address pairs: start, end of range to dump (inclusive) */
/* GPU < Z470 */
static const int a200_registers[] = {
0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
0x0100, 0x0110, 0x0118, 0x011c,
0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
0x0fe0, 0x0fec, 0x1100, 0x1100,
0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
0x2900, 0x290c, 0x2914, 0x2914, 0x2938, 0x293c,
0x30b0, 0x30b0, 0x30c0, 0x30c0, 0x30e0, 0x30f0,
0x3100, 0x3100, 0x3110, 0x3110, 0x3200, 0x3218,
0x3220, 0x3250, 0x3264, 0x3268, 0x3290, 0x3294,
0x3400, 0x340c, 0x3418, 0x3418, 0x3420, 0x342c,
0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
0x3c04, 0x3c08, 0x3c30, 0x3c30, 0x3c38, 0x3c48,
0x3c98, 0x3ca8, 0x3cb0, 0x3cb0,
0x8000, 0x8008, 0x8018, 0x803c, 0x8200, 0x8208,
0x8400, 0x8424, 0x8430, 0x8450, 0x8600, 0x8610,
0x87d4, 0x87dc, 0x8800, 0x8820, 0x8a00, 0x8a0c,
0x8a4c, 0x8a50, 0x8c00, 0x8c20, 0x8c48, 0x8c48,
0x8c58, 0x8c74, 0x8c90, 0x8c98, 0x8e00, 0x8e0c,
0x9000, 0x9008, 0x9018, 0x903c, 0x9200, 0x9208,
0x9400, 0x9424, 0x9430, 0x9450, 0x9600, 0x9610,
0x97d4, 0x97dc, 0x9800, 0x9820, 0x9a00, 0x9a0c,
0x9a4c, 0x9a50, 0x9c00, 0x9c20, 0x9c48, 0x9c48,
0x9c58, 0x9c74, 0x9c90, 0x9c98, 0x9e00, 0x9e0c,
0x10000, 0x1000c, 0x12000, 0x12014,
0x12400, 0x12400, 0x12420, 0x12420
};
/* GPU = Z470 */
static const int a220_registers[] = {
0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
0x0100, 0x0110, 0x0118, 0x011c,
0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
0x0fe0, 0x0fec, 0x1100, 0x1100,
0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
0x2900, 0x2900, 0x2908, 0x290c, 0x2914, 0x2914,
0x2938, 0x293c, 0x30c0, 0x30c0, 0x30e0, 0x30e4,
0x30f0, 0x30f0, 0x3200, 0x3204, 0x3220, 0x324c,
0x3400, 0x340c, 0x3414, 0x3418, 0x3420, 0x342c,
0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
0x3c04, 0x3c08, 0x8000, 0x8008, 0x8018, 0x803c,
0x8200, 0x8208, 0x8400, 0x8408, 0x8410, 0x8424,
0x8430, 0x8450, 0x8600, 0x8610, 0x87d4, 0x87dc,
0x8800, 0x8808, 0x8810, 0x8810, 0x8820, 0x8820,
0x8a00, 0x8a08, 0x8a50, 0x8a50,
0x8c00, 0x8c20, 0x8c24, 0x8c28, 0x8c48, 0x8c48,
0x8c58, 0x8c58, 0x8c60, 0x8c74, 0x8c90, 0x8c98,
0x8e00, 0x8e0c, 0x9000, 0x9008, 0x9018, 0x903c,
0x9200, 0x9208, 0x9400, 0x9408, 0x9410, 0x9424,
0x9430, 0x9450, 0x9600, 0x9610, 0x97d4, 0x97dc,
0x9800, 0x9808, 0x9810, 0x9818, 0x9820, 0x9820,
0x9a00, 0x9a08, 0x9a50, 0x9a50, 0x9c00, 0x9c20,
0x9c48, 0x9c48, 0x9c58, 0x9c58, 0x9c60, 0x9c74,
0x9c90, 0x9c98, 0x9e00, 0x9e0c,
0x10000, 0x1000c, 0x12000, 0x12014,
0x12400, 0x12400, 0x12420, 0x12420
};
static uint32_t adreno_is_pm4_len(uint32_t word)
{
if (word == INVALID_RB_CMD)
return 0;
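	/* bits [29:16] of a packet header hold the payload length minus one */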
return (word >> 16) & 0x3FFF;
}
static bool adreno_is_pm4_type(uint32_t word)
{
int i;
if (word == INVALID_RB_CMD)
return 1;
if (adreno_is_pm4_len(word) > 16)
return 0;
if ((word & (3<<30)) == PM4_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return 1;
}
return 0;
}
if ((word & (3<<30)) == PM4_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return 1;
}
return 0;
}
return 0;
}
static const char *adreno_pm4_name(uint32_t word)
{
int i;
if (word == INVALID_RB_CMD)
return "--------";
if ((word & (3<<30)) == PM4_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return pm0_types[i].name;
}
return "????????";
}
if ((word & (3<<30)) == PM4_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return pm3_types[i].name;
}
return "????????";
}
return "????????";
}
static void adreno_dump_regs(struct kgsl_device *device,
const int *registers, int size)
{
int range = 0, offset = 0;
for (range = 0; range < size; range++) {
/* start and end are in dword offsets */
int start = registers[range * 2] / 4;
int end = registers[range * 2 + 1] / 4;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
int linelen, i;
for (offset = start; offset <= end; offset += linelen) {
unsigned int regvals[32/4];
linelen = min(end+1-offset, 32/4);
for (i = 0; i < linelen; ++i)
kgsl_regread(device, offset+i, regvals+i);
hex_dump_to_buffer(regvals, linelen*4, 32, 4,
linebuf, sizeof(linebuf), 0);
KGSL_LOG_DUMP(device,
"REG: %5.5X: %s\n", offset<<2, linebuf);
}
}
}
static void dump_ib(struct kgsl_device *device, char* buffId, uint32_t pt_base,
uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump)
{
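	/* If the IB maps into the kernel and a full dump was requested,
	 * hex-dump its contents; otherwise log a one-line summary. */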
unsigned int memsize;
uint8_t *base_addr = kgsl_sharedmem_convertaddr(device, pt_base,
ib_base, &memsize);
if (base_addr && dump)
print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
32, 4, base_addr, ib_size*4, 0);
else
KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d "
"offset:%5.5X%s\n",
buffId, ib_base, ib_size*4, base_offset,
base_addr ? "" : " [Invalid]");
}
#define IB_LIST_SIZE 64
struct ib_list {
int count;
uint32_t bases[IB_LIST_SIZE];
uint32_t sizes[IB_LIST_SIZE];
uint32_t offsets[IB_LIST_SIZE];
};
static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
uint32_t base_offset,
uint32_t ib1_base, uint32_t ib1_size,
struct ib_list *ib_list, bool dump)
{
int i, j;
uint32_t value;
uint32_t *ib1_addr;
unsigned int memsize;
dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
ib1_size, dump);
/* fetch virtual address for given IB base */
ib1_addr = (uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
ib1_base, &memsize);
if (!ib1_addr)
return;
for (i = 0; i+3 < ib1_size; ) {
value = ib1_addr[i++];
if (value == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib2_base = ib1_addr[i++];
uint32_t ib2_size = ib1_addr[i++];
/* find previous match */
for (j = 0; j < ib_list->count; ++j)
if (ib_list->sizes[j] == ib2_size
&& ib_list->bases[j] == ib2_base)
break;
if (j < ib_list->count || ib_list->count
>= IB_LIST_SIZE)
continue;
/* store match */
ib_list->sizes[ib_list->count] = ib2_size;
ib_list->bases[ib_list->count] = ib2_base;
ib_list->offsets[ib_list->count] = i<<2;
++ib_list->count;
}
}
}
static void adreno_dump_rb_buffer(const void *buf, size_t len,
char *linebuf, size_t linebuflen, int *argp)
{
const u32 *ptr4 = buf;
const int ngroups = len;
int lx = 0, j;
bool nxsp = 1;
for (j = 0; j < ngroups; j++) {
if (*argp < 0) {
lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
*argp = -*argp;
} else if (nxsp)
lx += scnprintf(linebuf + lx, linebuflen - lx, " ");
else
nxsp = 1;
if (!*argp && adreno_is_pm4_type(ptr4[j])) {
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s", adreno_pm4_name(ptr4[j]));
*argp = -(adreno_is_pm4_len(ptr4[j])+1);
} else {
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%8.8X", ptr4[j]);
if (*argp > 1)
--*argp;
else if (*argp == 1) {
*argp = 0;
nxsp = 0;
lx += scnprintf(linebuf + lx, linebuflen - lx,
"> ");
}
}
}
linebuf[lx] = '\0';
}
static bool adreno_rb_use_hex(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
return 1;
#else
return 0;
#endif
}
static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
size_t len, int start, int size)
{
const uint32_t *ptr = buf;
int i, remaining, args = 0;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
const int rowsize = 8;
len >>= 2;
remaining = len;
for (i = 0; i < len; i += rowsize) {
int linelen = min(remaining, rowsize);
remaining -= rowsize;
if (adreno_rb_use_hex())
hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
linebuf, sizeof(linebuf), 0);
else
adreno_dump_rb_buffer(ptr+i, linelen, linebuf,
sizeof(linebuf), &args);
KGSL_LOG_DUMP(device,
"RB: %4.4X:%s\n", (start+i)%size, linebuf);
}
}
static bool adreno_ib_dump_enabled(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
return 0;
#else
return 1;
#endif
}
struct log_field {
bool show;
const char *display;
};
static int adreno_dump_fields_line(struct kgsl_device *device,
const char *start, char *str, int slen,
const struct log_field **lines,
int num)
{
const struct log_field *l = *lines;
int sptr, count = 0;
sptr = snprintf(str, slen, "%s", start);
for ( ; num && sptr < slen; num--, l++) {
int ilen = strlen(l->display);
if (!l->show)
continue;
if (count)
ilen += strlen(" | ");
if (ilen > (slen - sptr))
break;
if (count++)
sptr += snprintf(str + sptr, slen - sptr, " | ");
sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
}
KGSL_LOG_DUMP(device, "%s\n", str);
*lines = l;
return num;
}
static void adreno_dump_fields(struct kgsl_device *device,
const char *start, const struct log_field *lines,
int num)
{
char lb[90];
const char *sstr = start;
lb[sizeof(lb) - 1] = '\0';
while (num) {
int ret = adreno_dump_fields_line(device, sstr, lb,
sizeof(lb) - 1, &lines, num);
if (ret == num)
break;
num = ret;
sstr = " ";
}
}
static int adreno_dump(struct kgsl_device *device)
{
unsigned int r1, r2, r3, rbbm_status;
unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat;
unsigned int cp_ib2_base, cp_ib2_bufsz;
unsigned int pt_base;
unsigned int cp_rb_base, rb_count;
unsigned int cp_rb_wptr, cp_rb_rptr;
unsigned int i;
int result = 0;
uint32_t *rb_copy;
const uint32_t *rb_vaddr;
int num_item = 0;
int read_idx, write_idx;
unsigned int ts_processed, rb_memsize;
static struct ib_list ib_list;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
mb();
KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
pwr->power_flags, pwr->active_pwrlevel);
KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
pwr->interval_timeout);
KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
kgsl_get_clkrate(pwr->grp_clks[0]));
KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
kgsl_get_clkrate(pwr->ebi1_clk));
kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X | PM_OVERRIDE1 = %08X | "
"PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3);
kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
KGSL_LOG_DUMP(device, " INT_CNTL = %08X | INT_STATUS = %08X | "
"READ_ERROR = %08X\n", r1, r2, r3);
{
char cmdFifo[16];
struct log_field lines[] = {
{rbbm_status & 0x001F, cmdFifo},
{rbbm_status & BIT(5), "TC busy "},
{rbbm_status & BIT(8), "HIRQ pending"},
{rbbm_status & BIT(9), "CPRQ pending"},
{rbbm_status & BIT(10), "CFRQ pending"},
{rbbm_status & BIT(11), "PFRQ pending"},
{rbbm_status & BIT(12), "VGT 0DMA bsy"},
{rbbm_status & BIT(14), "RBBM WU busy"},
{rbbm_status & BIT(16), "CP NRT busy "},
{rbbm_status & BIT(18), "MH busy "},
{rbbm_status & BIT(19), "MH chncy bsy"},
{rbbm_status & BIT(21), "SX busy "},
{rbbm_status & BIT(22), "TPC busy "},
{rbbm_status & BIT(24), "SC CNTX busy"},
{rbbm_status & BIT(25), "PA busy "},
{rbbm_status & BIT(26), "VGT busy "},
{rbbm_status & BIT(27), "SQ cntx1 bsy"},
{rbbm_status & BIT(28), "SQ cntx0 bsy"},
{rbbm_status & BIT(30), "RB busy "},
{rbbm_status & BIT(31), "Grphs pp bsy"},
};
snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X ",
rbbm_status & 0xf);
adreno_dump_fields(device, " STATUS=", lines,
ARRAY_SIZE(lines));
}
kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base);
kgsl_regread(device, REG_CP_RB_CNTL, &r2);
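	/* CP_RB_CNTL bits [5:0] hold log2 of the ring size in quadwords;
	 * convert that to a size in dwords */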
rb_count = 2 << (r2 & (BIT(6)-1));
kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
KGSL_LOG_DUMP(device,
"CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
"\n", cp_rb_base, r2, r3);
kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
KGSL_LOG_DUMP(device,
" RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
"\n", cp_rb_rptr, cp_rb_wptr, r3);
kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base);
kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz);
KGSL_LOG_DUMP(device,
"CP_IB1: BASE = %08X | BUFSZ = %d\n", cp_ib1_base,
cp_ib1_bufsz);
kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
KGSL_LOG_DUMP(device,
"CP_IB2: BASE = %08X | BUFSZ = %d\n", cp_ib2_base,
cp_ib2_bufsz);
kgsl_regread(device, REG_CP_INT_CNTL, &r1);
kgsl_regread(device, REG_CP_INT_STATUS, &r2);
KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
kgsl_regread(device, REG_CP_ME_CNTL, &r1);
kgsl_regread(device, REG_CP_ME_STATUS, &r2);
kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
KGSL_LOG_DUMP(device,
"CP_ME: CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
"%08X\n", r1, r2, r3);
kgsl_regread(device, REG_CP_STAT, &cp_stat);
KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
{
struct log_field lns[] = {
{cp_stat & BIT(0), "WR_BSY 0"},
{cp_stat & BIT(1), "RD_RQ_BSY 1"},
{cp_stat & BIT(2), "RD_RTN_BSY 2"},
};
adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(5), "RING_BUSY 5"},
{cp_stat & BIT(6), "NDRCTS_BSY 6"},
{cp_stat & BIT(7), "NDRCT2_BSY 7"},
{cp_stat & BIT(9), "ST_BUSY 9"},
{cp_stat & BIT(10), "BUSY 10"},
};
adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(11), "RNG_Q_BSY 11"},
{cp_stat & BIT(12), "NDRCTS_Q_B12"},
{cp_stat & BIT(13), "NDRCT2_Q_B13"},
{cp_stat & BIT(16), "ST_QUEUE_B16"},
{cp_stat & BIT(17), "PFP_BUSY 17"},
};
adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(3), "RBIU_BUSY 3"},
{cp_stat & BIT(4), "RCIU_BUSY 4"},
{cp_stat & BIT(18), "MQ_RG_BSY 18"},
{cp_stat & BIT(19), "MQ_NDRS_BS19"},
{cp_stat & BIT(20), "MQ_NDR2_BS20"},
{cp_stat & BIT(21), "MIU_WC_STL21"},
{cp_stat & BIT(22), "CP_NRT_BSY22"},
{cp_stat & BIT(23), "3D_BUSY 23"},
{cp_stat & BIT(26), "ME_BUSY 26"},
{cp_stat & BIT(29), "ME_WC_BSY 29"},
{cp_stat & BIT(30), "MIU_FF EM 30"},
{cp_stat & BIT(31), "CP_BUSY 31"},
};
adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
}
#endif
kgsl_regread(device, REG_SCRATCH_REG0, &r1);
KGSL_LOG_DUMP(device, "SCRATCH_REG0 = %08X\n", r1);
kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
KGSL_LOG_DUMP(device,
"COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4"
" = %08X\n", r1, r2, r3);
kgsl_regread(device, REG_MH_AXI_ERROR, &r1);
KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1);
kgsl_regread(device, REG_MH_MMU_PAGE_FAULT, &r1);
kgsl_regread(device, REG_MH_MMU_CONFIG, &r2);
kgsl_regread(device, REG_MH_MMU_MPU_BASE, &r3);
KGSL_LOG_DUMP(device,
"MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE ="
" %08X\n", r1, r2, r3);
kgsl_regread(device, REG_MH_MMU_MPU_END, &r1);
kgsl_regread(device, REG_MH_MMU_VA_RANGE, &r2);
kgsl_regread(device, REG_MH_MMU_PT_BASE, &pt_base);
KGSL_LOG_DUMP(device,
" MPU_END = %08X | VA_RANGE = %08X | PT_BASE ="
" %08X\n", r1, r2, pt_base);
KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", KGSL_PAGETABLE_SIZE);
kgsl_regread(device, REG_MH_MMU_TRAN_ERROR, &r1);
KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1);
kgsl_regread(device, REG_MH_INTERRUPT_MASK, &r1);
kgsl_regread(device, REG_MH_INTERRUPT_STATUS, &r2);
KGSL_LOG_DUMP(device,
"MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
if (device->ftbl.device_readtimestamp != NULL) {
ts_processed = device->ftbl.device_readtimestamp(
device, KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed);
}
num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
cp_rb_rptr);
if (num_item <= 0)
KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
rb_copy = vmalloc(rb_count<<2);
if (!rb_copy) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"vmalloc(%d) failed\n", rb_count << 2);
result = -ENOMEM;
goto end;
}
KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
cp_rb_base, rb_count<<2, num_item);
rb_vaddr = (const uint32_t *)kgsl_sharedmem_convertaddr(device, pt_base,
cp_rb_base, &rb_memsize);
if (!rb_vaddr) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"Can't fetch vaddr for CP_RB_BASE\n");
goto error_vfree;
}
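	/* Dump a window of the ring running from 64 dwords before the read
	 * pointer to 16 dwords past the write pointer, handling wrap-around
	 * at the ring boundaries. */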
read_idx = (int)cp_rb_rptr - 64;
if (read_idx < 0)
read_idx += rb_count;
write_idx = (int)cp_rb_wptr + 16;
if (write_idx > rb_count)
write_idx -= rb_count;
num_item += 64+16;
if (num_item > rb_count)
num_item = rb_count;
if (write_idx >= read_idx)
memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
else {
int part1_c = rb_count-read_idx;
memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
}
/* extract the latest ib commands from the buffer */
ib_list.count = 0;
i = 0;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
if (this_cmd == pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib_addr = rb_copy[read_idx++];
uint32_t ib_size = rb_copy[read_idx++];
dump_ib1(device, pt_base, (read_idx-3)<<2, ib_addr,
ib_size, &ib_list, 0);
for (; i < ib_list.count; ++i)
dump_ib(device, "IB2:", pt_base,
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
}
}
read_idx = (int)cp_rb_rptr - 64;
if (read_idx < 0)
read_idx += rb_count;
KGSL_LOG_DUMP(device,
"RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n",
cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
if (adreno_ib_dump_enabled()) {
for (read_idx = 64; read_idx >= 0; --read_idx) {
uint32_t this_cmd = rb_copy[read_idx];
if (this_cmd == pm4_type3_packet(
PM4_INDIRECT_BUFFER_PFD, 2)) {
uint32_t ib_addr = rb_copy[read_idx+1];
uint32_t ib_size = rb_copy[read_idx+2];
if (cp_ib1_bufsz && cp_ib1_base == ib_addr) {
KGSL_LOG_DUMP(device,
"IB1: base:%8.8X "
"count:%d\n", ib_addr, ib_size);
dump_ib(device, "IB1: ", pt_base,
read_idx<<2, ib_addr, ib_size,
1);
}
}
}
for (i = 0; i < ib_list.count; ++i) {
if (cp_ib2_bufsz && cp_ib2_base == ib_list.bases[i]) {
uint32_t ib_size = ib_list.sizes[i];
uint32_t ib_offset = ib_list.offsets[i];
KGSL_LOG_DUMP(device,
"IB2: base:%8.8X count:%d\n",
cp_ib2_base, ib_size);
dump_ib(device, "IB2: ", pt_base, ib_offset,
ib_list.bases[i], ib_size, 1);
}
}
}
/* Dump the registers if the user asked for it */
if (adreno_is_a20x(adreno_dev))
adreno_dump_regs(device, a200_registers,
ARRAY_SIZE(a200_registers) / 2);
else if (adreno_is_a22x(adreno_dev))
adreno_dump_regs(device, a220_registers,
ARRAY_SIZE(a220_registers) / 2);
error_vfree:
vfree(rb_copy);
end:
return result;
}
/**
* adreno_postmortem_dump - Dump the current GPU state
* @device - A pointer to the KGSL device to dump
* @manual - A flag that indicates if this was a manually triggered
* dump (from debugfs). If zero, then this is assumed to be a
* dump automatically triggered from a hang
*/
int adreno_postmortem_dump(struct kgsl_device *device, int manual)
{
bool saved_nap;
BUG_ON(device == NULL);
kgsl_cffdump_hang(device->id);
/* For a manual dump, make sure that the system is idle */
if (manual) {
if (device->active_cnt != 0) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->suspend_gate);
mutex_lock(&device->mutex);
}
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
}
/* Disable the idle timer so we don't get interrupted */
del_timer(&device->idle_timer);
/* Turn off napping to make sure we have the clocks'
   full attention through the following process */
saved_nap = device->pwrctrl.nap_allowed;
device->pwrctrl.nap_allowed = false;
/* Force on the clocks */
kgsl_pwrctrl_wake(device);
/* Disable the irq */
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
/* If this is not a manual trigger, then set up the
state to try to recover */
if (!manual) {
device->state = KGSL_STATE_DUMP_AND_RECOVER;
KGSL_PWR_WARN(device,
"state -> DUMP_AND_RECOVER, device %d\n",
device->id);
}
KGSL_DRV_ERR(device,
"wait for work in workqueue to complete\n");
mutex_unlock(&device->mutex);
flush_workqueue(device->work_queue);
mutex_lock(&device->mutex);
adreno_dump(device);
/* Restore nap mode */
device->pwrctrl.nap_allowed = saved_nap;
/* On a manual trigger, turn on the interrupts and put
the clocks to sleep. They will recover themselves
on the next event. For a hang, leave things as they
are until recovery kicks in. */
if (manual) {
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
/* try to go into a sleep mode until the next event */
device->requested_state = KGSL_STATE_SLEEP;
kgsl_pwrctrl_sleep(device);
}
KGSL_DRV_ERR(device, "Dump Finished\n");
return 0;
}

View File

@@ -0,0 +1,37 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ADRENO_POSTMORTEM_H
#define __ADRENO_POSTMORTEM_H
struct kgsl_device;
int adreno_postmortem_dump(struct kgsl_device *device, int manual);
#endif /* __ADRENO_POSTMORTEM_H */

View File

@@ -0,0 +1,932 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/firmware.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/log2.h>
#include "kgsl.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "a200_reg.h"
#define VALID_STATUS_COUNT_MAX 10
#define GSL_RB_NOP_SIZEDWORDS 2
/* protected mode error checking below register address 0x800
* note: if CP_INTERRUPT packet is used then checking needs
* to change to below register address 0x7C8
*/
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2
#define GSL_CP_INT_MASK \
(CP_INT_CNTL__SW_INT_MASK | \
CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
CP_INT_CNTL__OPCODE_ERROR_MASK | \
CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
CP_INT_CNTL__IB_ERROR_MASK | \
CP_INT_CNTL__IB2_INT_MASK | \
CP_INT_CNTL__IB1_INT_MASK | \
CP_INT_CNTL__RB_INT_MASK)
/* Firmware file names
* Legacy file names must remain, but the macro names are updated to
* match the current kgsl model.
* a200 is yamato
* a220 is leia
*/
#define A200_PFP_FW "yamato_pfp.fw"
#define A200_PM4_FW "yamato_pm4.fw"
#define A220_PFP_470_FW "leia_pfp_470.fw"
#define A220_PM4_470_FW "leia_pm4_470.fw"
/* functions */
void kgsl_cp_intrcallback(struct kgsl_device *device)
{
unsigned int status = 0, num_reads = 0, master_status = 0;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
adreno_regread_isr(device, REG_MASTER_INT_SIGNAL, &master_status);
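	/* CP_INT_STATUS can transiently read back zero while
	 * MASTER_INT_SIGNAL still reports a pending CP interrupt,
	 * so retry the read a bounded number of times */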
while (!status && (num_reads < VALID_STATUS_COUNT_MAX) &&
(master_status & MASTER_INT_SIGNAL__CP_INT_STAT)) {
adreno_regread_isr(device, REG_CP_INT_STATUS, &status);
adreno_regread_isr(device, REG_MASTER_INT_SIGNAL,
&master_status);
num_reads++;
}
if (num_reads > 1)
KGSL_DRV_WARN(device,
"Looped %d times to read REG_CP_INT_STATUS\n",
num_reads);
if (!status) {
if (master_status & MASTER_INT_SIGNAL__CP_INT_STAT) {
/* This indicates that we could not read CP_INT_STAT.
* As a precaution just wake up processes so
* they can check their timestamps. Since we
* did not ack any interrupts, this interrupt will
* be generated again */
KGSL_DRV_WARN(device, "Unable to read CP_INT_STATUS\n");
wake_up_interruptible_all(&device->wait_queue);
} else
KGSL_DRV_WARN(device, "Spurious interrput detected\n");
return;
}
if (status & CP_INT_CNTL__RB_INT_MASK) {
/* signal intr completion event */
unsigned int enableflag = 0;
kgsl_sharedmem_writel(&rb->device->memstore,
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
enableflag);
wmb();
KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n");
}
if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer TO packet in IB interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer opcode error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer protected mode error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer reserved bit error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__IB_ERROR_MASK) {
KGSL_CMD_CRIT(rb->device,
"ringbuffer IB error interrupt\n");
adreno_regwrite_isr(rb->device, REG_CP_INT_CNTL, 0);
}
if (status & CP_INT_CNTL__SW_INT_MASK)
KGSL_CMD_INFO(rb->device, "ringbuffer software interrupt\n");
if (status & CP_INT_CNTL__IB2_INT_MASK)
KGSL_CMD_INFO(rb->device, "ringbuffer ib2 interrupt\n");
if (status & (~GSL_CP_INT_MASK))
KGSL_CMD_WARN(rb->device,
"bad bits in REG_CP_INT_STATUS %08x\n", status);
/* only ack bits we understand */
status &= GSL_CP_INT_MASK;
adreno_regwrite_isr(device, REG_CP_INT_ACK, status);
if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
KGSL_CMD_WARN(rb->device, "ringbuffer ib1/rb interrupt\n");
wake_up_interruptible_all(&device->wait_queue);
atomic_notifier_call_chain(&(device->ts_notifier_list),
device->id,
NULL);
}
}
static void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb)
{
BUG_ON(rb->wptr == 0);
	/* synchronize memory before informing the hardware of the
	 * new commands.
	 */
mb();
adreno_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);
}
static int
adreno_ringbuffer_waitspace(struct adreno_ringbuffer *rb, unsigned int numcmds,
int wptr_ahead)
{
int nopcount;
unsigned int freecmds;
unsigned int *cmds;
uint cmds_gpu;
/* if wptr ahead, fill the remaining with NOPs */
if (wptr_ahead) {
/* -1 for header */
nopcount = rb->sizedwords - rb->wptr - 1;
cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*rb->wptr;
GSL_RB_WRITE(cmds, cmds_gpu, pm4_nop_packet(nopcount));
/* Make sure that rptr is not 0 before submitting
* commands at the end of ringbuffer. We do not
* want the rptr and wptr to become equal when
* the ringbuffer is not empty */
do {
GSL_RB_GET_READPTR(rb, &rb->rptr);
} while (!rb->rptr);
rb->wptr++;
adreno_ringbuffer_submit(rb);
rb->wptr = 0;
}
/* wait for space in ringbuffer */
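	/* the loop exits when the ring drains completely (rptr catches up
	 * with wptr, so freecmds == 0) or once more than numcmds dwords
	 * are free */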
do {
GSL_RB_GET_READPTR(rb, &rb->rptr);
freecmds = rb->rptr - rb->wptr;
} while ((freecmds != 0) && (freecmds <= numcmds));
return 0;
}
static unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
unsigned int numcmds)
{
unsigned int *ptr = NULL;
int status = 0;
BUG_ON(numcmds >= rb->sizedwords);
GSL_RB_GET_READPTR(rb, &rb->rptr);
/* check for available space */
if (rb->wptr >= rb->rptr) {
/* wptr ahead or equal to rptr */
/* reserve dwords for nop packet */
if ((rb->wptr + numcmds) > (rb->sizedwords -
GSL_RB_NOP_SIZEDWORDS))
status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
} else {
/* wptr behind rptr */
if ((rb->wptr + numcmds) >= rb->rptr)
status = adreno_ringbuffer_waitspace(rb, numcmds, 0);
/* check for remaining space */
/* reserve dwords for nop packet */
if ((rb->wptr + numcmds) > (rb->sizedwords -
GSL_RB_NOP_SIZEDWORDS))
status = adreno_ringbuffer_waitspace(rb, numcmds, 1);
}
if (status == 0) {
ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rb->wptr += numcmds;
}
return ptr;
}
static int _load_firmware(struct kgsl_device *device, const char *fwfile,
void **data, int *len)
{
const struct firmware *fw = NULL;
int ret;
ret = request_firmware(&fw, fwfile, device->dev);
if (ret) {
KGSL_DRV_ERR(device, "request_firmware(%s) failed: %d\n",
fwfile, ret);
return ret;
}
*data = kmalloc(fw->size, GFP_KERNEL);
if (*data) {
memcpy(*data, fw->data, fw->size);
*len = fw->size;
} else
KGSL_MEM_ERR(device, "kmalloc(%d) failed\n", fw->size);
release_firmware(fw);
return (*data != NULL) ? 0 : -ENOMEM;
}
static int adreno_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const char *fwfile;
int i, ret = 0;
if (adreno_is_a220(adreno_dev))
fwfile = A220_PM4_470_FW;
else
fwfile = A200_PM4_FW;
if (adreno_dev->pm4_fw == NULL) {
int len;
unsigned int *ptr;
ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
if (ret)
goto err;
/* PM4 size is 3 dword aligned plus 1 dword of version */
if (len % ((sizeof(uint32_t) * 3)) != sizeof(uint32_t)) {
KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
ret = -EINVAL;
kfree(ptr);
goto err;
}
adreno_dev->pm4_fw_size = len / sizeof(uint32_t);
adreno_dev->pm4_fw = ptr;
}
KGSL_DRV_INFO(device, "loading pm4 ucode version: %d\n",
adreno_dev->pm4_fw[0]);
adreno_regwrite(device, REG_CP_DEBUG, 0x02000000);
adreno_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
for (i = 1; i < adreno_dev->pm4_fw_size; i++)
adreno_regwrite(device, REG_CP_ME_RAM_DATA,
adreno_dev->pm4_fw[i]);
err:
return ret;
}
static int adreno_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
const char *fwfile;
int i, ret = 0;
if (adreno_is_a220(adreno_dev))
fwfile = A220_PFP_470_FW;
else
fwfile = A200_PFP_FW;
if (adreno_dev->pfp_fw == NULL) {
int len;
unsigned int *ptr;
ret = _load_firmware(device, fwfile, (void *) &ptr, &len);
if (ret)
goto err;
/* PFP size should be dword aligned */
if (len % sizeof(uint32_t) != 0) {
KGSL_DRV_ERR(device, "Bad firmware size: %d\n", len);
ret = -EINVAL;
kfree(ptr);
goto err;
}
adreno_dev->pfp_fw_size = len / sizeof(uint32_t);
adreno_dev->pfp_fw = ptr;
}
KGSL_DRV_INFO(device, "loading pfp ucode version: %d\n",
adreno_dev->pfp_fw[0]);
adreno_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
for (i = 1; i < adreno_dev->pfp_fw_size; i++)
adreno_regwrite(device, REG_CP_PFP_UCODE_DATA,
adreno_dev->pfp_fw[i]);
err:
return ret;
}
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram)
{
int status;
/*cp_rb_cntl_u cp_rb_cntl; */
union reg_cp_rb_cntl cp_rb_cntl;
unsigned int *cmds, rb_cntl;
struct kgsl_device *device = rb->device;
uint cmds_gpu;
if (rb->flags & KGSL_FLAGS_STARTED)
return 0;
if (init_ram) {
rb->timestamp = 0;
GSL_RB_INIT_TIMESTAMP(rb);
}
kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
sizeof(struct kgsl_rbmemptrs));
kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
(rb->sizedwords << 2));
adreno_regwrite(device, REG_CP_RB_WPTR_BASE,
(rb->memptrs_desc.gpuaddr
+ GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
/* setup WPTR delay */
adreno_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);
/*setup REG_CP_RB_CNTL */
adreno_regread(device, REG_CP_RB_CNTL, &rb_cntl);
cp_rb_cntl.val = rb_cntl;
/*
* The size of the ringbuffer in the hardware is the log2
* representation of the size in quadwords (sizedwords / 2)
*/
cp_rb_cntl.f.rb_bufsz = ilog2(rb->sizedwords >> 1);
/*
* Specify the quadwords to read before updating mem RPTR.
* Like above, pass the log2 representation of the blocksize
* in quadwords.
*/
cp_rb_cntl.f.rb_blksz = ilog2(KGSL_RB_BLKSIZE >> 3);
cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
/* mem RPTR writebacks */
cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
adreno_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
adreno_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
adreno_regwrite(device, REG_CP_RB_RPTR_ADDR,
rb->memptrs_desc.gpuaddr +
GSL_RB_MEMPTRS_RPTR_OFFSET);
/* explicitly clear all cp interrupts */
adreno_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
/* setup scratch/timestamp */
adreno_regwrite(device, REG_SCRATCH_ADDR,
device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
adreno_regwrite(device, REG_SCRATCH_UMSK,
GSL_RB_MEMPTRS_SCRATCH_MASK);
/* load the CP ucode */
status = adreno_ringbuffer_load_pm4_ucode(device);
if (status != 0)
return status;
/* load the prefetch parser ucode */
status = adreno_ringbuffer_load_pfp_ucode(device);
if (status != 0)
return status;
adreno_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);
rb->rptr = 0;
rb->wptr = 0;
/* clear ME_HALT to start micro engine */
adreno_regwrite(device, REG_CP_ME_CNTL, 0);
/* ME_INIT */
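	/* 19 dwords: one header plus the 18 payload dwords declared
	 * by PM4_HDR_ME_INIT */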
cmds = adreno_ringbuffer_allocspace(rb, 19);
cmds_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-19);
GSL_RB_WRITE(cmds, cmds_gpu, PM4_HDR_ME_INIT);
/* All fields present (bits 9:0) */
GSL_RB_WRITE(cmds, cmds_gpu, 0x000003ff);
/* Disable/Enable Real-Time Stream processing (present but ignored) */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
GSL_RB_WRITE(cmds, cmds_gpu,
GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
/* Vertex and Pixel Shader Start Addresses in instructions
* (3 DWORDS per instruction) */
GSL_RB_WRITE(cmds, cmds_gpu, 0x80000180);
/* Maximum Contexts */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000001);
/* Write Confirm Interval and The CP will wait the
* wait_interval * 16 clocks between polling */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
/* NQ and External Memory Swap */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
/* Protected mode error checking */
GSL_RB_WRITE(cmds, cmds_gpu, GSL_RB_PROTECTED_MODE_CONTROL);
/* Disable header dumping and Header dump address */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
/* Header dump size */
GSL_RB_WRITE(cmds, cmds_gpu, 0x00000000);
adreno_ringbuffer_submit(rb);
/* idle device to validate ME INIT */
status = adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
adreno_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
if (status == 0)
rb->flags |= KGSL_FLAGS_STARTED;
return status;
}
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb)
{
if (rb->flags & KGSL_FLAGS_STARTED) {
adreno_regwrite(rb->device, REG_CP_INT_CNTL, 0);
/* ME_HALT */
adreno_regwrite(rb->device, REG_CP_ME_CNTL, 0x10000000);
rb->flags &= ~KGSL_FLAGS_STARTED;
}
return 0;
}
int adreno_ringbuffer_init(struct kgsl_device *device)
{
int status;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
rb->device = device;
/*
* It is silly to convert this to words and then back to bytes
* immediately below, but most of the rest of the code deals
* in words, so we might as well only do the math once
*/
rb->sizedwords = KGSL_RB_SIZE >> 2;
/* allocate memory for ringbuffer */
status = kgsl_allocate_contig(&rb->buffer_desc, (rb->sizedwords << 2));
if (status != 0) {
adreno_ringbuffer_close(rb);
return status;
}
/* allocate memory for polling and timestamps */
	/* This really could be at a 4-byte alignment boundary, but to use
	 * the MMU we need to place it at a page boundary */
status = kgsl_allocate_contig(&rb->memptrs_desc,
sizeof(struct kgsl_rbmemptrs));
if (status != 0) {
adreno_ringbuffer_close(rb);
return status;
}
/* overlay structure on memptrs memory */
rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
return 0;
}
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device);
kgsl_sharedmem_free(&rb->buffer_desc);
kgsl_sharedmem_free(&rb->memptrs_desc);
kfree(adreno_dev->pfp_fw);
kfree(adreno_dev->pm4_fw);
adreno_dev->pfp_fw = NULL;
adreno_dev->pm4_fw = NULL;
memset(rb, 0, sizeof(struct adreno_ringbuffer));
return 0;
}
static uint32_t
adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb,
unsigned int flags, unsigned int *cmds,
int sizedwords)
{
unsigned int *ringcmds;
unsigned int timestamp;
unsigned int total_sizedwords = sizedwords + 6;
unsigned int i;
unsigned int rcmd_gpu;
/* reserve space to temporarily turn off protected mode
* error checking if needed
*/
total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 7 : 0;
total_sizedwords += !(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD) ? 2 : 0;
ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords);
rcmd_gpu = rb->buffer_desc.gpuaddr
+ sizeof(uint)*(rb->wptr-total_sizedwords);
if (!(flags & KGSL_CMD_FLAGS_NOT_KERNEL_CMD)) {
GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_nop_packet(1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, KGSL_CMD_IDENTIFIER);
}
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* disable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 0);
}
for (i = 0; i < sizedwords; i++) {
GSL_RB_WRITE(ringcmds, rcmd_gpu, *cmds);
cmds++;
}
if (flags & KGSL_CMD_FLAGS_PMODE) {
/* re-enable protected mode error checking */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, 1);
}
rb->timestamp++;
timestamp = rb->timestamp;
/* start-of-pipeline and end-of-pipeline timestamps */
GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type0_packet(REG_CP_TIMESTAMP, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
GSL_RB_WRITE(ringcmds, rcmd_gpu, pm4_type3_packet(PM4_EVENT_WRITE, 3));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
(rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)));
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
/* Conditional execution based on memory values */
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_COND_EXEC, 4));
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp);
/* # of conditional command DWORDs */
GSL_RB_WRITE(ringcmds, rcmd_gpu, 2);
GSL_RB_WRITE(ringcmds, rcmd_gpu,
pm4_type3_packet(PM4_INTERRUPT, 1));
GSL_RB_WRITE(ringcmds, rcmd_gpu, CP_INT_CNTL__RB_INT_MASK);
}
adreno_ringbuffer_submit(rb);
/* return timestamp of issued commands */
return timestamp;
}
void
adreno_ringbuffer_issuecmds(struct kgsl_device *device,
unsigned int flags,
unsigned int *cmds,
int sizedwords)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
if (device->state & KGSL_STATE_HUNG)
return;
adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
}
int
adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
struct kgsl_ibdesc *ibdesc,
unsigned int numibs,
uint32_t *timestamp,
unsigned int flags)
{
struct kgsl_device *device = dev_priv->device;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
unsigned int *link;
unsigned int *cmds;
unsigned int i;
struct adreno_context *drawctxt;
if (device->state & KGSL_STATE_HUNG)
return -EBUSY;
if (!(adreno_dev->ringbuffer.flags & KGSL_FLAGS_STARTED) ||
context == NULL || ibdesc == 0 || numibs == 0)
return -EINVAL;
drawctxt = context->devctxt;
if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) {
KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.."
" will not accept commands for this context\n",
drawctxt);
return -EDEADLK;
}
link = kzalloc(sizeof(unsigned int) * numibs * 3, GFP_KERNEL);
cmds = link;
if (!link) {
KGSL_MEM_ERR(device, "Failed to allocate memory for for command"
" submission, size %x\n", numibs * 3);
return -ENOMEM;
}
for (i = 0; i < numibs; i++) {
(void)kgsl_cffdump_parse_ibs(dev_priv, NULL,
ibdesc[i].gpuaddr, ibdesc[i].sizedwords, false);
*cmds++ = PM4_HDR_INDIRECT_BUFFER_PFD;
*cmds++ = ibdesc[i].gpuaddr;
*cmds++ = ibdesc[i].sizedwords;
}
kgsl_setstate(device,
kgsl_pt_get_flags(device->mmu.hwpagetable,
device->id));
adreno_drawctxt_switch(adreno_dev, drawctxt, flags);
*timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer,
KGSL_CMD_FLAGS_NOT_KERNEL_CMD,
&link[0], (cmds - link));
KGSL_CMD_INFO(device, "ctxt %d g %08x numibs %d ts %d\n",
context->id, (unsigned int)ibdesc, numibs, *timestamp);
kfree(link);
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
/*
* insert wait for idle after every IB1
* this is conservative but works reliably and is ok
* even for performance simulations
*/
adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
#endif
return 0;
}
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
unsigned int *temp_rb_buffer,
int *rb_size)
{
struct kgsl_device *device = rb->device;
unsigned int rb_rptr;
unsigned int retired_timestamp;
unsigned int temp_idx = 0;
unsigned int value;
unsigned int val1;
unsigned int val2;
unsigned int val3;
unsigned int copy_rb_contents = 0;
unsigned int cur_context;
unsigned int j;
GSL_RB_GET_READPTR(rb, &rb->rptr);
/* drewis: still not sure where this struct was changed */
#if 0
retired_timestamp = device->ftbl->readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
#endif
retired_timestamp = device->ftbl.device_readtimestamp(
device, KGSL_TIMESTAMP_RETIRED);
KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n",
retired_timestamp);
/*
* We need to go back in history by 4 dwords from the current location
* of read pointer as 4 dwords are read to match the end of a command.
* Also, take care of wrap around when moving back
*/
if (rb->rptr >= 4)
rb_rptr = (rb->rptr - 4) * sizeof(unsigned int);
else
rb_rptr = rb->buffer_desc.size -
((4 - rb->rptr) * sizeof(unsigned int));
/* Read the rb contents going backwards to locate end of last
* successfully executed command */
while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
if (value == retired_timestamp) {
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
kgsl_sharedmem_readl(&rb->buffer_desc, &val2, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
kgsl_sharedmem_readl(&rb->buffer_desc, &val3, rb_rptr);
/* match the pattern found at the end of a command */
if ((val1 == 2 &&
val2 == pm4_type3_packet(PM4_INTERRUPT, 1)
&& val3 == CP_INT_CNTL__RB_INT_MASK) ||
(val1 == pm4_type3_packet(PM4_EVENT_WRITE, 3)
&& val2 == CACHE_FLUSH_TS &&
val3 == (rb->device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) {
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
KGSL_DRV_ERR(device,
"Found end of last executed "
"command at offset: %x\n",
rb_rptr / sizeof(unsigned int));
break;
} else {
if (rb_rptr < (3 * sizeof(unsigned int)))
rb_rptr = rb->buffer_desc.size -
(3 * sizeof(unsigned int))
+ rb_rptr;
else
rb_rptr -= (3 * sizeof(unsigned int));
}
}
if (rb_rptr == 0)
rb_rptr = rb->buffer_desc.size - sizeof(unsigned int);
else
rb_rptr -= sizeof(unsigned int);
}
if ((rb_rptr / sizeof(unsigned int)) == rb->wptr) {
KGSL_DRV_ERR(device,
"GPU recovery from hang not possible because last"
" successful timestamp is overwritten\n");
return -EINVAL;
}
/* rb_rptr is now pointing to the first dword of the command following
* the last successfully executed command sequence. Assumption is that
* GPU is hung in the command sequence pointed by rb_rptr */
/* make sure the GPU is not hung in a command submitted by kgsl
* itself */
kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
kgsl_sharedmem_readl(&rb->buffer_desc, &val2,
adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size));
if (val1 == pm4_nop_packet(1) && val2 == KGSL_CMD_IDENTIFIER) {
KGSL_DRV_ERR(device,
"GPU recovery from hang not possible because "
"of hang in kgsl command\n");
return -EINVAL;
}
/* current_context is the context that is presently active in the
* GPU, i.e the context in which the hang is caused */
kgsl_sharedmem_readl(&device->memstore, &cur_context,
KGSL_DEVICE_MEMSTORE_OFFSET(current_context));
while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) {
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
/* check for context switch indicator */
if (value == KGSL_CONTEXT_TO_MEM_IDENTIFIER) {
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
BUG_ON(value != pm4_type3_packet(PM4_MEM_WRITE, 2));
kgsl_sharedmem_readl(&rb->buffer_desc, &val1, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
BUG_ON(val1 != (device->memstore.gpuaddr +
KGSL_DEVICE_MEMSTORE_OFFSET(current_context)));
kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr);
rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr,
rb->buffer_desc.size);
BUG_ON((copy_rb_contents == 0) &&
(value == cur_context));
/*
* If we were copying the commands and got to this point
* then we need to remove the 3 commands that appear
* before KGSL_CONTEXT_TO_MEM_IDENTIFIER
*/
if (temp_idx)
temp_idx -= 3;
/* if context switches to a context that did not cause
* hang then start saving the rb contents as those
* commands can be executed */
if (value != cur_context) {
copy_rb_contents = 1;
temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
temp_rb_buffer[temp_idx++] =
KGSL_CMD_IDENTIFIER;
temp_rb_buffer[temp_idx++] = pm4_nop_packet(1);
temp_rb_buffer[temp_idx++] =
KGSL_CONTEXT_TO_MEM_IDENTIFIER;
temp_rb_buffer[temp_idx++] =
pm4_type3_packet(PM4_MEM_WRITE, 2);
temp_rb_buffer[temp_idx++] = val1;
temp_rb_buffer[temp_idx++] = value;
} else {
copy_rb_contents = 0;
}
} else if (copy_rb_contents)
temp_rb_buffer[temp_idx++] = value;
}
*rb_size = temp_idx;
KGSL_DRV_ERR(device, "Extracted rb contents, size: %x\n", *rb_size);
for (temp_idx = 0; temp_idx < *rb_size;) {
char str[80];
int idx = 0;
if ((temp_idx + 8) <= *rb_size)
j = 8;
else
j = *rb_size - temp_idx;
for (; j != 0; j--)
idx += scnprintf(str + idx, 80 - idx,
"%8.8X ", temp_rb_buffer[temp_idx++]);
printk(KERN_ALERT "%s", str);
}
return 0;
}
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
int num_rb_contents)
{
int i;
unsigned int *ringcmds;
unsigned int rcmd_gpu;
if (!num_rb_contents)
return;
if (num_rb_contents > (rb->buffer_desc.size - rb->wptr)) {
adreno_regwrite(rb->device, REG_CP_RB_RPTR, 0);
rb->rptr = 0;
BUG_ON(num_rb_contents > rb->buffer_desc.size);
}
ringcmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(unsigned int) * rb->wptr;
for (i = 0; i < num_rb_contents; i++)
GSL_RB_WRITE(ringcmds, rcmd_gpu, rb_buff[i]);
rb->wptr += num_rb_contents;
adreno_ringbuffer_submit(rb);
}

View File

@@ -0,0 +1,172 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ADRENO_RINGBUFFER_H
#define __ADRENO_RINGBUFFER_H
#define GSL_RB_USE_MEM_RPTR
#define GSL_RB_USE_MEM_TIMESTAMP
#define GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
/*
* Adreno ringbuffer sizes in bytes - these are converted to
* the appropriate log2 values in the code
*/
#define KGSL_RB_SIZE (32 * 1024)
#define KGSL_RB_BLKSIZE 16
/* CP timestamp register */
#define REG_CP_TIMESTAMP REG_SCRATCH_REG0
struct kgsl_device;
struct kgsl_device_private;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
int rptr;
int wptr_poll;
};
#define GSL_RB_MEMPTRS_RPTR_OFFSET \
(offsetof(struct kgsl_rbmemptrs, rptr))
#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
(offsetof(struct kgsl_rbmemptrs, wptr_poll))
struct adreno_ringbuffer {
struct kgsl_device *device;
uint32_t flags;
struct kgsl_memdesc buffer_desc;
struct kgsl_memdesc memptrs_desc;
struct kgsl_rbmemptrs *memptrs;
/*ringbuffer size */
unsigned int sizedwords;
unsigned int wptr; /* write pointer offset in dwords from baseaddr */
unsigned int rptr; /* read pointer offset in dwords from baseaddr */
uint32_t timestamp;
};
/* dword base address of the GFX decode space */
#define GSL_HAL_SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
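/* Write one command dword through the kernel mapping of the ring and
 * advance both the host pointer and the shadow GPU address; the wmb()
 * orders the data write ahead of the later write-pointer update. */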
#define GSL_RB_WRITE(ring, gpuaddr, data) \
do { \
writel_relaxed(data, ring); \
wmb(); \
kgsl_cffdump_setmem(gpuaddr, data, 4); \
ring++; \
gpuaddr += sizeof(uint); \
} while (0)
/* timestamp */
#ifdef GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
#define GSL_RB_USE_MEM_TIMESTAMP
#endif /* GSL_DEVICE_SHADOW_MEMSTORE_TO_USER */
#ifdef GSL_RB_USE_MEM_TIMESTAMP
/* enable timestamp (...scratch0) memory shadowing */
#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
#define GSL_RB_INIT_TIMESTAMP(rb)
#else
#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x0
#define GSL_RB_INIT_TIMESTAMP(rb) \
adreno_regwrite((rb)->device->id, REG_CP_TIMESTAMP, 0)
#endif /* GSL_RB_USE_MEM_TIMESTAMP */
/* mem rptr */
#ifdef GSL_RB_USE_MEM_RPTR
#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
#define GSL_RB_GET_READPTR(rb, data) \
do { \
*(data) = readl_relaxed(&(rb)->memptrs->rptr); \
} while (0)
#else
#define GSL_RB_CNTL_NO_UPDATE 0x1 /* disable */
#define GSL_RB_GET_READPTR(rb, data) \
do { \
adreno_regread((rb)->device->id, REG_CP_RB_RPTR, (data)); \
} while (0)
#endif /* GSL_RB_USE_MEM_RPTR */
#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
struct kgsl_ibdesc *ibdesc,
unsigned int numibs,
uint32_t *timestamp,
unsigned int flags);
int adreno_ringbuffer_init(struct kgsl_device *device);
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
unsigned int init_ram);
int adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
int adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
unsigned int flags,
unsigned int *cmdaddr,
int sizedwords);
void kgsl_cp_intrcallback(struct kgsl_device *device);
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
unsigned int *temp_rb_buffer,
int *rb_size);
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
int num_rb_contents);
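/* Number of dwords still queued between the given read pointer and the
 * current write pointer, accounting for wrap-around. */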
static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
unsigned int rptr)
{
if (rb->wptr >= rptr)
return rb->wptr - rptr;
return rb->wptr + rb->sizedwords - rptr;
}
/* Increment a value by 4 bytes with wrap-around based on size */
static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val,
unsigned int size)
{
return (val + sizeof(unsigned int)) % size;
}
#endif /* __ADRENO_RINGBUFFER_H */

2177
drivers/gpu/msm/kgsl.c Normal file

File diff suppressed because it is too large

290
drivers/gpu/msm/kgsl.h Normal file
View File

@@ -0,0 +1,290 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_H
#define __KGSL_H
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/regulator/consumer.h>
#include <linux/atomic.h>
#include "kgsl_device.h"
#include "kgsl_pwrctrl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_log.h"
#include "kgsl_cffdump.h"
#define KGSL_NAME "kgsl"
#define CHIP_REV_251 0x020501
/* Flags to control whether to flush or invalidate a cached memory range */
#define KGSL_CACHE_INV 0x00000000
#define KGSL_CACHE_CLEAN 0x00000001
#define KGSL_CACHE_FLUSH 0x00000002
#define KGSL_CACHE_USER_ADDR 0x00000010
#define KGSL_CACHE_VMALLOC_ADDR 0x00000020
/*cache coherency ops */
#define DRM_KGSL_GEM_CACHE_OP_TO_DEV 0x0001
#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV 0x0002
/* The size of each entry in a page table */
#define KGSL_PAGETABLE_ENTRY_SIZE 4
/* Pagetable Virtual Address base */
#define KGSL_PAGETABLE_BASE 0x66000000
/* Extra accounting entries needed in the pagetable */
#define KGSL_PT_EXTRA_ENTRIES 16
#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
KGSL_PT_EXTRA_ENTRIES)
#ifdef CONFIG_MSM_KGSL_MMU
#define KGSL_PAGETABLE_SIZE \
ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
#else
#define KGSL_PAGETABLE_SIZE 0
#endif
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
#else
#define KGSL_PAGETABLE_COUNT 1
#endif
/* Casting using container_of() for structures that kgsl owns. */
#define KGSL_CONTAINER_OF(ptr, type, member) \
container_of(ptr, type, member)
/* A macro for memory statistics - add the new size to the stat and if
the statistic is greater than _max, set _max
*/
#define KGSL_STATS_ADD(_size, _stat, _max) \
do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
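/* Example: KGSL_STATS_ADD(size, kgsl_driver.stats.mapped,
 * kgsl_driver.stats.mapped_max); */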
struct kgsl_driver {
struct cdev cdev;
dev_t major;
struct class *class;
/* Virtual device for managing the core */
struct device virtdev;
/* Kobjects for storing pagetable and process statistics */
struct kobject *ptkobj;
struct kobject *prockobj;
atomic_t device_count;
struct kgsl_device *devp[KGSL_DEVICE_MAX];
uint32_t flags_debug;
/* Global list of open processes */
struct list_head process_list;
/* Global list of pagetables */
struct list_head pagetable_list;
/* Mutex for accessing the pagetable list */
struct mutex pt_mutex;
/* Mutex for accessing the process list */
struct mutex process_mutex;
/* Mutex for protecting the device list */
struct mutex devlock;
struct kgsl_ptpool ptpool;
struct {
unsigned int vmalloc;
unsigned int vmalloc_max;
unsigned int coherent;
unsigned int coherent_max;
unsigned int mapped;
unsigned int mapped_max;
unsigned int histogram[16];
} stats;
};
extern struct kgsl_driver kgsl_driver;
#define KGSL_USER_MEMORY 1
#define KGSL_MAPPED_MEMORY 2
struct kgsl_mem_entry {
struct kref refcount;
struct kgsl_memdesc memdesc;
int memtype;
struct file *file_ptr;
struct list_head list;
uint32_t free_timestamp;
/* back pointer to private structure under whose context this
* allocation is made */
struct kgsl_process_private *priv;
};
#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
#define MMU_CONFIG 2
#else
#define MMU_CONFIG 1
#endif
void kgsl_mem_entry_destroy(struct kref *kref);
uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
unsigned int gpuaddr, unsigned int *size);
struct kgsl_mem_entry *kgsl_sharedmem_find_region(
struct kgsl_process_private *private, unsigned int gpuaddr,
size_t size);
int kgsl_idle(struct kgsl_device *device, unsigned int timeout);
int kgsl_setstate(struct kgsl_device *device, uint32_t flags);
static inline void kgsl_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
device->ftbl.device_regread(device, offsetwords, value);
}
static inline void kgsl_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
device->ftbl.device_regwrite(device, offsetwords, value);
}
static inline void kgsl_regread_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
device->ftbl.device_regread_isr(device, offsetwords, value);
}
static inline void kgsl_regwrite_isr(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
device->ftbl.device_regwrite_isr(device, offsetwords, value);
}
int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp);
int kgsl_register_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_unregister_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_device_platform_probe(struct kgsl_device *device,
irqreturn_t (*dev_isr) (int, void*));
void kgsl_device_platform_remove(struct kgsl_device *device);
extern const struct dev_pm_ops kgsl_pm_ops;
int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);
#ifdef CONFIG_MSM_KGSL_DRM
extern int kgsl_drm_init(struct platform_device *dev);
extern void kgsl_drm_exit(void);
extern void kgsl_gpu_mem_flush(int op);
#else
static inline int kgsl_drm_init(struct platform_device *dev)
{
return 0;
}
static inline void kgsl_drm_exit(void)
{
}
#endif
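/* True only if one full dword at 'gpuaddr' fits inside the region
 * described by 'memdesc'. */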
static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
unsigned int gpuaddr)
{
if (gpuaddr >= memdesc->gpuaddr && (gpuaddr + sizeof(unsigned int)) <=
(memdesc->gpuaddr + memdesc->size)) {
return 1;
}
return 0;
}
static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
return kgsl_driver.devp[i];
}
return NULL;
}
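/* Compare wrapping 32-bit timestamps: true if 'new' is at or after
 * 'old'. A signed difference below -20000 is assumed to mean the
 * counter wrapped, so 'new' is still treated as the newer value. */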
static inline bool timestamp_cmp(unsigned int new, unsigned int old)
{
int ts_diff = new - old;
return (ts_diff >= 0) || (ts_diff < -20000);
}
static inline void
kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
{
kref_get(&entry->refcount);
}
static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
static inline int kgsl_create_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int ret = 0, i;
for (i = 0; list[i] != NULL; i++)
ret |= device_create_file(root, list[i]);
return ret;
}
static inline void kgsl_remove_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int i;
for (i = 0; list[i] != NULL; i++)
device_remove_file(root, list[i]);
}
#endif /* __KGSL_H */


@ -0,0 +1,798 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* #define DEBUG */
#define ALIGN_CPU
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_debugfs.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "adreno_pm4types.h"
static struct rchan *chan;
static struct dentry *dir;
static int suspended;
static size_t dropped;
static size_t subbuf_size = 256*1024;
static size_t n_subbufs = 64;
/* forward declarations */
static void destroy_channel(void);
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);
static spinlock_t cffdump_lock;
static ulong serial_nr;
static ulong total_bytes;
static ulong total_syncmem;
static long last_sec;
#define MEMBUF_SIZE 64
#define CFF_OP_WRITE_REG 0x00000002
struct cff_op_write_reg {
unsigned char op;
uint addr;
uint value;
} __packed;
#define CFF_OP_POLL_REG 0x00000004
struct cff_op_poll_reg {
unsigned char op;
uint addr;
uint value;
uint mask;
} __packed;
#define CFF_OP_WAIT_IRQ 0x00000005
struct cff_op_wait_irq {
unsigned char op;
} __packed;
#define CFF_OP_RMW 0x0000000a
#define CFF_OP_WRITE_MEM 0x0000000b
struct cff_op_write_mem {
unsigned char op;
uint addr;
uint value;
} __packed;
#define CFF_OP_WRITE_MEMBUF 0x0000000c
struct cff_op_write_membuf {
unsigned char op;
uint addr;
ushort count;
uint buffer[MEMBUF_SIZE];
} __packed;
#define CFF_OP_MEMORY_BASE 0x0000000d
struct cff_op_memory_base {
unsigned char op;
uint base;
uint size;
uint gmemsize;
} __packed;
#define CFF_OP_HANG 0x0000000e
struct cff_op_hang {
unsigned char op;
} __packed;
#define CFF_OP_EOF 0xffffffff
struct cff_op_eof {
unsigned char op;
} __packed;
#define CFF_OP_VERIFY_MEM_FILE 0x00000007
#define CFF_OP_WRITE_SURFACE_PARAMS 0x00000011
struct cff_op_user_event {
unsigned char op;
unsigned int op1;
unsigned int op2;
unsigned int op3;
unsigned int op4;
unsigned int op5;
} __packed;
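/* Encode up to 3 input bytes as 4 base64 characters; when fewer than
 * 3 bytes remain, the output is padded with '=' per standard base64. */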
static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
{
static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
"pqrstuvwxyz0123456789+/";
out[0] = tob64[in[0] >> 2];
out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
| ((in[2] & 0xc0) >> 6)] : '=');
out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
}
static void b64_encode(const unsigned char *in_buf, int in_size,
unsigned char *out_buf, int out_bufsize, int *out_size)
{
unsigned char in[3], out[4];
int i, len;
*out_size = 0;
while (in_size > 0) {
len = 0;
for (i = 0; i < 3; ++i) {
if (in_size-- > 0) {
in[i] = *in_buf++;
++len;
} else
in[i] = 0;
}
if (len) {
b64_encodeblock(in, out, len);
if (out_bufsize < 4) {
pr_warn("kgsl: cffdump: %s: out of buffer\n",
__func__);
return;
}
for (i = 0; i < 4; ++i)
*out_buf++ = out[i];
*out_size += 4;
out_bufsize -= 4;
}
}
}
#define KLOG_TMPBUF_SIZE (1024)
static void klog_printk(const char *fmt, ...)
{
/* per-cpu klog formatting temporary buffer */
static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];
va_list args;
int len;
char *cbuf;
unsigned long flags;
local_irq_save(flags);
cbuf = klog_buf[smp_processor_id()];
va_start(args, fmt);
len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
total_bytes += len;
va_end(args);
relay_write(chan, cbuf, len);
local_irq_restore(flags);
}
static struct cff_op_write_membuf cff_op_write_membuf;
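/* Flush the batch of pending memory writes: emit one WRITE_MEMBUF op
 * covering all queued dwords, or a plain WRITE_MEM op when only a
 * single dword is pending, then reset the batch state. */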
static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
{
void *data;
int len, out_size;
struct cff_op_write_mem cff_op_write_mem;
uint addr = cff_op_write_membuf.addr
- sizeof(uint)*cff_op_write_membuf.count;
if (!cff_op_write_membuf.count) {
pr_warn("kgsl: cffdump: membuf: count == 0, skipping");
return;
}
if (cff_op_write_membuf.count != 1) {
cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
cff_op_write_membuf.addr = addr;
len = sizeof(cff_op_write_membuf) -
sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
data = &cff_op_write_membuf;
} else {
cff_op_write_mem.op = CFF_OP_WRITE_MEM;
cff_op_write_mem.addr = addr;
cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
data = &cff_op_write_mem;
len = sizeof(cff_op_write_mem);
}
b64_encode(data, len, out_buf, out_bufsize, &out_size);
out_buf[out_size] = 0;
klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
cff_op_write_membuf.count = 0;
cff_op_write_membuf.addr = 0;
}
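/* Serialize one CFF opcode. WRITE_MEM ops are batched through the
 * membuf path above; every other opcode is base64-encoded and written
 * to the relay channel as a "serial:id;payload" line. */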
static void cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5)
{
struct cff_op_write_reg cff_op_write_reg;
struct cff_op_poll_reg cff_op_poll_reg;
struct cff_op_wait_irq cff_op_wait_irq;
struct cff_op_memory_base cff_op_memory_base;
struct cff_op_hang cff_op_hang;
struct cff_op_eof cff_op_eof;
struct cff_op_user_event cff_op_user_event;
unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
void *data;
int len = 0, out_size;
long cur_secs;
spin_lock(&cffdump_lock);
if (opcode == CFF_OP_WRITE_MEM) {
if (op1 < 0x40000000 || op1 >= 0x60000000)
KGSL_CORE_ERR("addr out-of-range: op1=%08x", op1);
if ((cff_op_write_membuf.addr != op1 &&
cff_op_write_membuf.count)
|| (cff_op_write_membuf.count == MEMBUF_SIZE))
cffdump_membuf(id, out_buf, sizeof(out_buf));
cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
cff_op_write_membuf.addr = op1 + sizeof(uint);
spin_unlock(&cffdump_lock);
return;
} else if (cff_op_write_membuf.count)
cffdump_membuf(id, out_buf, sizeof(out_buf));
spin_unlock(&cffdump_lock);
switch (opcode) {
case CFF_OP_WRITE_REG:
cff_op_write_reg.op = opcode;
cff_op_write_reg.addr = op1;
cff_op_write_reg.value = op2;
data = &cff_op_write_reg;
len = sizeof(cff_op_write_reg);
break;
case CFF_OP_POLL_REG:
cff_op_poll_reg.op = opcode;
cff_op_poll_reg.addr = op1;
cff_op_poll_reg.value = op2;
cff_op_poll_reg.mask = op3;
data = &cff_op_poll_reg;
len = sizeof(cff_op_poll_reg);
break;
case CFF_OP_WAIT_IRQ:
cff_op_wait_irq.op = opcode;
data = &cff_op_wait_irq;
len = sizeof(cff_op_wait_irq);
break;
case CFF_OP_MEMORY_BASE:
cff_op_memory_base.op = opcode;
cff_op_memory_base.base = op1;
cff_op_memory_base.size = op2;
cff_op_memory_base.gmemsize = op3;
data = &cff_op_memory_base;
len = sizeof(cff_op_memory_base);
break;
case CFF_OP_HANG:
cff_op_hang.op = opcode;
data = &cff_op_hang;
len = sizeof(cff_op_hang);
break;
case CFF_OP_EOF:
cff_op_eof.op = opcode;
data = &cff_op_eof;
len = sizeof(cff_op_eof);
break;
case CFF_OP_WRITE_SURFACE_PARAMS:
case CFF_OP_VERIFY_MEM_FILE:
cff_op_user_event.op = opcode;
cff_op_user_event.op1 = op1;
cff_op_user_event.op2 = op2;
cff_op_user_event.op3 = op3;
cff_op_user_event.op4 = op4;
cff_op_user_event.op5 = op5;
data = &cff_op_user_event;
len = sizeof(cff_op_user_event);
break;
}
if (len) {
b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
out_buf[out_size] = 0;
klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
} else
pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);
cur_secs = get_seconds();
if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
"seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
serial_nr);
last_sec = cur_secs;
}
}
void kgsl_cffdump_init(void)
{
struct dentry *debugfs_dir = kgsl_get_debugfs_dir();
#ifdef ALIGN_CPU
cpumask_t mask;
cpumask_clear(&mask);
cpumask_set_cpu(0, &mask);
sched_setaffinity(0, &mask);
#endif
if (!debugfs_dir || IS_ERR(debugfs_dir)) {
KGSL_CORE_ERR("Debugfs directory is bad\n");
return;
}
kgsl_cff_dump_enable = 1;
spin_lock_init(&cffdump_lock);
dir = debugfs_create_dir("cff", debugfs_dir);
if (!dir) {
KGSL_CORE_ERR("debugfs_create_dir failed\n");
return;
}
chan = create_channel(subbuf_size, n_subbufs);
}
void kgsl_cffdump_destroy(void)
{
if (chan)
relay_flush(chan);
destroy_channel();
if (dir)
debugfs_remove(dir);
}
void kgsl_cffdump_open(enum kgsl_deviceid device_id)
{
/*TODO: move this to where we can report correct gmemsize*/
unsigned int va_base;
/* XXX: drewis edit: only for 8x50 */
va_base = 0x20000000;
kgsl_cffdump_memory_base(device_id, va_base,
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, SZ_256K);
}
void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
unsigned int range, unsigned gmemsize)
{
cffdump_printline(device_id, CFF_OP_MEMORY_BASE, base,
range, gmemsize, 0, 0);
}
void kgsl_cffdump_hang(enum kgsl_deviceid device_id)
{
cffdump_printline(device_id, CFF_OP_HANG, 0, 0, 0, 0, 0);
}
void kgsl_cffdump_close(enum kgsl_deviceid device_id)
{
cffdump_printline(device_id, CFF_OP_EOF, 0, 0, 0, 0, 0);
}
void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1,
unsigned int op2, unsigned int op3,
unsigned int op4, unsigned int op5)
{
cffdump_printline(-1, cff_opcode, op1, op2, op3, op4, op5);
}
void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
bool clean_cache)
{
const void *src;
uint host_size;
uint physaddr;
if (!kgsl_cff_dump_enable)
return;
total_syncmem += sizebytes;
if (memdesc == NULL) {
struct kgsl_mem_entry *entry;
spin_lock(&dev_priv->process_priv->mem_lock);
entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
gpuaddr, sizebytes);
spin_unlock(&dev_priv->process_priv->mem_lock);
if (entry == NULL) {
KGSL_CORE_ERR("did not find mapping "
"for gpuaddr: 0x%08x\n", gpuaddr);
return;
}
memdesc = &entry->memdesc;
}
BUG_ON(memdesc->gpuaddr == 0);
BUG_ON(gpuaddr == 0);
physaddr = kgsl_get_realaddr(memdesc) + (gpuaddr - memdesc->gpuaddr);
src = kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
if (src == NULL || host_size < sizebytes) {
KGSL_CORE_ERR("did not find mapping for "
"gpuaddr: 0x%08x, m->host: 0x%p, phys: 0x%08x\n",
gpuaddr, memdesc->hostptr, memdesc->physaddr);
return;
}
if (clean_cache) {
/* Ensure that this memory region is not read from the
* cache but fetched fresh */
mb();
kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
KGSL_CACHE_OP_INV);
}
BUG_ON(physaddr > 0x66000000 && physaddr < 0x66ffffff);
while (sizebytes > 3) {
cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
0, 0, 0);
gpuaddr += 4;
src += 4;
sizebytes -= 4;
}
if (sizebytes > 0)
cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
0, 0, 0);
}
void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes)
{
if (!kgsl_cff_dump_enable)
return;
BUG_ON(addr > 0x66000000 && addr < 0x66ffffff);
while (sizebytes > 3) {
/* Use 32bit memory writes as long as there's at least
* 4 bytes left */
cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
0, 0, 0);
addr += 4;
sizebytes -= 4;
}
if (sizebytes > 0)
cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
0, 0, 0);
}
void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
uint value)
{
if (!kgsl_cff_dump_enable)
return;
cffdump_printline(device_id, CFF_OP_WRITE_REG, addr, value,
0, 0, 0);
}
void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
uint value, uint mask)
{
if (!kgsl_cff_dump_enable)
return;
cffdump_printline(device_id, CFF_OP_POLL_REG, addr, value,
mask, 0, 0);
}
void kgsl_cffdump_slavewrite(uint addr, uint value)
{
if (!kgsl_cff_dump_enable)
return;
cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0, 0, 0);
}
int kgsl_cffdump_waitirq(void)
{
if (!kgsl_cff_dump_enable)
return 0;
cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0, 0, 0);
return 1;
}
EXPORT_SYMBOL(kgsl_cffdump_waitirq);
#define ADDRESS_STACK_SIZE 256
#define GET_PM4_TYPE3_OPCODE(x) ((*(x) >> 8) & 0xFF)
static unsigned int kgsl_cffdump_addr_count;
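/* Walk a type-3 packet: indirect-buffer packets are recursed into,
 * keeping a stack of already-seen IB addresses so each IB is parsed
 * only once; revisiting an IB with a different size is reported as an
 * error. */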
static bool kgsl_cffdump_handle_type3(struct kgsl_device_private *dev_priv,
uint *hostaddr, bool check_only)
{
static uint addr_stack[ADDRESS_STACK_SIZE];
static uint size_stack[ADDRESS_STACK_SIZE];
switch (GET_PM4_TYPE3_OPCODE(hostaddr)) {
case PM4_INDIRECT_BUFFER_PFD:
case PM4_INDIRECT_BUFFER:
{
/* traverse indirect buffers */
int i;
uint ibaddr = hostaddr[1];
uint ibsize = hostaddr[2];
/* have we already encountered this address? */
for (i = 0;
i < kgsl_cffdump_addr_count && addr_stack[i] != ibaddr;
++i)
;
if (kgsl_cffdump_addr_count == i) {
addr_stack[kgsl_cffdump_addr_count] = ibaddr;
size_stack[kgsl_cffdump_addr_count++] = ibsize;
if (kgsl_cffdump_addr_count >= ADDRESS_STACK_SIZE) {
KGSL_CORE_ERR("stack overflow\n");
return false;
}
return kgsl_cffdump_parse_ibs(dev_priv, NULL,
ibaddr, ibsize, check_only);
} else if (size_stack[i] != ibsize) {
KGSL_CORE_ERR("gpuaddr: 0x%08x, "
"wc: %u, with size wc: %u already on the "
"stack\n", ibaddr, ibsize, size_stack[i]);
return false;
}
}
break;
}
return true;
}
/*
* Traverse IBs and dump them to test vector. Detect swap by inspecting
* register writes, keeping note of the current state, and dump
* framebuffer config to test vector
*/
bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
bool check_only)
{
static uint level; /* recursion level */
bool ret = true;
uint host_size;
uint *hostaddr, *hoststart;
int dwords_left = sizedwords; /* dwords left in the current command
buffer */
if (level == 0)
kgsl_cffdump_addr_count = 0;
if (memdesc == NULL) {
struct kgsl_mem_entry *entry;
spin_lock(&dev_priv->process_priv->mem_lock);
entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
gpuaddr, sizedwords * sizeof(uint));
spin_unlock(&dev_priv->process_priv->mem_lock);
if (entry == NULL) {
KGSL_CORE_ERR("did not find mapping "
"for gpuaddr: 0x%08x\n", gpuaddr);
return true;
}
memdesc = &entry->memdesc;
}
hostaddr = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr, &host_size);
if (hostaddr == NULL) {
KGSL_CORE_ERR("did not find mapping for "
"gpuaddr: 0x%08x\n", gpuaddr);
return true;
}
hoststart = hostaddr;
level++;
if (!memdesc->physaddr) {
KGSL_CORE_ERR("no physaddr");
return true;
} else {
mb();
kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
KGSL_CACHE_OP_INV);
}
#ifdef DEBUG
pr_info("kgsl: cffdump: ib: gpuaddr:0x%08x, wc:%d, hptr:%p\n",
gpuaddr, sizedwords, hostaddr);
#endif
while (dwords_left > 0) {
int count = 0; /* dword count including packet header */
bool cur_ret = true;
switch (*hostaddr >> 30) {
case 0x0: /* type-0 */
count = (*hostaddr >> 16)+2;
break;
case 0x1: /* type-1 */
count = 2;
break;
case 0x3: /* type-3 */
count = ((*hostaddr >> 16) & 0x3fff) + 2;
cur_ret = kgsl_cffdump_handle_type3(dev_priv,
hostaddr, check_only);
break;
default:
pr_warn("kgsl: cffdump: parse-ib: unexpected type: "
"type:%d, word:0x%08x @ 0x%p, gpu:0x%08x\n",
*hostaddr >> 30, *hostaddr, hostaddr,
gpuaddr+4*(sizedwords-dwords_left));
cur_ret = false;
count = dwords_left;
break;
}
#ifdef DEBUG
if (!cur_ret) {
pr_info("kgsl: cffdump: bad sub-type: #:%d/%d, v:0x%08x"
" @ 0x%p[gb:0x%08x], level:%d\n",
sizedwords-dwords_left, sizedwords, *hostaddr,
hostaddr, gpuaddr+4*(sizedwords-dwords_left),
level);
print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
DUMP_PREFIX_OFFSET, 32, 4, hoststart,
sizedwords*4, 0);
}
#endif
ret = ret && cur_ret;
/* jump to next packet */
dwords_left -= count;
hostaddr += count;
cur_ret = dwords_left >= 0;
#ifdef DEBUG
if (!cur_ret) {
pr_info("kgsl: cffdump: bad count: c:%d, #:%d/%d, "
"v:0x%08x @ 0x%p[gb:0x%08x], level:%d\n",
count, sizedwords-(dwords_left+count),
sizedwords, *(hostaddr-count), hostaddr-count,
gpuaddr+4*(sizedwords-(dwords_left+count)),
level);
print_hex_dump(KERN_ERR, level == 1 ? "IB1:" : "IB2:",
DUMP_PREFIX_OFFSET, 32, 4, hoststart,
sizedwords*4, 0);
}
#endif
ret = ret && cur_ret;
}
if (!ret)
pr_info("kgsl: cffdump: parsing failed: gpuaddr:0x%08x, "
"host:0x%p, wc:%d\n", gpuaddr, hoststart, sizedwords);
if (!check_only) {
#ifdef DEBUG
uint offset = gpuaddr - memdesc->gpuaddr;
pr_info("kgsl: cffdump: ib-dump: hostptr:%p, gpuaddr:%08x, "
"physaddr:%08x, offset:%d, size:%d", hoststart,
gpuaddr, memdesc->physaddr + offset, offset,
sizedwords*4);
#endif
kgsl_cffdump_syncmem(dev_priv, memdesc, gpuaddr, sizedwords*4,
false);
}
level--;
return ret;
}
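/* relay callback, invoked when switching to a new sub-buffer. Returning
 * 0 while every sub-buffer is full makes relay drop the data; the first
 * overflow is logged and subsequent drops are counted. */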
static int subbuf_start_handler(struct rchan_buf *buf,
void *subbuf, void *prev_subbuf, uint prev_padding)
{
pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
"=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding);
if (relay_buf_full(buf)) {
if (!suspended) {
suspended = 1;
pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
smp_processor_id());
}
dropped++;
return 0;
} else if (suspended) {
suspended = 0;
pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
smp_processor_id());
}
subbuf_start_reserve(buf, 0);
return 1;
}
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent, int mode, struct rchan_buf *buf,
int *is_global)
{
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_handler(struct dentry *dentry)
{
pr_info("kgsl: cffdump: %s()\n", __func__);
debugfs_remove(dentry);
return 0;
}
/*
* relay callbacks
*/
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_handler,
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
/**
* create_channel - creates channel /debug/kgsl/cff/cpuXXX
*
* Creates channel along with associated produced/consumed control files
*
* Returns channel on success, NULL otherwise
*/
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
{
struct rchan *chan;
pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
"n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
chan = relay_open("cpu", dir, subbuf_size,
n_subbufs, &relay_callbacks, NULL);
if (!chan) {
KGSL_CORE_ERR("relay_open failed\n");
return NULL;
}
suspended = 0;
dropped = 0;
return chan;
}
/**
* destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
*
* Destroys channel along with associated produced/consumed control files
*/
static void destroy_channel(void)
{
pr_info("kgsl: cffdump: relay: destroy_channel\n");
if (chan) {
relay_close(chan);
chan = NULL;
}
}


@ -0,0 +1,85 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_CFFDUMP_H
#define __KGSL_CFFDUMP_H
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
#include <linux/types.h>
#include "kgsl_device.h"
void kgsl_cffdump_init(void);
void kgsl_cffdump_destroy(void);
void kgsl_cffdump_open(enum kgsl_deviceid device_id);
void kgsl_cffdump_close(enum kgsl_deviceid device_id);
void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
bool clean_cache);
void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes);
void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
uint value);
void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
uint value, uint mask);
bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
bool check_only);
void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1,
unsigned int op2, unsigned int op3,
unsigned int op4, unsigned int op5);
static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; }
void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
unsigned int range, unsigned int gmemsize);
void kgsl_cffdump_hang(enum kgsl_deviceid device_id);
#else
#define kgsl_cffdump_init() (void)0
#define kgsl_cffdump_destroy() (void)0
#define kgsl_cffdump_open(device_id) (void)0
#define kgsl_cffdump_close(device_id) (void)0
#define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \
(void) 0
#define kgsl_cffdump_setmem(addr, value, sizebytes) (void)0
#define kgsl_cffdump_regwrite(device_id, addr, value) (void)0
#define kgsl_cffdump_regpoll(device_id, addr, value, mask) (void)0
#define kgsl_cffdump_parse_ibs(dev_priv, memdesc, gpuaddr, \
sizedwords, check_only) true
#define kgsl_cffdump_flags_no_memzero() true
#define kgsl_cffdump_memory_base(device_id, base, range, gmemsize) (void)0
#define kgsl_cffdump_hang(device_id) (void)0
#define kgsl_cffdump_user_event(cff_opcode, op1, op2, op3, op4, op5) \
(void)0
#endif /* CONFIG_MSM_KGSL_CFF_DUMP */
#endif /* __KGSL_CFFDUMP_H */


@ -0,0 +1,86 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include "kgsl.h"
/* default log level is 'error' for everything */
#define KGSL_LOG_LEVEL_DEFAULT 3
#define KGSL_LOG_LEVEL_MAX 7
struct dentry *kgsl_debugfs_dir;
static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
{
*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
return 0;
}
#define KGSL_DEBUGFS_LOG(__log) \
static int __log ## _set(void *data, u64 val) \
{ \
struct kgsl_device *device = data; \
return kgsl_log_set(&device->__log, data, val); \
} \
static int __log ## _get(void *data, u64 *val) \
{ \
struct kgsl_device *device = data; \
*val = device->__log; \
return 0; \
} \
DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops, \
__log ## _get, __log ## _set, "%llu\n");
KGSL_DEBUGFS_LOG(drv_log);
KGSL_DEBUGFS_LOG(cmd_log);
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
void kgsl_device_debugfs_init(struct kgsl_device *device)
{
if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
device->d_debugfs = debugfs_create_dir(device->name,
kgsl_debugfs_dir);
if (!device->d_debugfs || IS_ERR(device->d_debugfs))
return;
device->cmd_log = KGSL_LOG_LEVEL_DEFAULT;
device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
&cmd_log_fops);
debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
&ctxt_log_fops);
debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
&drv_log_fops);
debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
&mem_log_fops);
debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
&pwr_log_fops);
}
void kgsl_core_debugfs_init(void)
{
kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0);
}
void kgsl_core_debugfs_close(void)
{
debugfs_remove_recursive(kgsl_debugfs_dir);
}


@ -0,0 +1,39 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _KGSL_DEBUGFS_H
#define _KGSL_DEBUGFS_H
struct kgsl_device;
#ifdef CONFIG_DEBUG_FS
void kgsl_core_debugfs_init(void);
void kgsl_core_debugfs_close(void);
void kgsl_device_debugfs_init(struct kgsl_device *device);
extern struct dentry *kgsl_debugfs_dir;
static inline struct dentry *kgsl_get_debugfs_dir(void)
{
return kgsl_debugfs_dir;
}
#else
static inline void kgsl_core_debugfs_init(void) { }
static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
static inline void kgsl_core_debugfs_close(void) { }
static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
#endif
#endif


@ -0,0 +1,247 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_DEVICE_H
#define __KGSL_DEVICE_H
#include <linux/idr.h>
#include <linux/wakelock.h>
#include "kgsl_mmu.h"
#include "kgsl_pwrctrl.h"
#include "kgsl_log.h"
#include "kgsl_pwrscale.h"
#define KGSL_TIMEOUT_NONE 0
#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
#define FIRST_TIMEOUT (HZ / 2)
/* KGSL device state is initialized to INIT when platform_probe *
* successfully initialized the device. Once a device has been opened *
* (started) it becomes active. NAP implies that only low latency *
* resources (for now clocks on some platforms) are off. SLEEP implies *
* that the KGSL module believes a device is idle (has been inactive *
* past its timer) and all system resources are released. SUSPEND is *
* requested by the kernel and will be enforced upon all open devices. */
#define KGSL_STATE_NONE 0x00000000
#define KGSL_STATE_INIT 0x00000001
#define KGSL_STATE_ACTIVE 0x00000002
#define KGSL_STATE_NAP 0x00000004
#define KGSL_STATE_SLEEP 0x00000008
#define KGSL_STATE_SUSPEND 0x00000010
#define KGSL_STATE_HUNG 0x00000020
#define KGSL_STATE_DUMP_AND_RECOVER 0x00000040
#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
struct kgsl_device;
struct platform_device;
struct kgsl_device_private;
struct kgsl_context;
struct kgsl_power_stats;
struct kgsl_functable {
void (*device_regread) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void (*device_regwrite) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
void (*device_regread_isr) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value);
void (*device_regwrite_isr) (struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value);
int (*device_setstate) (struct kgsl_device *device, uint32_t flags);
int (*device_idle) (struct kgsl_device *device, unsigned int timeout);
unsigned int (*device_isidle) (struct kgsl_device *device);
int (*device_suspend_context) (struct kgsl_device *device);
int (*device_resume_context) (struct kgsl_device *device);
int (*device_start) (struct kgsl_device *device, unsigned int init_ram);
int (*device_stop) (struct kgsl_device *device);
int (*device_getproperty) (struct kgsl_device *device,
enum kgsl_property_type type,
void *value,
unsigned int sizebytes);
int (*device_waittimestamp) (struct kgsl_device *device,
unsigned int timestamp,
unsigned int msecs);
unsigned int (*device_readtimestamp) (
struct kgsl_device *device,
enum kgsl_timestamp_type type);
int (*device_issueibcmds) (struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
struct kgsl_ibdesc *ibdesc,
unsigned int sizedwords,
uint32_t *timestamp,
unsigned int flags);
int (*device_drawctxt_create) (struct kgsl_device_private *dev_priv,
uint32_t flags,
struct kgsl_context *context);
int (*device_drawctxt_destroy) (struct kgsl_device *device,
struct kgsl_context *context);
long (*device_ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
int (*device_setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
int (*device_cleanup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*device_power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
};
struct kgsl_memregion {
unsigned char *mmio_virt_base;
unsigned int mmio_phys_base;
uint32_t gpu_base;
unsigned int sizebytes;
};
struct kgsl_device {
struct device *dev;
const char *name;
unsigned int ver_major;
unsigned int ver_minor;
uint32_t flags;
enum kgsl_deviceid id;
struct kgsl_memregion regspace;
struct kgsl_memdesc memstore;
const char *iomemname;
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
struct kgsl_functable ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
struct kgsl_pwrctrl pwrctrl;
int open_count;
struct atomic_notifier_head ts_notifier_list;
struct mutex mutex;
uint32_t state;
uint32_t requested_state;
struct list_head memqueue;
unsigned int active_cnt;
struct completion suspend_gate;
wait_queue_head_t wait_queue;
struct workqueue_struct *work_queue;
struct device *parentdev;
struct completion recovery_gate;
struct dentry *d_debugfs;
struct idr context_idr;
/* Logging levels */
int cmd_log;
int ctxt_log;
int drv_log;
int mem_log;
int pwr_log;
struct wake_lock idle_wakelock;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
};
struct kgsl_context {
uint32_t id;
/* Pointer to the owning device instance */
struct kgsl_device_private *dev_priv;
/* Pointer to the device specific context information */
void *devctxt;
};
struct kgsl_process_private {
unsigned int refcnt;
pid_t pid;
spinlock_t mem_lock;
struct list_head mem_list;
struct kgsl_pagetable *pagetable;
struct list_head list;
struct kobject *kobj;
struct {
unsigned int user;
unsigned int user_max;
unsigned int mapped;
unsigned int mapped_max;
unsigned int flushes;
} stats;
};
struct kgsl_device_private {
struct kgsl_device *device;
struct kgsl_process_private *process_priv;
};
struct kgsl_power_stats {
s64 total_time;
s64 busy_time;
};
struct kgsl_device *kgsl_get_device(int dev_idx);
static inline struct kgsl_mmu *
kgsl_get_mmu(struct kgsl_device *device)
{
return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
}
static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
{
device->work_queue = create_workqueue(device->name);
if (!device->work_queue) {
KGSL_DRV_ERR(device, "create_workqueue(%s) failed\n",
device->name);
return -EINVAL;
}
return 0;
}
static inline struct kgsl_context *
kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
{
struct kgsl_context *ctxt =
idr_find(&dev_priv->device->context_idr, id);
/* Make sure that the context belongs to the current instance so
that other processes can't guess context IDs and mess things up */
return (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
}
#endif /* __KGSL_DEVICE_H */

drivers/gpu/msm/kgsl_drm.c (new file, 1690 lines)
File diff suppressed because it is too large

drivers/gpu/msm/kgsl_log.h (new file, 118 lines)

@ -0,0 +1,118 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_LOG_H
#define __KGSL_LOG_H
extern unsigned int kgsl_cff_dump_enable;
#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 6) \
dev_info(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 4) \
dev_warn(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 3) \
dev_err(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 2) \
dev_crit(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \
do { dev_crit(_dev->dev, fmt, ##args); } while (0)
#define KGSL_LOG_DUMP(_dev, fmt, args...) dev_err(_dev->dev, fmt, ##args)
#define KGSL_DRV_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_DRV_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_DRV_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_DRV_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_CMD_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CMD_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CMD_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CMD_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CTXT_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_CTXT_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_CTXT_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_CTXT_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_MEM_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_MEM_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_MEM_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_MEM_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_PWR_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args)
#define KGSL_PWR_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args)
#define KGSL_PWR_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args)
#define KGSL_PWR_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
/* Core error messages - these are for core KGSL functions that have
no device associated with them (such as memory) */
#define KGSL_CORE_ERR(fmt, args...) \
pr_err("kgsl: %s: " fmt, __func__, ##args)
#endif /* __KGSL_LOG_H */

drivers/gpu/msm/kgsl_mmu.c (new file, 1141 lines)
File diff suppressed because it is too large

drivers/gpu/msm/kgsl_mmu.h (new file, 267 lines)

@ -0,0 +1,267 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_MMU_H
#define __KGSL_MMU_H
#include "kgsl_sharedmem.h"
/* Identifier for the global page table */
/* Per process page tables will probably pass in the thread group
as an identifier */
#define KGSL_MMU_GLOBAL_PT 0
#define GSL_PT_SUPER_PTE 8
#define GSL_PT_PAGE_WV 0x00000001
#define GSL_PT_PAGE_RV 0x00000002
#define GSL_PT_PAGE_DIRTY 0x00000004
/* MMU Flags */
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)pagetable->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(pagetable->tlbflushfilter.base,\
0, pagetable->tlbflushfilter.size)
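/* The flush filter keeps one dirty bit per superpage entry: SETDIRTY
 * marks an entry whose PTEs changed, ISDIRTY tests it, and RESET clears
 * the whole filter, so redundant TLB flushes can be skipped. */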
struct kgsl_device;
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
struct kgsl_pagetable {
spinlock_t lock;
unsigned int refcnt;
struct kgsl_memdesc base;
uint32_t va_base;
unsigned int va_range;
unsigned int last_superpte;
unsigned int max_entries;
struct gen_pool *pool;
struct list_head list;
unsigned int name;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
unsigned int tlb_flags;
struct kobject *kobj;
struct {
unsigned int entries;
unsigned int mapped;
unsigned int max_mapped;
unsigned int max_entries;
} stats;
};
struct kgsl_mmu_reg {
uint32_t config;
uint32_t mpu_base;
uint32_t mpu_end;
uint32_t va_range;
uint32_t pt_page;
uint32_t page_fault;
uint32_t tran_error;
uint32_t invalidate;
uint32_t interrupt_mask;
uint32_t interrupt_status;
uint32_t interrupt_clear;
uint32_t axi_error;
};
struct kgsl_mmu {
unsigned int refcnt;
uint32_t flags;
struct kgsl_device *device;
unsigned int config;
uint32_t mpu_base;
int mpu_range;
struct kgsl_memdesc dummyspace;
struct kgsl_mmu_reg reg;
/* current page table object being used by device mmu */
struct kgsl_pagetable *defaultpagetable;
struct kgsl_pagetable *hwpagetable;
};
struct kgsl_ptpool_chunk {
size_t size;
unsigned int count;
int dynamic;
void *data;
unsigned int phys;
unsigned long *bitmap;
struct list_head list;
};
struct kgsl_ptpool {
size_t ptsize;
struct mutex lock;
struct list_head list;
int entries;
int static_entries;
int chunks;
};
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
#ifdef CONFIG_MSM_KGSL_MMU
int kgsl_mmu_init(struct kgsl_device *device);
int kgsl_mmu_start(struct kgsl_device *device);
int kgsl_mmu_stop(struct kgsl_device *device);
int kgsl_mmu_close(struct kgsl_device *device);
int kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags);
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, unsigned int protflags);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
void kgsl_ptpool_destroy(struct kgsl_ptpool *pool);
int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize, int entries);
void kgsl_mh_intrcallback(struct kgsl_device *device);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
static inline int kgsl_mmu_enabled(void)
{
return 1;
}
#else
static inline int kgsl_mmu_enabled(void)
{
return 0;
}
static inline int kgsl_mmu_init(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_start(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_stop(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_close(struct kgsl_device *device)
{
return 0;
}
static inline int kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
return 0;
}
static inline int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
memdesc->gpuaddr = memdesc->physaddr;
return 0;
}
static inline int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
return 0;
}
static inline int kgsl_ptpool_init(struct kgsl_ptpool *pool, int ptsize,
int entries)
{
return 0;
}
static inline int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, unsigned int protflags)
{
/* gpuaddr stays the same address that was passed in */
return 0;
}
static inline void kgsl_ptpool_destroy(struct kgsl_ptpool *pool) { }
static inline void kgsl_mh_intrcallback(struct kgsl_device *device) { }
static inline void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable) { }
static inline unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr)
{
return 0;
}
#endif
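/* Consume the per-device TLB-flush flag: if this device's bit is set
 * in the pagetable, clear it and report that a TLB flush is needed. */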
static inline unsigned int kgsl_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id)
{
unsigned int result = 0;
if (pt == NULL)
return 0;
spin_lock(&pt->lock);
if (pt->tlb_flags & (1<<id)) {
result = KGSL_MMUFLAGS_TLBFLUSH;
pt->tlb_flags &= ~(1<<id);
}
spin_unlock(&pt->lock);
return result;
}
#endif /* __KGSL_MMU_H */


@ -0,0 +1,771 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/interrupt.h>
#include <mach/msm_iomap.h>
#include <mach/msm_bus.h>
#include "kgsl.h"
#define SWITCH_OFF 200
#define TZ_UPDATE_ID 0x01404000
#define TZ_RESET_ID 0x01403000
#ifdef CONFIG_MSM_SECURE_IO
/* Trap into the TrustZone, and call funcs there. */
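/* Calling convention: r0 carries the command id, r1 a token (always 0
 * here), r2 the payload; the smc switches to the secure world and the
 * result comes back in r0. The __iowmb() orders outstanding I/O writes
 * ahead of the secure call. */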
static int __secure_tz_entry(u32 cmd, u32 val)
{
register u32 r0 asm("r0") = cmd;
register u32 r1 asm("r1") = 0x0;
register u32 r2 asm("r2") = val;
__iowmb();
asm(
__asmeq("%0", "r0")
__asmeq("%1", "r0")
__asmeq("%2", "r1")
__asmeq("%3", "r2")
"smc #0 @ switch to secure world\n"
: "=r" (r0)
: "r" (r0), "r" (r1), "r" (r2)
);
return r0;
}
#else
static int __secure_tz_entry(u32 cmd, u32 val)
{
return 0;
}
#endif /* CONFIG_MSM_SECURE_IO */
/* Returns the requested update to our power level. *
* Either up/down (-1/1) a level, or stay the same (0). */
static inline int kgsl_pwrctrl_tz_update(u32 idle)
{
return __secure_tz_entry(TZ_UPDATE_ID, idle);
}
static inline void kgsl_pwrctrl_tz_reset(void)
{
__secure_tz_entry(TZ_RESET_ID, 0);
}
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int new_level)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (new_level < (pwr->num_pwrlevels - 1) &&
new_level >= pwr->thermal_pwrlevel &&
new_level != pwr->active_pwrlevel) {
pwr->active_pwrlevel = new_level;
if (test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags))
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
KGSL_PWR_WARN(device, "pwr level changed to %d\n",
pwr->active_pwrlevel);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
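/* Shared store handler for the gpuclk/max_gpuclk sysfs files: match
 * the written frequency against the pwrlevel table (within 5 MHz) and
 * either pin the thermal limit (max) or switch levels directly. */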
static int __gpuclk_store(int max, struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret, i, delta = 5000000;
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return count;
mutex_lock(&device->mutex);
for (i = 0; i < pwr->num_pwrlevels; i++) {
if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
if (max)
pwr->thermal_pwrlevel = i;
break;
}
}
if (i == pwr->num_pwrlevels)
goto done;
/*
* If the current or requested clock speed is greater than the
* thermal limit, bump down immediately.
*/
if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
else if (!max)
kgsl_pwrctrl_pwrlevel_change(device, i);
done:
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return __gpuclk_store(1, dev, attr, buf, count);
}
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
return snprintf(buf, PAGE_SIZE, "%d\n",
pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return __gpuclk_store(0, dev, attr, buf, count);
}
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
return snprintf(buf, PAGE_SIZE, "%d\n",
pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
int rc;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
rc = strict_strtoul(temp, 0, &val);
if (rc)
return rc;
mutex_lock(&device->mutex);
if (val == 1)
pwr->nap_allowed = true;
else if (val == 0)
pwr->nap_allowed = false;
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
const long div = 1000/HZ;
static unsigned int org_interval_timeout = 1;
int rc;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
rc = strict_strtoul(temp, 0, &val);
if (rc)
return rc;
if (org_interval_timeout == 1)
org_interval_timeout = pwr->interval_timeout;
mutex_lock(&device->mutex);
/* Let the timeout be requested in ms, but convert to jiffies. */
val /= div;
if (val >= org_interval_timeout)
pwr->interval_timeout = val;
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n",
device->pwrctrl.interval_timeout);
}
static int kgsl_pwrctrl_scaling_governor_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
unsigned int reset = pwr->idle_pass;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
if (strncmp(temp, "ondemand", 8) == 0)
reset = 1;
else if (strncmp(temp, "performance", 11) == 0)
reset = 0;
mutex_lock(&device->mutex);
pwr->idle_pass = reset;
if (pwr->idle_pass == 0)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_scaling_governor_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (pwr->idle_pass)
return snprintf(buf, 10, "ondemand\n");
else
return snprintf(buf, 13, "performance\n");
}
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(scaling_governor, 0644, kgsl_pwrctrl_scaling_governor_show,
kgsl_pwrctrl_scaling_governor_store);
static const struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
&dev_attr_max_gpuclk,
&dev_attr_pwrnap,
&dev_attr_idle_timer,
&dev_attr_scaling_governor,
NULL
};
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
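/*
 * These attributes are created under the kgsl device's sysfs directory
 * (typically /sys/class/kgsl/kgsl-3d0/ -- path assumed, not verified in
 * this tree). For example, writing a frequency in Hz to max_gpuclk sets
 * the thermal cap, writing 0/1 to pwrnap forbids/allows the NAP state,
 * and writing "ondemand" or "performance" to scaling_governor toggles
 * idle_pass-based scaling.
 */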
static void kgsl_pwrctrl_idle_calc(struct kgsl_device *device)
{
int val;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_power_stats stats;
device->ftbl.device_power_stats(device, &stats);
if (stats.total_time == 0)
return;
/*
 * If the GPU has stayed in turbo mode for a while,
 * stop writing out values.
 */
if (pwr->active_pwrlevel)
pwr->no_switch_cnt = 0;
else if (pwr->no_switch_cnt > SWITCH_OFF)
return;
pwr->no_switch_cnt++;
val = kgsl_pwrctrl_tz_update(stats.total_time - stats.busy_time);
if (val)
kgsl_pwrctrl_pwrlevel_change(device,
pwr->active_pwrlevel + val);
}
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int i = 0;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"clocks off, device %d\n", device->id);
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_disable(pwr->grp_clks[i]);
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
(device->requested_state != KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"clocks on, device %d\n", device->id);
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
(device->state != KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
/* As the last step, enable grp_clk
 * so that GPU interrupts can be delivered. */
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_enable(pwr->grp_clks[i]);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"axi off, device %d\n", device->id);
if (pwr->ebi1_clk) {
clk_set_rate(pwr->ebi1_clk, 0);
clk_disable(pwr->ebi1_clk);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
0);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"axi on, device %d\n", device->id);
if (pwr->ebi1_clk) {
clk_enable(pwr->ebi1_clk);
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
if (pwr->pcl)
msm_bus_scale_client_update_request(pwr->pcl,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"power off, device %d\n", device->id);
if (pwr->gpu_reg)
regulator_disable(pwr->gpu_reg);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"power on, device %d\n", device->id);
if (pwr->gpu_reg)
regulator_enable(pwr->gpu_reg);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq on, device %d\n", device->id);
enable_irq(pwr->interrupt_num);
}
} else if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq off, device %d\n", device->id);
disable_irq(pwr->interrupt_num);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
int i, result = 0;
struct clk *clk;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
pdata_dev->clk.name.clk,
pdata_dev->clk.name.pclk,
pdata_dev->imem_clk_name.clk,
pdata_dev->imem_clk_name.pclk};
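/*
 * grp_clks[0] is the source clock used for every clk_set_rate() call;
 * if the platform names no separate source clock it falls back to
 * grp_clks[1] below. Slots 1-4 hold the named core/interface clocks
 * and are the only ones enabled/disabled in kgsl_pwrctrl_clk().
 */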
/* Acquire clocks */
for (i = 1; i < KGSL_MAX_CLKS; i++) {
if (clk_names[i]) {
clk = clk_get(&pdev->dev, clk_names[i]);
if (IS_ERR(clk))
goto clk_err;
pwr->grp_clks[i] = clk;
}
}
/* Make sure we have a source clk for freq setting */
clk = clk_get(&pdev->dev, clk_names[0]);
pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
/* put the AXI bus into asynchronous mode with the graphics cores */
if (pdata_pwr->set_grp_async != NULL)
pdata_pwr->set_grp_async();
if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
KGSL_PWR_ERR(device, "invalid power level count: %d\n",
pdata_pwr->num_levels);
result = -EINVAL;
goto done;
}
pwr->num_pwrlevels = pdata_pwr->num_levels;
pwr->active_pwrlevel = pdata_pwr->init_level;
for (i = 0; i < pdata_pwr->num_levels; i++) {
// pwr->pwrlevels[i].gpu_freq =
// (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
// clk_round_rate(pwr->grp_clks[0],
// pdata_pwr->pwrlevel[i].
// gpu_freq) : 0;
pwr->pwrlevels[i].gpu_freq = (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
pdata_pwr->pwrlevel[i].gpu_freq : 0;
pwr->pwrlevels[i].bus_freq =
pdata_pwr->pwrlevel[i].bus_freq;
}
/* Do not set_rate for targets in sync with AXI */
if (pwr->pwrlevels[0].gpu_freq > 0)
clk_set_rate(pwr->grp_clks[0], pwr->
pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
if (IS_ERR(pwr->gpu_reg))
pwr->gpu_reg = NULL;
pwr->power_flags = 0;
pwr->nap_allowed = pdata_pwr->nap_allowed;
/* drewis: the line below was removed at some point before I cherry-picked the commit noted below */
pwr->idle_pass = pdata_pwr->idle_pass;
/* dc14311... msm: kgsl: Replace internal_power_rail API calls with regulator APIs */
pwr->interval_timeout = pdata_pwr->idle_timeout;
pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
if (IS_ERR(pwr->ebi1_clk))
pwr->ebi1_clk = NULL;
else
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
if (pdata_dev->clk.bus_scale_table != NULL) {
pwr->pcl =
msm_bus_scale_register_client(pdata_dev->clk.
bus_scale_table);
if (!pwr->pcl) {
KGSL_PWR_ERR(device,
"msm_bus_scale_register_client failed: "
"id %d table %p", device->id,
pdata_dev->clk.bus_scale_table);
result = -EINVAL;
goto done;
}
}
/* Acquire the interrupt */
pwr->interrupt_num =
platform_get_irq_byname(pdev, pwr->irq_name);
if (pwr->interrupt_num <= 0) {
KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
pwr->interrupt_num);
result = -EINVAL;
goto done;
}
return result;
clk_err:
result = PTR_ERR(clk);
KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
clk_names[i], result);
done:
return result;
}
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int i;
KGSL_PWR_INFO(device, "close device %d\n", device->id);
if (pwr->interrupt_num > 0) {
if (pwr->have_irq) {
free_irq(pwr->interrupt_num, NULL);
pwr->have_irq = 0;
}
pwr->interrupt_num = 0;
}
clk_put(pwr->ebi1_clk);
if (pwr->pcl)
msm_bus_scale_unregister_client(pwr->pcl);
pwr->pcl = 0;
if (pwr->gpu_reg) {
regulator_put(pwr->gpu_reg);
pwr->gpu_reg = NULL;
}
for (i = 1; i < KGSL_MAX_CLKS; i++)
if (pwr->grp_clks[i]) {
clk_put(pwr->grp_clks[i]);
pwr->grp_clks[i] = NULL;
}
pwr->grp_clks[0] = NULL;
pwr->power_flags = 0;
}
void kgsl_idle_check(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
idle_check_ws);
mutex_lock(&device->mutex);
if ((device->pwrctrl.idle_pass) &&
(device->requested_state != KGSL_STATE_SLEEP))
kgsl_pwrctrl_idle_calc(device);
if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
if (kgsl_pwrctrl_sleep(device) != 0)
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
} else if (device->state & (KGSL_STATE_HUNG |
KGSL_STATE_DUMP_AND_RECOVER)) {
device->requested_state = KGSL_STATE_NONE;
}
mutex_unlock(&device->mutex);
}
void kgsl_timer(unsigned long data)
{
struct kgsl_device *device = (struct kgsl_device *) data;
KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
if (device->requested_state != KGSL_STATE_SUSPEND) {
device->requested_state = KGSL_STATE_SLEEP;
/* Have work run in a non-interrupt context. */
queue_work(device->work_queue, &device->idle_check_ws);
}
}
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
BUG_ON(!mutex_is_locked(&device->mutex));
if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
void kgsl_check_suspended(struct kgsl_device *device)
{
if (device->requested_state == KGSL_STATE_SUSPEND ||
device->state == KGSL_STATE_SUSPEND) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
mutex_lock(&device->mutex);
}
if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->recovery_gate);
mutex_lock(&device->mutex);
}
}
/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
KGSL_PWR_INFO(device, "sleep device %d\n", device->id);
/* Work through the legal state transitions */
if (device->requested_state == KGSL_STATE_NAP) {
if (device->ftbl.device_isidle(device))
goto nap;
} else if (device->requested_state == KGSL_STATE_SLEEP) {
if (device->state == KGSL_STATE_NAP ||
device->ftbl.device_isidle(device))
goto sleep;
}
device->requested_state = KGSL_STATE_NONE;
return -EBUSY;
sleep:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
if (pwr->pwrlevels[0].gpu_freq > 0)
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
device->pwrctrl.no_switch_cnt = 0;
device->pwrctrl.time = 0;
kgsl_pwrctrl_tz_reset();
goto clk_off;
nap:
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
clk_off:
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
device->state = device->requested_state;
device->requested_state = KGSL_STATE_NONE;
wake_unlock(&device->idle_wakelock);
KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
device->state, device->id);
return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);
/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
if (device->state == KGSL_STATE_SUSPEND)
return;
if (device->state != KGSL_STATE_NAP) {
if (device->pwrctrl.idle_pass)
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.thermal_pwrlevel);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
/* Turn on the core clocks */
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
/* Enable state before turning on irq */
device->state = KGSL_STATE_ACTIVE;
KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
/* Re-enable HW access */
mod_timer(&device->idle_timer,
jiffies + device->pwrctrl.interval_timeout);
wake_lock(&device->idle_wakelock);
KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);
void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);
void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
/* Order pwrrail/clk sequence based upon platform */
kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
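Taken together, these helpers form a small ACTIVE/NAP/SLEEP state machine driven by the idle timer. A minimal sketch of the expected calling pattern on a submission path (the function name and the elided hardware programming are hypothetical, not part of this commit):

static void example_submit(struct kgsl_device *device)
{
	mutex_lock(&device->mutex);
	/* Block while a suspend or dump-and-recover is in progress. */
	kgsl_check_suspended(device);
	/* Wake the GPU if it is currently napping or sleeping. */
	kgsl_pre_hwaccess(device);
	/* ... program the hardware here ... */
	/* Re-arm the idle timer so the GPU can drop back to sleep. */
	mod_timer(&device->idle_timer,
		  jiffies + device->pwrctrl.interval_timeout);
	mutex_unlock(&device->mutex);
}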


@ -0,0 +1,94 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_PWRCTRL_H
#define __KGSL_PWRCTRL_H
/*****************************************************************************
** power flags
*****************************************************************************/
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3
#define KGSL_PWRFLAGS_ON 1
#define KGSL_PWRFLAGS_OFF 0
#define KGSL_DEFAULT_PWRLEVEL 1
#define KGSL_MAX_CLKS 5
struct platform_device;
struct kgsl_pwrctrl {
int interrupt_num;
int have_irq;
struct clk *ebi1_clk;
struct clk *grp_clks[KGSL_MAX_CLKS];
unsigned long power_flags;
struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
unsigned int active_pwrlevel;
int thermal_pwrlevel;
unsigned int num_pwrlevels;
unsigned int interval_timeout;
struct regulator *gpu_reg;
uint32_t pcl;
unsigned int nap_allowed;
struct adreno_context *suspended_ctxt;
const char *regulator_name;
const char *irq_name;
const char *src_clk_name;
s64 time;
unsigned int no_switch_cnt;
unsigned int idle_pass;
};
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state);
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
int kgsl_pwrctrl_init(struct kgsl_device *device);
void kgsl_pwrctrl_close(struct kgsl_device *device);
void kgsl_timer(unsigned long data);
void kgsl_idle_check(struct work_struct *work);
void kgsl_pre_hwaccess(struct kgsl_device *device);
void kgsl_check_suspended(struct kgsl_device *device);
int kgsl_pwrctrl_sleep(struct kgsl_device *device);
void kgsl_pwrctrl_wake(struct kgsl_device *device);
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int level);
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
void kgsl_pwrctrl_enable(struct kgsl_device *device);
void kgsl_pwrctrl_disable(struct kgsl_device *device);
static inline unsigned long kgsl_get_clkrate(struct clk *clk)
{
return (clk != NULL) ? clk_get_rate(clk) : 0;
}
#endif /* __KGSL_PWRCTRL_H */


@ -0,0 +1,308 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
struct kgsl_pwrscale_attribute {
struct attribute attr;
ssize_t (*show)(struct kgsl_device *device, char *buf);
ssize_t (*store)(struct kgsl_device *device, const char *buf,
size_t count);
};
#define to_pwrscale(k) container_of(k, struct kgsl_pwrscale, kobj)
#define pwrscale_to_device(p) container_of(p, struct kgsl_device, pwrscale)
#define to_device(k) container_of(k, struct kgsl_device, pwrscale_kobj)
#define to_pwrscale_attr(a) \
container_of(a, struct kgsl_pwrscale_attribute, attr)
#define to_policy_attr(a) \
container_of(a, struct kgsl_pwrscale_policy_attribute, attr)
#define PWRSCALE_ATTR(_name, _mode, _show, _store) \
struct kgsl_pwrscale_attribute pwrscale_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
/* Master list of available policies */
static struct kgsl_pwrscale_policy *kgsl_pwrscale_policies[] = {
NULL
};
static ssize_t pwrscale_policy_store(struct kgsl_device *device,
const char *buf, size_t count)
{
int i;
struct kgsl_pwrscale_policy *policy = NULL;
/* The special keyword "none" allows the user to detach all
 * policies */
if (!strncmp("none", buf, 4)) {
kgsl_pwrscale_detach_policy(device);
return count;
}
for (i = 0; kgsl_pwrscale_policies[i]; i++) {
if (!strncmp(kgsl_pwrscale_policies[i]->name, buf,
strnlen(kgsl_pwrscale_policies[i]->name,
PAGE_SIZE))) {
policy = kgsl_pwrscale_policies[i];
break;
}
}
if (policy)
if (kgsl_pwrscale_attach_policy(device, policy))
return -EIO;
return count;
}
static ssize_t pwrscale_policy_show(struct kgsl_device *device, char *buf)
{
int ret;
if (device->pwrscale.policy)
ret = snprintf(buf, PAGE_SIZE, "%s\n",
device->pwrscale.policy->name);
else
ret = snprintf(buf, PAGE_SIZE, "none\n");
return ret;
}
PWRSCALE_ATTR(policy, 0644, pwrscale_policy_show, pwrscale_policy_store);
static ssize_t pwrscale_avail_policies_show(struct kgsl_device *device,
char *buf)
{
int i, ret = 0;
for (i = 0; kgsl_pwrscale_policies[i]; i++) {
ret += snprintf(buf + ret, PAGE_SIZE - ret, "%s ",
kgsl_pwrscale_policies[i]->name);
}
ret += snprintf(buf + ret, PAGE_SIZE - ret, "none\n");
return ret;
}
PWRSCALE_ATTR(avail_policies, 0444, pwrscale_avail_policies_show, NULL);
static struct attribute *pwrscale_attrs[] = {
&pwrscale_attr_policy.attr,
&pwrscale_attr_avail_policies.attr,
NULL
};
static ssize_t policy_sysfs_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
struct kgsl_device *device = pwrscale_to_device(pwrscale);
struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
ssize_t ret;
if (pattr->show)
ret = pattr->show(device, pwrscale, buf);
else
ret = -EIO;
return ret;
}
static ssize_t policy_sysfs_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t count)
{
struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
struct kgsl_device *device = pwrscale_to_device(pwrscale);
struct kgsl_pwrscale_policy_attribute *pattr = to_policy_attr(attr);
ssize_t ret;
if (pattr->store)
ret = pattr->store(device, pwrscale, buf, count);
else
ret = -EIO;
return ret;
}
static void policy_sysfs_release(struct kobject *kobj)
{
struct kgsl_pwrscale *pwrscale = to_pwrscale(kobj);
complete(&pwrscale->kobj_unregister);
}
static ssize_t pwrscale_sysfs_show(struct kobject *kobj,
struct attribute *attr, char *buf)
{
struct kgsl_device *device = to_device(kobj);
struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
ssize_t ret;
if (pattr->show)
ret = pattr->show(device, buf);
else
ret = -EIO;
return ret;
}
static ssize_t pwrscale_sysfs_store(struct kobject *kobj,
struct attribute *attr,
const char *buf, size_t count)
{
struct kgsl_device *device = to_device(kobj);
struct kgsl_pwrscale_attribute *pattr = to_pwrscale_attr(attr);
ssize_t ret;
if (pattr->store)
ret = pattr->store(device, buf, count);
else
ret = -EIO;
return ret;
}
static void pwrscale_sysfs_release(struct kobject *kobj)
{
}
static const struct sysfs_ops policy_sysfs_ops = {
.show = policy_sysfs_show,
.store = policy_sysfs_store
};
static const struct sysfs_ops pwrscale_sysfs_ops = {
.show = pwrscale_sysfs_show,
.store = pwrscale_sysfs_store
};
static struct kobj_type ktype_pwrscale_policy = {
.sysfs_ops = &policy_sysfs_ops,
.default_attrs = NULL,
.release = policy_sysfs_release
};
static struct kobj_type ktype_pwrscale = {
.sysfs_ops = &pwrscale_sysfs_ops,
.default_attrs = pwrscale_attrs,
.release = pwrscale_sysfs_release
};
void kgsl_pwrscale_sleep(struct kgsl_device *device)
{
if (device->pwrscale.policy && device->pwrscale.policy->sleep)
device->pwrscale.policy->sleep(device, &device->pwrscale);
}
EXPORT_SYMBOL(kgsl_pwrscale_sleep);
void kgsl_pwrscale_wake(struct kgsl_device *device)
{
if (device->pwrscale.policy && device->pwrscale.policy->wake)
device->pwrscale.policy->wake(device, &device->pwrscale);
}
EXPORT_SYMBOL(kgsl_pwrscale_wake);
void kgsl_pwrscale_busy(struct kgsl_device *device)
{
if (device->pwrscale.policy && device->pwrscale.policy->busy)
device->pwrscale.policy->busy(device, &device->pwrscale);
}
void kgsl_pwrscale_idle(struct kgsl_device *device)
{
if (device->pwrscale.policy && device->pwrscale.policy->idle)
device->pwrscale.policy->idle(device, &device->pwrscale);
}
EXPORT_SYMBOL(kgsl_pwrscale_idle);
int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
struct attribute_group *attr_group)
{
int ret;
init_completion(&pwrscale->kobj_unregister);
ret = kobject_init_and_add(&pwrscale->kobj,
&ktype_pwrscale_policy,
&device->pwrscale_kobj,
"%s", pwrscale->policy->name);
if (ret)
return ret;
ret = sysfs_create_group(&pwrscale->kobj, attr_group);
if (ret) {
kobject_put(&pwrscale->kobj);
wait_for_completion(&pwrscale->kobj_unregister);
}
return ret;
}
void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
struct attribute_group *attr_group)
{
sysfs_remove_group(&pwrscale->kobj, attr_group);
kobject_put(&pwrscale->kobj);
wait_for_completion(&pwrscale->kobj_unregister);
}
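/*
 * Teardown handshake: kobject_put() drops the last reference, the
 * kobject core then invokes policy_sysfs_release(), which completes
 * kobj_unregister, and only then does the wait_for_completion() above
 * return -- so no sysfs show/store can race with the policy's memory
 * going away.
 */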
void kgsl_pwrscale_detach_policy(struct kgsl_device *device)
{
mutex_lock(&device->mutex);
if (device->pwrscale.policy != NULL)
device->pwrscale.policy->close(device, &device->pwrscale);
device->pwrscale.policy = NULL;
mutex_unlock(&device->mutex);
}
EXPORT_SYMBOL(kgsl_pwrscale_detach_policy);
int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
struct kgsl_pwrscale_policy *policy)
{
int ret;
if (device->pwrscale.policy != NULL)
kgsl_pwrscale_detach_policy(device);
mutex_lock(&device->mutex);
device->pwrscale.policy = policy;
ret = device->pwrscale.policy->init(device, &device->pwrscale);
if (ret)
device->pwrscale.policy = NULL;
mutex_unlock(&device->mutex);
return ret;
}
EXPORT_SYMBOL(kgsl_pwrscale_attach_policy);
int kgsl_pwrscale_init(struct kgsl_device *device)
{
return kobject_init_and_add(&device->pwrscale_kobj, &ktype_pwrscale,
&device->dev->kobj, "pwrscale");
}
EXPORT_SYMBOL(kgsl_pwrscale_init);
void kgsl_pwrscale_close(struct kgsl_device *device)
{
kobject_put(&device->pwrscale_kobj);
}
EXPORT_SYMBOL(kgsl_pwrscale_close);


@ -0,0 +1,89 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_PWRSCALE_H
#define __KGSL_PWRSCALE_H
struct kgsl_pwrscale;
struct kgsl_pwrscale_policy {
const char *name;
int (*init)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*close)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*idle)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*busy)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*sleep)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
void (*wake)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale);
};
struct kgsl_pwrscale {
struct kgsl_pwrscale_policy *policy;
struct kobject kobj;
struct completion kobj_unregister;
void *priv;
};
struct kgsl_pwrscale_policy_attribute {
struct attribute attr;
ssize_t (*show)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale, char *buf);
ssize_t (*store)(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale, const char *buf,
size_t count);
};
#define PWRSCALE_POLICY_ATTR(_name, _mode, _show, _store) \
struct kgsl_pwrscale_policy_attribute policy_attr_##_name = \
__ATTR(_name, _mode, _show, _store)
int kgsl_pwrscale_init(struct kgsl_device *device);
void kgsl_pwrscale_close(struct kgsl_device *device);
int kgsl_pwrscale_attach_policy(struct kgsl_device *device,
struct kgsl_pwrscale_policy *policy);
void kgsl_pwrscale_detach_policy(struct kgsl_device *device);
void kgsl_pwrscale_idle(struct kgsl_device *device);
void kgsl_pwrscale_busy(struct kgsl_device *device);
void kgsl_pwrscale_sleep(struct kgsl_device *device);
void kgsl_pwrscale_wake(struct kgsl_device *device);
int kgsl_pwrscale_policy_add_files(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
struct attribute_group *attr_group);
void kgsl_pwrscale_policy_remove_files(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
struct attribute_group *attr_group);
#endif
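As an illustration of this interface, a minimal do-nothing policy might look like the sketch below; the "demo" name is hypothetical, and a real policy would also be listed in the kgsl_pwrscale_policies[] master list in kgsl_pwrscale.c so it can be selected through the policy sysfs file:

static int demo_init(struct kgsl_device *device,
		     struct kgsl_pwrscale *pwrscale)
{
	/* Per-policy state, if any, would hang off pwrscale->priv. */
	pwrscale->priv = NULL;
	return 0;
}

static void demo_close(struct kgsl_device *device,
		       struct kgsl_pwrscale *pwrscale)
{
	/* close is called unconditionally on detach, so it must exist. */
}

static void demo_idle(struct kgsl_device *device,
		      struct kgsl_pwrscale *pwrscale)
{
	/* A real policy would inspect busy statistics here and call
	 * kgsl_pwrctrl_pwrlevel_change() to pick a new level. */
}

struct kgsl_pwrscale_policy kgsl_pwrscale_policy_demo = {
	.name = "demo",
	.init = demo_init,
	.close = demo_close,
	.idle = demo_idle,
};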


@ -0,0 +1,528 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "kgsl_cffdump.h"
static struct kgsl_process_private *
_get_priv_from_kobj(struct kobject *kobj)
{
struct kgsl_process_private *private;
unsigned long name;
if (!kobj)
return NULL;
if (sscanf(kobj->name, "%lu", &name) != 1)
return NULL;
list_for_each_entry(private, &kgsl_driver.process_list, list) {
if (private->pid == name)
return private;
}
return NULL;
}
/* sharedmem / memory sysfs files */
static ssize_t
process_show(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_process_private *priv;
unsigned int val = 0;
mutex_lock(&kgsl_driver.process_mutex);
priv = _get_priv_from_kobj(kobj);
if (priv == NULL) {
mutex_unlock(&kgsl_driver.process_mutex);
return 0;
}
if (!strncmp(attr->attr.name, "user", 4))
val = priv->stats.user;
if (!strncmp(attr->attr.name, "user_max", 8))
val = priv->stats.user_max;
if (!strncmp(attr->attr.name, "mapped", 6))
val = priv->stats.mapped;
if (!strncmp(attr->attr.name, "mapped_max", 10))
val = priv->stats.mapped_max;
if (!strncmp(attr->attr.name, "flushes", 7))
val = priv->stats.flushes;
mutex_unlock(&kgsl_driver.process_mutex);
return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
#define KGSL_MEMSTAT_ATTR(_name, _show) \
static struct kobj_attribute attr_##_name = \
__ATTR(_name, 0444, _show, NULL)
KGSL_MEMSTAT_ATTR(user, process_show);
KGSL_MEMSTAT_ATTR(user_max, process_show);
KGSL_MEMSTAT_ATTR(mapped, process_show);
KGSL_MEMSTAT_ATTR(mapped_max, process_show);
KGSL_MEMSTAT_ATTR(flushes, process_show);
static struct attribute *process_attrs[] = {
&attr_user.attr,
&attr_user_max.attr,
&attr_mapped.attr,
&attr_mapped_max.attr,
&attr_flushes.attr,
NULL
};
static struct attribute_group process_attr_group = {
.attrs = process_attrs,
};
void
kgsl_process_uninit_sysfs(struct kgsl_process_private *private)
{
/* Remove the sysfs entry */
if (private->kobj) {
sysfs_remove_group(private->kobj, &process_attr_group);
kobject_put(private->kobj);
}
}
void
kgsl_process_init_sysfs(struct kgsl_process_private *private)
{
unsigned char name[16];
/* Add an entry to the sysfs device */
snprintf(name, sizeof(name), "%d", private->pid);
private->kobj = kobject_create_and_add(name, kgsl_driver.prockobj);
/* sysfs failure isn't fatal, just annoying */
if (private->kobj != NULL) {
if (sysfs_create_group(private->kobj, &process_attr_group)) {
kobject_put(private->kobj);
private->kobj = NULL;
}
}
}
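/*
 * The kobject is named after the pid, so each process ends up with its
 * own sysfs directory of user/user_max/mapped/mapped_max/flushes stats;
 * _get_priv_from_kobj() recovers the owning kgsl_process_private by
 * parsing that name back into a pid.
 */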
static int kgsl_drv_memstat_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
unsigned int val = 0;
if (!strncmp(attr->attr.name, "vmalloc", 7))
val = kgsl_driver.stats.vmalloc;
else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
val = kgsl_driver.stats.vmalloc_max;
else if (!strncmp(attr->attr.name, "coherent", 8))
val = kgsl_driver.stats.coherent;
else if (!strncmp(attr->attr.name, "coherent_max", 12))
val = kgsl_driver.stats.coherent_max;
else if (!strncmp(attr->attr.name, "mapped", 6))
val = kgsl_driver.stats.mapped;
else if (!strncmp(attr->attr.name, "mapped_max", 10))
val = kgsl_driver.stats.mapped_max;
return snprintf(buf, PAGE_SIZE, "%u\n", val);
}
static int kgsl_drv_histogram_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int len = 0;
int i;
for (i = 0; i < 16; i++)
len += snprintf(buf + len, PAGE_SIZE - len, "%d ",
kgsl_driver.stats.histogram[i]);
len += snprintf(buf + len, PAGE_SIZE - len, "\n");
return len;
}
DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
static const struct device_attribute *drv_attr_list[] = {
&dev_attr_vmalloc,
&dev_attr_vmalloc_max,
&dev_attr_coherent,
&dev_attr_coherent_max,
&dev_attr_mapped,
&dev_attr_mapped_max,
&dev_attr_histogram,
NULL
};
void
kgsl_sharedmem_uninit_sysfs(void)
{
kgsl_remove_device_sysfs_files(&kgsl_driver.virtdev, drv_attr_list);
}
int
kgsl_sharedmem_init_sysfs(void)
{
return kgsl_create_device_sysfs_files(&kgsl_driver.virtdev,
drv_attr_list);
}
#ifdef CONFIG_OUTER_CACHE
static void _outer_cache_range_op(int op, unsigned long addr, size_t size)
{
switch (op) {
case KGSL_CACHE_OP_FLUSH:
outer_flush_range(addr, addr + size);
break;
case KGSL_CACHE_OP_CLEAN:
outer_clean_range(addr, addr + size);
break;
case KGSL_CACHE_OP_INV:
outer_inv_range(addr, addr + size);
break;
}
}
#endif
static unsigned long kgsl_vmalloc_physaddr(struct kgsl_memdesc *memdesc,
unsigned int offset)
{
unsigned int addr;
if (offset > memdesc->size)
return 0;
addr = vmalloc_to_pfn(memdesc->hostptr + offset);
return addr << PAGE_SHIFT;
}
#ifdef CONFIG_OUTER_CACHE
static void kgsl_vmalloc_outer_cache(struct kgsl_memdesc *memdesc, int op)
{
void *vaddr = memdesc->hostptr;
for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
unsigned long paddr = page_to_phys(vmalloc_to_page(vaddr));
_outer_cache_range_op(op, paddr, PAGE_SIZE);
}
}
#endif
static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
unsigned long offset, pg;
struct page *page;
offset = (unsigned long) vmf->virtual_address - vma->vm_start;
pg = (unsigned long) memdesc->hostptr + offset;
page = vmalloc_to_page((void *) pg);
if (page == NULL)
return VM_FAULT_SIGBUS;
get_page(page);
vmf->page = page;
return 0;
}
static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
{
return VM_RESERVED | VM_DONTEXPAND;
}
static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
{
kgsl_driver.stats.vmalloc -= memdesc->size;
vfree(memdesc->hostptr);
}
static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
{
kgsl_driver.stats.coherent -= memdesc->size;
dma_free_coherent(NULL, memdesc->size,
memdesc->hostptr, memdesc->physaddr);
}
static unsigned long kgsl_contig_physaddr(struct kgsl_memdesc *memdesc,
unsigned int offset)
{
if (offset > memdesc->size)
return 0;
return memdesc->physaddr + offset;
}
#ifdef CONFIG_OUTER_CACHE
static void kgsl_contig_outer_cache(struct kgsl_memdesc *memdesc, int op)
{
_outer_cache_range_op(op, memdesc->physaddr, memdesc->size);
}
#endif
#ifdef CONFIG_OUTER_CACHE
static void kgsl_userptr_outer_cache(struct kgsl_memdesc *memdesc, int op)
{
void *vaddr = memdesc->hostptr;
for (; vaddr < (memdesc->hostptr + memdesc->size); vaddr += PAGE_SIZE) {
unsigned long paddr = kgsl_virtaddr_to_physaddr(vaddr);
if (paddr)
_outer_cache_range_op(op, paddr, PAGE_SIZE);
}
}
#endif
static unsigned long kgsl_userptr_physaddr(struct kgsl_memdesc *memdesc,
unsigned int offset)
{
return kgsl_virtaddr_to_physaddr(memdesc->hostptr + offset);
}
/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
.physaddr = kgsl_vmalloc_physaddr,
.free = kgsl_vmalloc_free,
.vmflags = kgsl_vmalloc_vmflags,
.vmfault = kgsl_vmalloc_vmfault,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_vmalloc_outer_cache,
#endif
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);
static struct kgsl_memdesc_ops kgsl_coherent_ops = {
.physaddr = kgsl_contig_physaddr,
.free = kgsl_coherent_free,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_contig_outer_cache,
#endif
};
/* Global - also used by kgsl.c and kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_contig_ops = {
.physaddr = kgsl_contig_physaddr,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_contig_outer_cache
#endif
};
EXPORT_SYMBOL(kgsl_contig_ops);
/* Global - also used by kgsl.c */
struct kgsl_memdesc_ops kgsl_userptr_ops = {
.physaddr = kgsl_userptr_physaddr,
#ifdef CONFIG_OUTER_CACHE
.outer_cache = kgsl_userptr_outer_cache,
#endif
};
EXPORT_SYMBOL(kgsl_userptr_ops);
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
{
void *addr = memdesc->hostptr;
int size = memdesc->size;
switch (op) {
case KGSL_CACHE_OP_FLUSH:
dmac_flush_range(addr, addr + size);
break;
case KGSL_CACHE_OP_CLEAN:
dmac_clean_range(addr, addr + size);
break;
case KGSL_CACHE_OP_INV:
dmac_inv_range(addr, addr + size);
break;
}
if (memdesc->ops->outer_cache)
memdesc->ops->outer_cache(memdesc, op);
}
EXPORT_SYMBOL(kgsl_cache_range_op);
static int
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
void *ptr, size_t size, unsigned int protflags)
{
int result;
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->priv = KGSL_MEMFLAGS_CACHED;
memdesc->ops = &kgsl_vmalloc_ops;
memdesc->hostptr = (void *) ptr;
kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
result = kgsl_mmu_map(pagetable, memdesc, protflags);
if (result) {
kgsl_sharedmem_free(memdesc);
} else {
int order;
KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
order = get_order(size);
if (order < 16)
kgsl_driver.stats.histogram[order]++;
}
return result;
}
int
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
void *ptr;
BUG_ON(size == 0);
size = ALIGN(size, PAGE_SIZE * 2);
ptr = vmalloc(size);
if (ptr == NULL) {
KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
return -ENOMEM;
}
return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);
int
kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, int flags)
{
void *ptr;
unsigned int protflags;
BUG_ON(size == 0);
ptr = vmalloc_user(size);
if (ptr == NULL) {
KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
size, kgsl_driver.stats.vmalloc);
return -ENOMEM;
}
protflags = GSL_PT_PAGE_RV;
if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
protflags |= GSL_PT_PAGE_WV;
return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);
int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)
{
size = ALIGN(size, PAGE_SIZE);
memdesc->hostptr = dma_alloc_coherent(NULL, size, &memdesc->physaddr,
GFP_KERNEL);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
return -ENOMEM;
}
memdesc->size = size;
memdesc->ops = &kgsl_coherent_ops;
/* Record statistics */
KGSL_STATS_ADD(size, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_alloc_coherent);
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
if (memdesc == NULL || memdesc->size == 0)
return;
if (memdesc->gpuaddr)
kgsl_mmu_unmap(memdesc->pagetable, memdesc);
if (memdesc->ops->free)
memdesc->ops->free(memdesc);
memset(memdesc, 0, sizeof(*memdesc));
}
EXPORT_SYMBOL(kgsl_sharedmem_free);
int
kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
uint32_t *dst,
unsigned int offsetbytes)
{
BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
WARN_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
if (offsetbytes + sizeof(unsigned int) > memdesc->size)
return -ERANGE;
*dst = readl_relaxed(memdesc->hostptr + offsetbytes);
return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_readl);
int
kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes,
uint32_t src)
{
BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
BUG_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes,
src, sizeof(uint));
writel_relaxed(src, memdesc->hostptr + offsetbytes);
return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_writel);
int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
unsigned int value, unsigned int sizebytes)
{
BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
BUG_ON(offsetbytes + sizebytes > memdesc->size);
kgsl_cffdump_setmem(memdesc->physaddr + offsetbytes, value,
sizebytes);
memset(memdesc->hostptr + offsetbytes, value, sizebytes);
return 0;
}
EXPORT_SYMBOL(kgsl_sharedmem_set);
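A usage sketch for this allocator API (the wrapper function and its error handling are assumptions for illustration, not code from this commit): allocate a page of coherent memory, round-trip one word through it, and free it.

static int example_coherent_roundtrip(void)
{
	struct kgsl_memdesc memdesc;
	uint32_t val = 0;
	int ret;

	memset(&memdesc, 0, sizeof(memdesc));
	ret = kgsl_sharedmem_alloc_coherent(&memdesc, PAGE_SIZE);
	if (ret)
		return ret;

	/* Offsets are bounds-checked against memdesc.size. */
	kgsl_sharedmem_writel(&memdesc, 0, 0xdeadbeef);
	kgsl_sharedmem_readl(&memdesc, &val, 0);

	/* No gpuaddr was mapped, so this only releases the DMA memory. */
	kgsl_sharedmem_free(&memdesc);
	return (val == 0xdeadbeef) ? 0 : -EIO;
}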


@ -0,0 +1,116 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H
#include <linux/dma-mapping.h>
struct kgsl_pagetable;
struct kgsl_device;
struct kgsl_process_private;
#define KGSL_CACHE_OP_INV 0x01
#define KGSL_CACHE_OP_FLUSH 0x02
#define KGSL_CACHE_OP_CLEAN 0x03
/** Set if the memdesc describes cached memory */
#define KGSL_MEMFLAGS_CACHED 0x00000001
struct kgsl_memdesc;
struct kgsl_memdesc_ops {
unsigned long (*physaddr)(struct kgsl_memdesc *, unsigned int);
void (*outer_cache)(struct kgsl_memdesc *, int);
int (*vmflags)(struct kgsl_memdesc *);
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
struct vm_fault *);
void (*free)(struct kgsl_memdesc *memdesc);
};
/* shared memory allocation */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
void *hostptr;
unsigned int gpuaddr;
unsigned int physaddr;
unsigned int size;
unsigned int priv;
struct kgsl_memdesc_ops *ops;
};
extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
extern struct kgsl_memdesc_ops kgsl_contig_ops;
extern struct kgsl_memdesc_ops kgsl_userptr_ops;
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size);
int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, int flags);
int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
uint32_t *dst,
unsigned int offsetbytes);
int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes,
uint32_t src);
int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes, unsigned int value,
unsigned int sizebytes);
void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);
void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);
static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, unsigned int flags)
{
return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
}
static inline int
kgsl_allocate_contig(struct kgsl_memdesc *memdesc, size_t size)
{
return kgsl_sharedmem_alloc_coherent(memdesc, size);
}
#endif /* __KGSL_SHAREDMEM_H */

drivers/gpu/msm/z180.c (1067 lines) Normal file

File diff suppressed because it is too large

drivers/gpu/msm/z180.h (49 lines) Normal file

@ -0,0 +1,49 @@
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __Z180_H
#define __Z180_H
#define DEVICE_2D_NAME "kgsl-2d"
#define DEVICE_2D0_NAME "kgsl-2d0"
#define DEVICE_2D1_NAME "kgsl-2d1"
struct z180_ringbuffer {
unsigned int prevctx;
struct kgsl_memdesc cmdbufdesc;
};
struct z180_device {
struct kgsl_device dev; /* Must be first field in this struct */
int current_timestamp;
int timestamp;
struct z180_ringbuffer ringbuffer;
spinlock_t cmdwin_lock;
};
#endif /* __Z180_H */


@ -0,0 +1,93 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __Z80_REG_H
#define __Z80_REG_H
#define REG_VGC_IRQSTATUS__MH_MASK 0x00000001L
#define REG_VGC_IRQSTATUS__G2D_MASK 0x00000002L
#define REG_VGC_IRQSTATUS__FIFO_MASK 0x00000004L
#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
#define ADDR_MH_ARBITER_CONFIG 0x0A40
#define ADDR_MH_INTERRUPT_CLEAR 0x0A44
#define ADDR_MH_INTERRUPT_MASK 0x0A42
#define ADDR_MH_INTERRUPT_STATUS 0x0A43
#define ADDR_MH_AXI_ERROR 0x0A45
#define ADDR_MH_AXI_HALT_CONTROL 0x0A50
#define ADDR_MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define ADDR_MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
#define ADDR_MH_MMU_CONFIG 0x0040
#define ADDR_MH_MMU_INVALIDATE 0x0045
#define ADDR_MH_MMU_MPU_BASE 0x0046
#define ADDR_MH_MMU_MPU_END 0x0047
#define ADDR_MH_MMU_PT_BASE 0x0042
#define ADDR_MH_MMU_TRAN_ERROR 0x0044
#define ADDR_MH_MMU_VA_RANGE 0x0041
#define ADDR_VGC_MH_READ_ADDR 0x0510
#define ADDR_VGC_MH_DATA_ADDR 0x0518
#define ADDR_MH_MMU_PAGE_FAULT 0x0043
#define ADDR_VGC_COMMANDSTREAM 0x0000
#define ADDR_VGC_IRQENABLE 0x0438
#define ADDR_VGC_IRQSTATUS 0x0418
#define ADDR_VGC_IRQ_ACTIVE_CNT 0x04E0
#define ADDR_VGC_MMUCOMMANDSTREAM 0x03FC
#define ADDR_VGV3_CONTROL 0x0070
#define ADDR_VGV3_LAST 0x007F
#define ADDR_VGV3_MODE 0x0071
#define ADDR_VGV3_NEXTADDR 0x0075
#define ADDR_VGV3_NEXTCMD 0x0076
#define ADDR_VGV3_WRITEADDR 0x0072
#endif /* __Z180_REG_H */
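The __SHIFT macros above are intended to be composed into whole register words; a hedged sketch (the chosen field values are made up for illustration) of building an MH arbiter configuration that would then be written to ADDR_MH_ARBITER_CONFIG:

unsigned int mh_arbiter_cfg =
	(0x10 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT) |
	(1 << MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT) |
	(1 << MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT) |
	(1 << MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT) |
	(1 << MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT);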

File diff suppressed because it is too large


@ -11,6 +11,8 @@ source "drivers/gpu/vga/Kconfig"
source "drivers/gpu/drm/Kconfig"
source "drivers/gpu/msm/Kconfig"
config VGASTATE
tristate
default n


@ -55,7 +55,7 @@ config MSM_ROTATOR_USE_IMEM
block. Or some systems may want the iMem to be dedicated to a
different function.
config MSM_KGSL_MMU
config GPU_MSM_KGSL_MMU
bool "Turn on MMU for graphics driver "
depends on GPU_MSM_KGSL && MMU
default n


@ -34,10 +34,4 @@ obj-$(CONFIG_FB_MSM_LCDC) += mdp_lcdc.o
obj-$(CONFIG_FB_MSM_TVOUT) += tvenc.o tvfb.o
# Yamato GL driver
ifeq ($(CONFIG_ARCH_MSM7X30),y)
obj-$(CONFIG_GPU_MSM_KGSL) += gpu/kgsl_adreno205/
else
obj-$(CONFIG_GPU_MSM_KGSL) += gpu/kgsl/
endif
obj-$(CONFIG_MSM_HDMI) += hdmi/

Binary files not shown.


@ -0,0 +1,12 @@
---- CONFIG_MSM_KGSL_MMU Matches (11 in 6 files) ----
kgsl.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_device.h (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_log.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_log.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_mmu.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifndef CONFIG_MSM_KGSL_MMU
kgsl_mmu.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_mmu.h (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_yamato.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_yamato.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU


@ -51,7 +51,7 @@ struct kgsl_file_private {
static void kgsl_put_phys_file(struct file *file);
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
static long flush_l1_cache_range(unsigned long addr, int size)
{
struct page *page;
@ -190,11 +190,6 @@ static int kgsl_first_open_locked(void)
kgsl_clk_enable();
/* init memory apertures */
result = kgsl_sharedmem_init(&kgsl_driver.shmem);
if (result != 0)
goto done;
/* init devices */
result = kgsl_yamato_init(&kgsl_driver.yamato_device,
&kgsl_driver.yamato_config);
@ -221,9 +216,6 @@ static int kgsl_last_release_locked(void)
/* close devices */
kgsl_yamato_close(&kgsl_driver.yamato_device);
/* shutdown memory apertures */
kgsl_sharedmem_close(&kgsl_driver.shmem);
kgsl_clk_disable();
kgsl_driver.active = false;
wake_unlock(&kgsl_driver.wake_lock);
@ -642,7 +634,7 @@ done:
return result;
}
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
static int kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_file_private *private,
void __user *arg)
{
@ -888,7 +880,7 @@ error:
return result;
}
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
/* This function flushes a graphics memory allocation from CPU cache
 * when caching is enabled with MMU */
static int kgsl_ioctl_sharedmem_flush_cache(struct kgsl_file_private *private,
@ -1066,6 +1058,9 @@ static void kgsl_driver_cleanup(void)
kgsl_driver.interrupt_num = 0;
}
/* shutdown memory apertures */
kgsl_sharedmem_close(&kgsl_driver.shmem);
if (kgsl_driver.grp_clk) {
clk_put(kgsl_driver.grp_clk);
kgsl_driver.grp_clk = NULL;
@ -1170,6 +1165,9 @@ static int __devinit kgsl_platform_probe(struct platform_device *pdev)
kgsl_driver.shmem.physbase = res->start;
kgsl_driver.shmem.size = resource_size(res);
/* init memory apertures */
result = kgsl_sharedmem_init(&kgsl_driver.shmem);
done:
if (result)
kgsl_driver_cleanup();


@ -46,6 +46,8 @@ kgsl_cmdstream_readtimestamp(struct kgsl_device *device,
KGSL_CMDSTREAM_GET_EOP_TIMESTAMP(device,
(unsigned int *)&timestamp);
rmb();
KGSL_CMD_VDBG("return %d\n", timestamp);
return timestamp;


@ -129,7 +129,7 @@ int kgsl_yamato_setup_pt(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
int kgsl_yamato_cleanup_pt(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
int kgsl_yamato_setstate(struct kgsl_device *device, uint32_t flags);
#else
static inline int kgsl_yamato_setstate(struct kgsl_device *device, uint32_t flags)

View File

@ -237,7 +237,7 @@ static struct file_operations kgsl_mmu_regs_fops = {
};
#endif /*DEBUG*/
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
static int kgsl_cache_enable_set(void *data, u64 val)
{
kgsl_cache_enable = (val != 0);
@ -282,7 +282,7 @@ int kgsl_debug_init(void)
&kgsl_mmu_regs_fops);
#endif
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
debugfs_create_file("cache_enable", 0644, dent, 0,
&kgsl_cache_enable_fops);
#endif

View File

@ -319,7 +319,7 @@ int kgsl_mmu_init(struct kgsl_device *device)
mmu->device = device;
#ifndef CONFIG_MSM_KGSL_MMU
#ifndef CONFIG_GPU_MSM_KGSL_MMU
mmu->config = 0x00000000;
#endif
@ -396,6 +396,16 @@ int kgsl_mmu_init(struct kgsl_device *device)
return -ENOMEM;
}
mmu->hwpagetable = mmu->defaultpagetable;
mmu->tlbflushfilter.size = (mmu->va_range /
(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
mmu->tlbflushfilter.base = (unsigned int *)
kzalloc(mmu->tlbflushfilter.size, GFP_KERNEL);
if (!mmu->tlbflushfilter.base) {
KGSL_MEM_ERR("Failed to create tlbflushfilter\n");
kgsl_mmu_close(device);
return -ENOMEM;
}
GSL_TLBFLUSH_FILTER_RESET();
kgsl_yamato_regwrite(device, REG_MH_MMU_PT_BASE,
mmu->hwpagetable->base.gpuaddr);
kgsl_yamato_regwrite(device, REG_MH_MMU_VA_RANGE,
@ -415,7 +425,7 @@ int kgsl_mmu_init(struct kgsl_device *device)
return 0;
}
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
pte_t *kgsl_get_pte_from_vaddr(unsigned int vaddr)
{
pgd_t *pgd_ptr = NULL;
@ -456,7 +466,7 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
unsigned int flags)
{
int numpages;
unsigned int pte, superpte, ptefirst, ptelast, physaddr;
unsigned int pte, ptefirst, ptelast, physaddr;
int flushtlb, alloc_size;
struct kgsl_mmu *mmu = NULL;
int phys_contiguous = flags & KGSL_MEMFLAGS_CONPHYS;
@ -514,15 +524,11 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
pte = ptefirst;
flushtlb = 0;
superpte = ptefirst & (GSL_PT_SUPER_PTE - 1);
for (pte = superpte; pte < ptefirst; pte++) {
/* tlb needs to be flushed only when a dirty superPTE
gets backed */
if (kgsl_pt_map_isdirty(pagetable, pte)) {
flushtlb = 1;
break;
}
}
/* tlb needs to be flushed when the first and last pte are not at
* superpte boundaries */
if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
flushtlb = 1;
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
@ -530,8 +536,10 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
if (kgsl_pt_map_isdirty(pagetable, pte))
if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
flushtlb = 1;
/* mark pte as in use */
if (phys_contiguous)
physaddr = address;
@ -552,17 +560,6 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
address += KGSL_PAGESIZE;
}
/* set superpte to end of next superpte */
superpte = (ptelast + (GSL_PT_SUPER_PTE - 1))
& (GSL_PT_SUPER_PTE - 1);
for (pte = ptelast; pte < superpte; pte++) {
/* tlb needs to be flushed only when a dirty superPTE
gets backed */
if (kgsl_pt_map_isdirty(pagetable, pte)) {
flushtlb = 1;
break;
}
}
KGSL_MEM_INFO("pt %p p %08x g %08x pte f %d l %d n %d f %d\n",
pagetable, address, *gpuaddr, ptefirst, ptelast,
numpages, flushtlb);
@ -571,8 +568,10 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
/* Invalidate tlb only if current page table used by GPU is the
* pagetable that we used to allocate */
if (pagetable == mmu->hwpagetable)
if (flushtlb && (pagetable == mmu->hwpagetable)) {
kgsl_yamato_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH);
GSL_TLBFLUSH_FILTER_RESET();
}
KGSL_MEM_VDBG("return %d\n", 0);
@ -585,7 +584,8 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
int range)
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast;
unsigned int pte, ptefirst, ptelast, superpte;
struct kgsl_mmu *mmu = NULL;
KGSL_MEM_VDBG("enter (pt=%p, gpuaddr=0x%08x, range=%d)\n",
pagetable, gpuaddr, range);
@ -602,22 +602,24 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
KGSL_MEM_INFO("pt %p gpu %08x pte first %d last %d numpages %d\n",
pagetable, gpuaddr, ptefirst, ptelast, numpages);
mmu = pagetable->mmu;
superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
/* check if PTE exists */
BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
if (pte == superpte)
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
GSL_PT_SUPER_PTE);
}
dmb();
/* Invalidate tlb only if current page table used by GPU is the
* pagetable that we used to allocate */
if (pagetable == pagetable->mmu->hwpagetable)
kgsl_yamato_setstate(pagetable->mmu->device,
KGSL_MMUFLAGS_TLBFLUSH);
gen_pool_free(pagetable->pool, gpuaddr, range);
KGSL_MEM_VDBG("return %d\n", 0);
@ -651,6 +653,12 @@ int kgsl_mmu_close(struct kgsl_device *device)
if (mmu->dummyspace.gpuaddr)
kgsl_sharedmem_free(&mmu->dummyspace);
if (mmu->tlbflushfilter.base) {
mmu->tlbflushfilter.size = 0;
kfree(mmu->tlbflushfilter.base);
mmu->tlbflushfilter.base = NULL;
}
mmu->flags &= ~KGSL_FLAGS_STARTED;
mmu->flags &= ~KGSL_FLAGS_INITIALIZED;
mmu->flags &= ~KGSL_FLAGS_INITIALIZED0;
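
The hunks above replace the old per-PTE dirty scan with a per-superpte dirty filter: kgsl_mmu_unmap() marks the covering superpte block dirty, and kgsl_mmu_map() only issues a TLB flush when it crosses a superpte boundary or lands on a dirty block, resetting the filter after the flush. A minimal standalone model of that bookkeeping follows; the block span and filter length are illustrative assumptions, not the driver's values.

/* Minimal standalone model of the superpte dirty filter used above.
 * One bit tracks each block of GSL_PT_SUPER_PTE page-table entries:
 * unmap marks the covering block dirty, and a later map over a dirty
 * block triggers one TLB flush plus a filter reset. */
#include <stdio.h>
#include <string.h>

#define GSL_PT_SUPER_PTE 8			/* assumed block span */
#define FILTER_ENTRY_BITS (sizeof(unsigned char) * 8)

static unsigned char filter[32];		/* covers 32 * 8 blocks */

static void filter_setdirty(unsigned int block)
{
	filter[block / FILTER_ENTRY_BITS] |= 1 << (block % FILTER_ENTRY_BITS);
}

static int filter_isdirty(unsigned int block)
{
	return filter[block / FILTER_ENTRY_BITS] &
	       (1 << (block % FILTER_ENTRY_BITS));
}

int main(void)
{
	unsigned int pte = 42;			/* entry being unmapped */

	memset(filter, 0, sizeof(filter));	/* FILTER_RESET() */
	filter_setdirty(pte / GSL_PT_SUPER_PTE);

	/* the map path checks this at each superpte boundary */
	printf("block %u needs a flush: %s\n", pte / GSL_PT_SUPER_PTE,
	       filter_isdirty(pte / GSL_PT_SUPER_PTE) ? "yes" : "no");
	return 0;
}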

View File

@ -31,6 +31,21 @@
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)mmu->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(mmu->tlbflushfilter.base,\
0, mmu->tlbflushfilter.size)
extern unsigned int kgsl_cache_enable;
struct kgsl_device;
@ -68,6 +83,11 @@ struct kgsl_pagetable {
struct gen_pool *pool;
};
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
struct kgsl_mmu {
unsigned int refcnt;
uint32_t flags;
@ -81,6 +101,8 @@ struct kgsl_mmu {
/* current page table object being used by device mmu */
struct kgsl_pagetable *defaultpagetable;
struct kgsl_pagetable *hwpagetable;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
};
@ -102,7 +124,7 @@ int kgsl_mmu_destroypagetableobject(struct kgsl_pagetable *pagetable);
int kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
unsigned int address,
int range,

View File

@ -325,7 +325,7 @@ error:
}
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
int kgsl_yamato_setstate(struct kgsl_device *device, uint32_t flags)
{
unsigned int link[32];
@ -731,7 +731,7 @@ int kgsl_yamato_getproperty(struct kgsl_device *device,
break;
case KGSL_PROP_MMU_ENABLE:
{
#ifdef CONFIG_MSM_KGSL_MMU
#ifdef CONFIG_GPU_MSM_KGSL_MMU
int mmuProp = 1;
#else
int mmuProp = 0;

View File

@ -1,3 +1,175 @@
/* include/linux/android_pmem.h
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifdef CONFIG_MSM_KGSL
#ifndef _ANDROID_PMEM_H_
#define _ANDROID_PMEM_H_
#include <linux/fs.h>
#define PMEM_KERNEL_TEST_MAGIC 0xc0
#define PMEM_KERNEL_TEST_NOMINAL_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 1)
#define PMEM_KERNEL_TEST_ADVERSARIAL_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 2)
#define PMEM_KERNEL_TEST_HUGE_ALLOCATION_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 3)
#define PMEM_KERNEL_TEST_FREE_UNALLOCATED_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 4)
#define PMEM_KERNEL_TEST_LARGE_REGION_NUMBER_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 5)
#define PMEM_IOCTL_MAGIC 'p'
#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)
#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int)
/* This ioctl allocates pmem space backing the file; it fails if the
* file already has an allocation. Pass the len as the argument
* to the ioctl */
#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int)
/* This will connect one pmem file to another, pass the file that is already
* backed in memory as the argument to the ioctl
*/
#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int)
/* Returns the total size of the pmem region it is sent to as a pmem_region
* struct (with offset set to 0).
*/
#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
/* Revokes gpu registers and resets the gpu. Pass a pointer to the
* start of the mapped gpu regs (the vaddr returned by mmap) as the argument.
*/
#define HW3D_REVOKE_GPU _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
#define HW3D_GRANT_GPU _IOW(PMEM_IOCTL_MAGIC, 9, unsigned int)
#define PMEM_CLEAN_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 11, unsigned int)
#define PMEM_CLEAN_CACHES _IOW(PMEM_IOCTL_MAGIC, 12, unsigned int)
#define PMEM_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 13, unsigned int)
#define PMEM_GET_FREE_SPACE _IOW(PMEM_IOCTL_MAGIC, 14, unsigned int)
#define PMEM_ALLOCATE_ALIGNED _IOW(PMEM_IOCTL_MAGIC, 15, unsigned int)
struct pmem_region {
unsigned long offset;
unsigned long len;
};
struct pmem_addr {
unsigned long vaddr;
unsigned long offset;
unsigned long length;
};
struct pmem_freespace {
unsigned long total;
unsigned long largest;
};
struct pmem_allocation {
unsigned long size;
unsigned int align;
};
#ifdef __KERNEL__
int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
unsigned long *end, struct file **filp);
int get_pmem_fd(int fd, unsigned long *start, unsigned long *end);
int get_pmem_user_addr(struct file *file, unsigned long *start,
unsigned long *end);
void put_pmem_file(struct file* file);
void put_pmem_fd(int fd);
void flush_pmem_fd(int fd, unsigned long start, unsigned long len);
void flush_pmem_file(struct file *file, unsigned long start, unsigned long len);
int pmem_cache_maint(struct file *file, unsigned int cmd,
struct pmem_addr *pmem_addr);
enum pmem_allocator_type {
/* Zero is a default in platform PMEM structures in the board files,
* when the "allocator_type" structure element is not explicitly
* defined
*/
PMEM_ALLOCATORTYPE_BITMAP = 0, /* forced to be zero here */
PMEM_ALLOCATORTYPE_ALLORNOTHING,
PMEM_ALLOCATORTYPE_BUDDYBESTFIT,
PMEM_ALLOCATORTYPE_MAX,
};
#define PMEM_MEMTYPE_MASK 0x7
#define PMEM_INVALID_MEMTYPE 0x0
#define PMEM_MEMTYPE_EBI1 0x1
#define PMEM_MEMTYPE_SMI 0x2
#define PMEM_MEMTYPE_RESERVED_INVALID2 0x3
#define PMEM_MEMTYPE_RESERVED_INVALID3 0x4
#define PMEM_MEMTYPE_RESERVED_INVALID4 0x5
#define PMEM_MEMTYPE_RESERVED_INVALID5 0x6
#define PMEM_MEMTYPE_RESERVED_INVALID6 0x7
#define PMEM_ALIGNMENT_MASK 0x18
#define PMEM_ALIGNMENT_RESERVED_INVALID1 0x0
#define PMEM_ALIGNMENT_4K 0x8 /* the default */
#define PMEM_ALIGNMENT_1M 0x10
#define PMEM_ALIGNMENT_RESERVED_INVALID2 0x18
/* flags in the following function defined as above. */
int32_t pmem_kalloc(const size_t size, const uint32_t flags);
int32_t pmem_kfree(const int32_t physaddr);
/* kernel api names for board specific data structures */
#define PMEM_KERNEL_EBI1_DATA_NAME "pmem_kernel_ebi1"
#define PMEM_KERNEL_SMI_DATA_NAME "pmem_kernel_smi"
struct android_pmem_platform_data
{
const char* name;
/* starting physical address of memory region */
unsigned long start;
/* size of memory region */
unsigned long size;
enum pmem_allocator_type allocator_type;
/* treated as a 'hidden' variable in the board files. Can be
* set, but default is the system init value of 0 which becomes a
* quantum of 4K pages.
*/
unsigned int quantum;
/* set to indicate maps of this region should be cached; if a mix of
* cached and uncached is desired, set this and open the device with
* O_SYNC to get an uncached region */
unsigned cached;
/* The MSM7k has bits to enable a write buffer in the bus controller*/
unsigned buffered;
/* This PMEM is on memory that may be powered off */
unsigned unstable;
};
int pmem_setup(struct android_pmem_platform_data *pdata,
long (*ioctl)(struct file *, unsigned int, unsigned long),
int (*release)(struct inode *, struct file *));
int pmem_remap(struct pmem_region *region, struct file *file,
unsigned operation);
int is_pmem_file(struct file *file);
#endif /* __KERNEL__ */
#endif // _ANDROID_PMEM_H_
#else
/* include/linux/android_pmem.h
*
* Copyright (C) 2007 Google, Inc.
@ -91,3 +263,4 @@ static inline int pmem_remap(struct pmem_region *region, struct file *file,
#endif // _ANDROID_PMEM_H_
#endif
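
For orientation, a hypothetical userspace probe of the region ioctls declared above: PMEM_GET_SIZE fills a struct pmem_region for the opened node. The device node name "/dev/pmem" is an assumption, and the ioctl number plus struct are restated from the header so the sketch builds stand-alone.

/* Hypothetical userspace probe of a pmem node. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define PMEM_IOCTL_MAGIC 'p'
#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int)

struct pmem_region {
	unsigned long offset;
	unsigned long len;
};

int main(void)
{
	struct pmem_region region = { 0, 0 };
	int fd = open("/dev/pmem", O_RDWR);	/* assumed node name */

	if (fd < 0) {
		perror("open /dev/pmem");
		return 1;
	}
	if (ioctl(fd, PMEM_GET_SIZE, &region) == 0)
		printf("region offset %lu, len %lu\n",
		       region.offset, region.len);
	close(fd);
	return 0;
}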

View File

@ -44,5 +44,12 @@ struct ashmem_pin {
#define ASHMEM_UNPIN _IOW(__ASHMEMIOC, 8, struct ashmem_pin)
#define ASHMEM_GET_PIN_STATUS _IO(__ASHMEMIOC, 9)
#define ASHMEM_PURGE_ALL_CACHES _IO(__ASHMEMIOC, 10)
#define ASHMEM_CACHE_FLUSH_RANGE _IO(__ASHMEMIOC, 11)
#define ASHMEM_CACHE_CLEAN_RANGE _IO(__ASHMEMIOC, 12)
#define ASHMEM_CACHE_INV_RANGE _IO(__ASHMEMIOC, 13)
int get_ashmem_file(int fd, struct file **filp, struct file **vm_file,
unsigned long *len);
void put_ashmem_file(struct file *file);
#endif /* _LINUX_ASHMEM_H */

37
include/linux/atomic.h Executable file
View File

@ -0,0 +1,37 @@
#ifndef _LINUX_ATOMIC_H
#define _LINUX_ATOMIC_H
#include <asm/atomic.h>
/**
* atomic_inc_not_zero_hint - increment if not null
* @v: pointer of type atomic_t
* @hint: probable value of the atomic before the increment
*
* This version of atomic_inc_not_zero() gives a hint of the probable
* value of the atomic. This helps the processor avoid reading the
* memory before doing the atomic read/modify/write cycle, lowering
* the number of bus transactions on some arches.
*
* Returns: 0 if increment was not done, 1 otherwise.
*/
#ifndef atomic_inc_not_zero_hint
static inline int atomic_inc_not_zero_hint(atomic_t *v, int hint)
{
int val, c = hint;
/* sanity test, should be removed by compiler if hint is a constant */
if (!hint)
return atomic_inc_not_zero(v);
do {
val = atomic_cmpxchg(v, c, c + 1);
if (val == c)
return 1;
c = val;
} while (c);
return 0;
}
#endif
#endif /* _LINUX_ATOMIC_H */
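
A standalone model of the hinted increment, with GCC's __sync_val_compare_and_swap standing in for the kernel's atomic_cmpxchg(); this is a sketch of the loop's behaviour, not the kernel implementation itself.

/* A zero hint is bumped to 1 so a zeroed counter is never
 * incremented (the kernel instead falls back to
 * atomic_inc_not_zero() in that case). */
#include <stdio.h>

static int inc_not_zero_hint(int *v, int hint)
{
	int val, c = hint ? hint : 1;

	do {
		/* try c -> c + 1; on failure, learn the real value */
		val = __sync_val_compare_and_swap(v, c, c + 1);
		if (val == c)
			return 1;		/* increment done */
		c = val;
	} while (c);				/* counter hit zero */
	return 0;
}

int main(void)
{
	int refcount = 3;

	/* a correct hint makes the first compare-and-swap succeed */
	printf("done=%d refcount=%d\n",
	       inc_not_zero_hint(&refcount, 3), refcount);
	return 0;
}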

View File

@ -42,6 +42,10 @@
* bitmap_empty(src, nbits) Are all bits zero in *src?
* bitmap_full(src, nbits) Are all bits set in *src?
* bitmap_weight(src, nbits) Hamming Weight: number set bits
* bitmap_set(dst, pos, nbits) Set specified bit area
* bitmap_clear(dst, pos, nbits) Clear specified bit area
* bitmap_find_next_zero_area(buf, len, pos, n, mask) Find bit free area
* bitmap_find_next_zero_area_off(buf, len, pos, n, mask) as above
* bitmap_shift_right(dst, src, n, nbits) *dst = *src >> n
* bitmap_shift_left(dst, src, n, nbits) *dst = *src << n
* bitmap_remap(dst, src, old, new, nbits) *dst = map(old, new)(src)
@ -108,6 +112,27 @@ extern int __bitmap_subset(const unsigned long *bitmap1,
const unsigned long *bitmap2, int bits);
extern int __bitmap_weight(const unsigned long *bitmap, int bits);
extern void bitmap_set(unsigned long *map, int i, int len);
extern void bitmap_clear(unsigned long *map, int start, int nr);
extern unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset);
static inline unsigned long
bitmap_find_next_zero_area(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask)
{
return bitmap_find_next_zero_area_off(map, size, start, nr,
align_mask, 0);
}
extern int bitmap_scnprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int __bitmap_parse(const char *buf, unsigned int buflen, int is_user,
@ -118,6 +143,8 @@ extern int bitmap_scnlistprintf(char *buf, unsigned int len,
const unsigned long *src, int nbits);
extern int bitmap_parselist(const char *buf, unsigned long *maskp,
int nmaskbits);
extern int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
unsigned long *dst, int nbits);
extern void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, int bits);
extern int bitmap_bitremap(int oldbit,

226
include/linux/memcopy.h Normal file
View File

@ -0,0 +1,226 @@
/*
* memcopy.h -- definitions for memory copy functions. Generic C version.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The code is derived from the GNU C Library.
* Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc.
*/
#ifndef _LINUX_MEMCOPY_H_
#define _LINUX_MEMCOPY_H_
/*
* The strategy of the memory functions is:
*
* 1. Copy bytes until the destination pointer is aligned.
*
* 2. Copy words in unrolled loops. If the source and destination
* are not aligned in the same way, use word memory operations,
* but shift and merge two read words before writing.
*
* 3. Copy the few remaining bytes.
*
* This is fast on processors that have at least 10 registers for
* allocation by GCC, and that can access memory at reg+const in one
* instruction.
*/
#include <linux/types.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
/*
* The macros defined in this file are:
*
* BYTE_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy)
*
* BYTE_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy)
*
* WORD_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_remaining, nbytes_to_copy)
*
* WORD_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_remaining, nbytes_to_copy)
*
* MERGE(old_word, sh_1, new_word, sh_2)
*
* MEM_COPY_FWD(dst_beg_ptr, src_beg_ptr, nbytes_to_copy)
*
* MEM_COPY_BWD(dst_end_ptr, src_end_ptr, nbytes_to_copy)
*/
#define OP_T_THRESHOLD 16
/*
* Type to use for aligned memory operations.
* This should normally be the biggest type supported by a single load
* and store.
*/
#define op_t unsigned long int
#define OPSIZ (sizeof(op_t))
/* Type to use for unaligned operations. */
typedef unsigned char byte;
#ifndef MERGE
# ifdef __LITTLE_ENDIAN
# define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))
# elif defined(__BIG_ENDIAN)
# define MERGE(w0, sh_1, w1, sh_2) (((w0) << (sh_1)) | ((w1) >> (sh_2)))
# else
# error "Macro MERGE() has not been defined!"
# endif
#endif
/*
* Copy exactly NBYTES bytes from SRC_BP to DST_BP,
* without any assumptions about alignment of the pointers.
*/
#ifndef BYTE_COPY_FWD
#define BYTE_COPY_FWD(dst_bp, src_bp, nbytes) \
do { \
size_t __nbytes = (nbytes); \
while (__nbytes > 0) { \
byte __x = ((byte *) src_bp)[0]; \
src_bp += 1; \
__nbytes -= 1; \
((byte *) dst_bp)[0] = __x; \
dst_bp += 1; \
} \
} while (0)
#endif
/*
* Copy exactly NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR,
* beginning at the bytes right before the pointers and continuing towards
* smaller addresses. Don't assume anything about alignment of the
* pointers.
*/
#ifndef BYTE_COPY_BWD
#define BYTE_COPY_BWD(dst_ep, src_ep, nbytes) \
do { \
size_t __nbytes = (nbytes); \
while (__nbytes > 0) { \
byte __x; \
src_ep -= 1; \
__x = ((byte *) src_ep)[0]; \
dst_ep -= 1; \
__nbytes -= 1; \
((byte *) dst_ep)[0] = __x; \
} \
} while (0)
#endif
/*
* Copy *up to* NBYTES bytes from SRC_BP to DST_BP, with
* the assumption that DST_BP is aligned on an OPSIZ multiple. If
* not all bytes could be easily copied, store remaining number of bytes
* in NBYTES_LEFT, otherwise store 0.
*/
extern void _wordcopy_fwd_aligned(long int, long int, size_t);
extern void _wordcopy_fwd_dest_aligned(long int, long int, size_t);
#ifndef WORD_COPY_FWD
#define WORD_COPY_FWD(dst_bp, src_bp, nbytes_left, nbytes) \
do { \
if (src_bp % OPSIZ == 0) \
_wordcopy_fwd_aligned (dst_bp, src_bp, (nbytes) / OPSIZ); \
else \
_wordcopy_fwd_dest_aligned (dst_bp, src_bp, (nbytes) / OPSIZ);\
\
src_bp += (nbytes) & -OPSIZ; \
dst_bp += (nbytes) & -OPSIZ; \
(nbytes_left) = (nbytes) % OPSIZ; \
} while (0)
#endif
/*
* Copy *up to* NBYTES_TO_COPY bytes from SRC_END_PTR to DST_END_PTR,
* beginning at the words (of type op_t) right before the pointers and
* continuing towards smaller addresses. May take advantage of that
* DST_END_PTR is aligned on an OPSIZ multiple. If not all bytes could be
* easily copied, store remaining number of bytes in NBYTES_REMAINING,
* otherwise store 0.
*/
extern void _wordcopy_bwd_aligned(long int, long int, size_t);
extern void _wordcopy_bwd_dest_aligned(long int, long int, size_t);
#ifndef WORD_COPY_BWD
#define WORD_COPY_BWD(dst_ep, src_ep, nbytes_left, nbytes) \
do { \
if (src_ep % OPSIZ == 0) \
_wordcopy_bwd_aligned (dst_ep, src_ep, (nbytes) / OPSIZ); \
else \
_wordcopy_bwd_dest_aligned (dst_ep, src_ep, (nbytes) / OPSIZ);\
\
src_ep -= (nbytes) & -OPSIZ; \
dst_ep -= (nbytes) & -OPSIZ; \
(nbytes_left) = (nbytes) % OPSIZ; \
} while (0)
#endif
/* Copy memory from the beginning to the end */
#ifndef MEM_COPY_FWD
static __always_inline void mem_copy_fwd(unsigned long dstp,
unsigned long srcp,
size_t count)
{
/* If there are not too few bytes to copy, use word copy. */
if (count >= OP_T_THRESHOLD) {
/* Copy just a few bytes to make dstp aligned. */
count -= (-dstp) % OPSIZ;
BYTE_COPY_FWD(dstp, srcp, (-dstp) % OPSIZ);
/*
* Copy from srcp to dstp taking advantage of the known
* alignment of dstp. The number of bytes remaining is put in
* the third argument.
*/
WORD_COPY_FWD(dstp, srcp, count, count);
/* Fall out and copy the tail. */
}
/* There are just a few bytes to copy. Use byte memory operations. */
BYTE_COPY_FWD(dstp, srcp, count);
}
#endif
/* Copy memory from the end to the beginning. */
#ifndef MEM_COPY_BWD
static __always_inline void mem_copy_bwd(unsigned long dstp,
unsigned long srcp,
size_t count)
{
srcp += count;
dstp += count;
/* If there are not too few bytes to copy, use word copy. */
if (count >= OP_T_THRESHOLD) {
/* Copy just a few bytes to make dstp aligned. */
count -= dstp % OPSIZ;
BYTE_COPY_BWD(dstp, srcp, dstp % OPSIZ);
/*
* Copy from srcp to dstp taking advantage of the known
* alignment of dstp. The number of bytes remaining is put in
* the third argument.
*/
WORD_COPY_BWD(dstp, srcp, count, count);
/* Fall out and copy the tail. */
}
/* There are just a few bytes to copy. Use byte memory operations. */
BYTE_COPY_BWD (dstp, srcp, count);
}
#endif
#endif
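
To see what MERGE() buys: when source and destination disagree on alignment, two aligned reads are shifted and OR-ed into one destination-aligned word. A quick standalone check, little-endian as in the __LITTLE_ENDIAN branch above; the 3-byte misalignment is arbitrary.

/* Standalone check of the MERGE() idea. */
#include <stdio.h>
#include <string.h>

typedef unsigned long op_t;
#define OPSIZ sizeof(op_t)
#define MERGE(w0, sh_1, w1, sh_2) (((w0) >> (sh_1)) | ((w1) << (sh_2)))

int main(void)
{
	unsigned char src[2 * sizeof(op_t)];
	op_t w0, w1, out;
	unsigned int misalign = 3;
	unsigned int sh_1 = 8 * misalign, sh_2 = 8 * OPSIZ - sh_1;
	unsigned int i;

	for (i = 0; i < sizeof(src); i++)
		src[i] = (unsigned char)i;

	memcpy(&w0, src, OPSIZ);		/* aligned read 1 */
	memcpy(&w1, src + OPSIZ, OPSIZ);	/* aligned read 2 */
	out = MERGE(w0, sh_1, w1, sh_2);

	/* out holds bytes src[3] .. src[3 + OPSIZ - 1] as one word */
	printf("merged word: %#lx\n", out);
	return 0;
}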

View File

@ -0,0 +1,59 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef _LINUX_MEMALLOC_H
#define _LINUX_MEMALLOC_H
#include <linux/mutex.h>
#include <linux/genalloc.h>
#include <linux/rbtree.h>
struct mem_pool {
struct mutex pool_mutex;
struct gen_pool *gpool;
unsigned long paddr;
unsigned long size;
unsigned long free;
unsigned int id;
};
struct alloc {
struct rb_node rb_node;
void *vaddr;
unsigned long paddr;
struct mem_pool *mpool;
unsigned long len;
void *caller;
};
struct mem_pool *initialize_memory_pool(unsigned long start,
unsigned long size, int mem_type);
void *allocate_contiguous_memory(unsigned long size,
int mem_type, unsigned long align, int cached);
unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align, void *caller);
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align);
void free_contiguous_memory(void *addr);
void free_contiguous_memory_by_paddr(unsigned long paddr);
unsigned long memory_pool_node_paddr(void *vaddr);
unsigned long memory_pool_node_len(void *vaddr);
int memory_pool_init(void);
#endif /* _LINUX_MEMALLOC_H */
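
A hedged kernel-side sketch of how this API is meant to be driven: register a physical range as a pool, then carve a cached mapping out of it. The base address, pool id (mem_type doubles as the pool index) and sizes are made-up values, not taken from any board file.

/* Hedged sketch; not wired into any real board setup. */
#include <linux/init.h>
#include <linux/errno.h>
#include <linux/memory_alloc.h>

static void *example_buf;

static int __init example_pool_init(void)
{
	/* 16 MB pool at an assumed physical base, pool id 1 */
	if (!initialize_memory_pool(0x40000000, 16 * 1024 * 1024, 1))
		return -ENOMEM;

	/* 1 MB allocation, 4 KB aligned, cached mapping */
	example_buf = allocate_contiguous_memory(1024 * 1024, 1, 4096, 1);
	return example_buf ? 0 : -ENOMEM;
}

static void __exit example_pool_exit(void)
{
	free_contiguous_memory(example_buf);	/* unmaps, returns to pool */
}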

View File

@ -1,3 +1,473 @@
#ifdef CONFIG_MSM_KGSL
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, version 2, in which case the provisions
* of the GPL version 2 are required INSTEAD OF the BSD license.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*
*/
#ifndef _MSM_KGSL_H
#define _MSM_KGSL_H
#define KGSL_VERSION_MAJOR 3
#define KGSL_VERSION_MINOR 7
/*context flags */
#define KGSL_CONTEXT_SAVE_GMEM 1
#define KGSL_CONTEXT_NO_GMEM_ALLOC 2
#define KGSL_CONTEXT_SUBMIT_IB_LIST 4
#define KGSL_CONTEXT_CTX_SWITCH 8
/* Memory allocation flags */
#define KGSL_MEMFLAGS_GPUREADONLY 0x01000000
/* generic flag values */
#define KGSL_FLAGS_NORMALMODE 0x00000000
#define KGSL_FLAGS_SAFEMODE 0x00000001
#define KGSL_FLAGS_INITIALIZED0 0x00000002
#define KGSL_FLAGS_INITIALIZED 0x00000004
#define KGSL_FLAGS_STARTED 0x00000008
#define KGSL_FLAGS_ACTIVE 0x00000010
#define KGSL_FLAGS_RESERVED0 0x00000020
#define KGSL_FLAGS_RESERVED1 0x00000040
#define KGSL_FLAGS_RESERVED2 0x00000080
#define KGSL_FLAGS_SOFT_RESET 0x00000100
#define KGSL_MAX_PWRLEVELS 5
/* device id */
enum kgsl_deviceid {
KGSL_DEVICE_3D0 = 0x00000000,
KGSL_DEVICE_2D0 = 0x00000001,
KGSL_DEVICE_2D1 = 0x00000002,
KGSL_DEVICE_MAX = 0x00000003
};
enum kgsl_user_mem_type {
KGSL_USER_MEM_TYPE_PMEM = 0x00000000,
KGSL_USER_MEM_TYPE_ASHMEM = 0x00000001,
KGSL_USER_MEM_TYPE_ADDR = 0x00000002
};
struct kgsl_devinfo {
unsigned int device_id;
/* chip revision id
* coreid:8 majorrev:8 minorrev:8 patch:8
*/
unsigned int chip_id;
unsigned int mmu_enabled;
unsigned int gmem_gpubaseaddr;
/*
* This field contains the adreno revision
* number 200, 205, 220, etc...
*/
unsigned int gpu_id;
unsigned int gmem_sizebytes;
};
/* this structure defines the region of memory that can be mmap()ed from this
driver. The timestamp fields are volatile because they are written by the
GPU
*/
struct kgsl_devmemstore {
volatile unsigned int soptimestamp;
unsigned int sbz;
volatile unsigned int eoptimestamp;
unsigned int sbz2;
volatile unsigned int ts_cmp_enable;
unsigned int sbz3;
volatile unsigned int ref_wait_ts;
unsigned int sbz4;
unsigned int current_context;
unsigned int sbz5;
};
#define KGSL_DEVICE_MEMSTORE_OFFSET(field) \
offsetof(struct kgsl_devmemstore, field)
/* timestamp id*/
enum kgsl_timestamp_type {
KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */
KGSL_TIMESTAMP_RETIRED = 0x00000002, /* end-of-pipeline timestamp*/
KGSL_TIMESTAMP_MAX = 0x00000002,
};
/* property types - used with kgsl_device_getproperty */
enum kgsl_property_type {
KGSL_PROP_DEVICE_INFO = 0x00000001,
KGSL_PROP_DEVICE_SHADOW = 0x00000002,
KGSL_PROP_DEVICE_POWER = 0x00000003,
KGSL_PROP_SHMEM = 0x00000004,
KGSL_PROP_SHMEM_APERTURES = 0x00000005,
KGSL_PROP_MMU_ENABLE = 0x00000006,
KGSL_PROP_INTERRUPT_WAITS = 0x00000007,
KGSL_PROP_VERSION = 0x00000008,
};
struct kgsl_shadowprop {
unsigned int gpuaddr;
unsigned int size;
unsigned int flags; /* contains KGSL_FLAGS_ values */
};
struct kgsl_pwrlevel {
unsigned int gpu_freq;
unsigned int bus_freq;
};
struct kgsl_version {
unsigned int drv_major;
unsigned int drv_minor;
unsigned int dev_major;
unsigned int dev_minor;
};
#ifdef __KERNEL__
#define KGSL_3D0_REG_MEMORY "kgsl_3d0_reg_memory"
#define KGSL_3D0_IRQ "kgsl_3d0_irq"
#define KGSL_2D0_REG_MEMORY "kgsl_2d0_reg_memory"
#define KGSL_2D0_IRQ "kgsl_2d0_irq"
#define KGSL_2D1_REG_MEMORY "kgsl_2d1_reg_memory"
#define KGSL_2D1_IRQ "kgsl_2d1_irq"
struct kgsl_grp_clk_name {
const char *clk;
const char *pclk;
};
struct kgsl_device_pwr_data {
struct kgsl_pwrlevel pwrlevel[KGSL_MAX_PWRLEVELS];
int init_level;
int num_levels;
int (*set_grp_async)(void);
unsigned int idle_timeout;
unsigned int nap_allowed;
unsigned int idle_pass;
};
struct kgsl_clk_data {
struct kgsl_grp_clk_name name;
struct msm_bus_scale_pdata *bus_scale_table;
};
struct kgsl_device_platform_data {
struct kgsl_device_pwr_data pwr_data;
struct kgsl_clk_data clk;
/* imem_clk_name is for 3d only, not used in 2d devices */
struct kgsl_grp_clk_name imem_clk_name;
};
#endif
/* structure holds list of ibs */
struct kgsl_ibdesc {
unsigned int gpuaddr;
void *hostptr;
unsigned int sizedwords;
unsigned int ctrl;
};
/* ioctls */
#define KGSL_IOC_TYPE 0x09
/* get misc info about the GPU
type should be a value from enum kgsl_property_type
value points to a structure that varies based on type
sizebytes is sizeof() that structure
for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo
this structure contains hardware versioning info.
for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop
this is used to find mmap() offset and sizes for mapping
struct kgsl_memstore into userspace.
*/
struct kgsl_device_getproperty {
unsigned int type;
void *value;
unsigned int sizebytes;
};
#define IOCTL_KGSL_DEVICE_GETPROPERTY \
_IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty)
/* read a GPU register.
offsetwords is the 32-bit word offset from the beginning of the
GPU register space.
*/
struct kgsl_device_regread {
unsigned int offsetwords;
unsigned int value; /* output param */
};
#define IOCTL_KGSL_DEVICE_REGREAD \
_IOWR(KGSL_IOC_TYPE, 0x3, struct kgsl_device_regread)
/* block until the GPU has executed past a given timestamp
* timeout is in milliseconds.
*/
struct kgsl_device_waittimestamp {
unsigned int timestamp;
unsigned int timeout;
};
#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \
_IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp)
/* issue indirect commands to the GPU.
* drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE
* ibaddr and sizedwords must specify a subset of a buffer created
* with IOCTL_KGSL_SHAREDMEM_FROM_PMEM
* flags may be a mask of KGSL_CONTEXT_ values
* timestamp is a returned counter value which can be passed to
* other ioctls to determine when the commands have been executed by
* the GPU.
*/
struct kgsl_ringbuffer_issueibcmds {
unsigned int drawctxt_id;
unsigned int ibdesc_addr;
unsigned int numibs;
unsigned int timestamp; /*output param */
unsigned int flags;
};
#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \
_IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds)
/* read the most recently executed timestamp value
* type should be a value from enum kgsl_timestamp_type
*/
struct kgsl_cmdstream_readtimestamp {
unsigned int type;
unsigned int timestamp; /*output param */
};
#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_OLD \
_IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \
_IOWR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp)
/* free memory when the GPU reaches a given timestamp.
* gpuaddr specify a memory region created by a
* IOCTL_KGSL_SHAREDMEM_FROM_PMEM call
* type should be a value from enum kgsl_timestamp_type
*/
struct kgsl_cmdstream_freememontimestamp {
unsigned int gpuaddr;
unsigned int type;
unsigned int timestamp;
};
#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \
_IOW(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
/* Previous versions of this header had incorrectly defined
IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP as a read-only ioctl instead
of a write-only ioctl. To ensure binary compatibility, the following
#define will be used to intercept the incorrect ioctl
*/
#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_OLD \
_IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp)
/* create a draw context, which is used to preserve GPU state.
* The flags field may contain a mask KGSL_CONTEXT_* values
*/
struct kgsl_drawctxt_create {
unsigned int flags;
unsigned int drawctxt_id; /*output param */
};
#define IOCTL_KGSL_DRAWCTXT_CREATE \
_IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create)
/* destroy a draw context */
struct kgsl_drawctxt_destroy {
unsigned int drawctxt_id;
};
#define IOCTL_KGSL_DRAWCTXT_DESTROY \
_IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy)
/* add a block of pmem, fb, ashmem or user allocated address
* into the GPU address space */
struct kgsl_map_user_mem {
int fd;
unsigned int gpuaddr; /*output param */
unsigned int len;
unsigned int offset;
unsigned int hostptr; /*input param */
enum kgsl_user_mem_type memtype;
unsigned int reserved; /* May be required to add
params for another mem type */
};
#define IOCTL_KGSL_MAP_USER_MEM \
_IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem)
/* add a block of pmem or fb into the GPU address space */
struct kgsl_sharedmem_from_pmem {
int pmem_fd;
unsigned int gpuaddr; /*output param */
unsigned int len;
unsigned int offset;
};
#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \
_IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem)
/* remove memory from the GPU's address space */
struct kgsl_sharedmem_free {
unsigned int gpuaddr;
};
#define IOCTL_KGSL_SHAREDMEM_FREE \
_IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free)
struct kgsl_cff_user_event {
unsigned char cff_opcode;
unsigned int op1;
unsigned int op2;
unsigned int op3;
unsigned int op4;
unsigned int op5;
unsigned int __pad[2];
};
#define IOCTL_KGSL_CFF_USER_EVENT \
_IOW(KGSL_IOC_TYPE, 0x31, struct kgsl_cff_user_event)
struct kgsl_gmem_desc {
unsigned int x;
unsigned int y;
unsigned int width;
unsigned int height;
unsigned int pitch;
};
struct kgsl_buffer_desc {
void *hostptr;
unsigned int gpuaddr;
int size;
unsigned int format;
unsigned int pitch;
unsigned int enabled;
};
struct kgsl_bind_gmem_shadow {
unsigned int drawctxt_id;
struct kgsl_gmem_desc gmem_desc;
unsigned int shadow_x;
unsigned int shadow_y;
struct kgsl_buffer_desc shadow_buffer;
unsigned int buffer_id;
};
#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \
_IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow)
/* add a block of memory into the GPU address space */
struct kgsl_sharedmem_from_vmalloc {
unsigned int gpuaddr; /*output param */
unsigned int hostptr;
unsigned int flags;
};
#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \
_IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc)
#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \
_IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free)
struct kgsl_drawctxt_set_bin_base_offset {
unsigned int drawctxt_id;
unsigned int offset;
};
#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \
_IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset)
enum kgsl_cmdwindow_type {
KGSL_CMDWINDOW_MIN = 0x00000000,
KGSL_CMDWINDOW_2D = 0x00000000,
KGSL_CMDWINDOW_3D = 0x00000001, /* legacy */
KGSL_CMDWINDOW_MMU = 0x00000002,
KGSL_CMDWINDOW_ARBITER = 0x000000FF,
KGSL_CMDWINDOW_MAX = 0x000000FF,
};
/* write to the command window */
struct kgsl_cmdwindow_write {
enum kgsl_cmdwindow_type target;
unsigned int addr;
unsigned int data;
};
#define IOCTL_KGSL_CMDWINDOW_WRITE \
_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
struct kgsl_gpumem_alloc {
unsigned long gpuaddr;
size_t size;
unsigned int flags;
};
#define IOCTL_KGSL_GPUMEM_ALLOC \
_IOWR(KGSL_IOC_TYPE, 0x2f, struct kgsl_gpumem_alloc)
struct kgsl_cff_syncmem {
unsigned int gpuaddr;
unsigned int len;
unsigned int __pad[2]; /* For future binary compatibility */
};
#define IOCTL_KGSL_CFF_SYNCMEM \
_IOW(KGSL_IOC_TYPE, 0x30, struct kgsl_cff_syncmem)
#ifdef __KERNEL__
#ifdef CONFIG_MSM_KGSL_DRM
int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
unsigned long *len);
#else
#define kgsl_gem_obj_addr(...) 0
#endif
#endif
#endif /* _MSM_KGSL_H */
#else
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
@ -345,3 +815,4 @@ struct kgsl_cmdwindow_write {
_IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write)
#endif /* _MSM_KGSL_H */
#endif
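
A hypothetical userspace sketch of IOCTL_KGSL_DEVICE_GETPROPERTY as documented above, aimed at the /dev/kgsl-3d0 node this patch set registers. The <linux/msm_kgsl.h> include path is an assumption about how the header is exported, and error handling is minimal.

/* Hypothetical userspace query of the 3D device's info. */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/msm_kgsl.h>

int main(void)
{
	struct kgsl_devinfo info = { 0 };
	struct kgsl_device_getproperty prop = {
		.type = KGSL_PROP_DEVICE_INFO,
		.value = &info,
		.sizebytes = sizeof(info),
	};
	int fd = open("/dev/kgsl-3d0", O_RDWR);

	if (fd < 0) {
		perror("open /dev/kgsl-3d0");
		return 1;
	}
	if (ioctl(fd, IOCTL_KGSL_DEVICE_GETPROPERTY, &prop) == 0)
		printf("gpu_id %u, mmu %s\n", info.gpu_id,
		       info.mmu_enabled ? "enabled" : "disabled");
	close(fd);
	return 0;
}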

View File

@ -188,6 +188,75 @@ do { \
finish_wait(&wq, &__wait); \
} while (0)
/**
* wait_io_event_interruptible - sleep until an io condition gets true
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function will return -ERESTARTSYS if it was interrupted by a
* signal and 0 if @condition evaluated to true.
*/
#define wait_io_event_interruptible(wq, condition) \
({ \
int __ret = 0; \
if (!(condition)) \
__wait_io_event_interruptible(wq, condition, __ret); \
__ret; \
})
#define __wait_io_event_interruptible_timeout(wq, condition, ret) \
do { \
DEFINE_WAIT(__wait); \
\
for (;;) { \
prepare_to_wait(&wq, &__wait, TASK_INTERRUPTIBLE); \
if (condition) \
break; \
if (!signal_pending(current)) { \
ret = io_schedule_timeout(ret); \
if (!ret) \
break; \
continue; \
} \
ret = -ERESTARTSYS; \
break; \
} \
finish_wait(&wq, &__wait); \
} while (0)
/**
* wait_io_event_interruptible_timeout - sleep until an io condition gets true or a timeout elapses
* @wq: the waitqueue to wait on
* @condition: a C expression for the event to wait for
* @timeout: timeout, in jiffies
*
* The process is put to sleep (TASK_INTERRUPTIBLE) until the
* @condition evaluates to true or a signal is received.
* The @condition is checked each time the waitqueue @wq is woken up.
*
* wake_up() has to be called after changing any variable that could
* change the result of the wait condition.
*
* The function returns 0 if the @timeout elapsed, -ERESTARTSYS if it
* was interrupted by a signal, and the remaining jiffies otherwise
* if the condition evaluated to true before the timeout elapsed.
*/
#define wait_io_event_interruptible_timeout(wq, condition, timeout) \
({ \
long __ret = timeout; \
if (!(condition)) \
__wait_io_event_interruptible_timeout(wq, condition, __ret); \
__ret; \
})
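
A hedged kernel-style sketch of the new helper: the sleeper is accounted as io wait via io_schedule(), and the producer wakes the queue only after changing the condition, as the comment above requires. All names here are illustrative.

/* Illustrative reader/producer pair for wait_io_event_interruptible(). */
static DECLARE_WAIT_QUEUE_HEAD(example_wq);
static int data_ready;

static int example_reader(void)
{
	/* returns 0 once data_ready != 0, -ERESTARTSYS on a signal */
	return wait_io_event_interruptible(example_wq, data_ready != 0);
}

static void example_producer(void)
{
	data_ready = 1;
	wake_up(&example_wq);	/* the woken task re-checks the condition */
}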
/**
* wait_event - sleep until a condition gets true
* @wq: the waitqueue to wait on

View File

@ -121,7 +121,7 @@
static inline int rt_policy(int policy)
{
if (unlikely(policy == SCHED_FIFO || policy == SCHED_RR))
if (policy == SCHED_FIFO || policy == SCHED_RR)
return 1;
return 0;
}
@ -2443,7 +2443,7 @@ out_running:
if (p->sched_class->task_wake_up)
p->sched_class->task_wake_up(rq, p);
if (unlikely(rq->idle_stamp)) {
if (rq->idle_stamp) {
u64 delta = rq->clock - rq->idle_stamp;
u64 max = 2*sysctl_sched_migration_cost;

View File

@ -1052,7 +1052,7 @@ static struct task_struct *_pick_next_task_rt(struct rq *rq)
rt_rq = &rq->rt;
if (unlikely(!rt_rq->rt_nr_running))
if (!rt_rq->rt_nr_running)
return NULL;
if (rt_rq_throttled(rt_rq))
@ -1472,7 +1472,7 @@ static int pull_rt_task(struct rq *this_rq)
static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
{
/* Try to pull RT tasks here if we lower this rq's prio */
if (unlikely(rt_task(prev)) && rq->rt.highest_prio.curr > prev->prio)
if (rq->rt.highest_prio.curr > prev->prio)
pull_rt_task(rq);
}

View File

@ -10,9 +10,9 @@ endif
lib-y := ctype.o string.o vsprintf.o cmdline.o \
rbtree.o radix-tree.o dump_stack.o \
idr.o int_sqrt.o extable.o prio_tree.o \
sha1.o irq_regs.o reciprocal_div.o argv_split.o \
sha1.o md5.o irq_regs.o reciprocal_div.o argv_split.o \
proportions.o prio_heap.o ratelimit.o show_mem.o \
is_single_threaded.o plist.o decompress.o flex_array.o
is_single_threaded.o plist.o decompress.o flex_array.o memcopy.o
lib-$(CONFIG_MMU) += ioremap.o
lib-$(CONFIG_SMP) += cpumask.o

View File

@ -271,6 +271,89 @@ int __bitmap_weight(const unsigned long *bitmap, int bits)
}
EXPORT_SYMBOL(__bitmap_weight);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) % BITS_PER_LONG))
void bitmap_set(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_set = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_set = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_set >= 0) {
*p |= mask_to_set;
nr -= bits_to_set;
bits_to_set = BITS_PER_LONG;
mask_to_set = ~0UL;
p++;
}
if (nr) {
mask_to_set &= BITMAP_LAST_WORD_MASK(size);
*p |= mask_to_set;
}
}
EXPORT_SYMBOL(bitmap_set);
void bitmap_clear(unsigned long *map, int start, int nr)
{
unsigned long *p = map + BIT_WORD(start);
const int size = start + nr;
int bits_to_clear = BITS_PER_LONG - (start % BITS_PER_LONG);
unsigned long mask_to_clear = BITMAP_FIRST_WORD_MASK(start);
while (nr - bits_to_clear >= 0) {
*p &= ~mask_to_clear;
nr -= bits_to_clear;
bits_to_clear = BITS_PER_LONG;
mask_to_clear = ~0UL;
p++;
}
if (nr) {
mask_to_clear &= BITMAP_LAST_WORD_MASK(size);
*p &= ~mask_to_clear;
}
}
EXPORT_SYMBOL(bitmap_clear);
/**
* bitmap_find_next_zero_area - find a contiguous aligned zero area
* @map: The address to base the search on
* @size: The bitmap size in bits
* @start: The bitnumber to start searching at
* @nr: The number of zeroed bits we're looking for
* @align_mask: Alignment mask for zero area
* @align_offset: Alignment offset for zero area.
*
* The @align_mask should be one less than a power of 2; the effect is that
* the bit offset of all zero areas this function finds plus @align_offset
* is a multiple of that power of 2.
*/
unsigned long bitmap_find_next_zero_area_off(unsigned long *map,
unsigned long size,
unsigned long start,
unsigned int nr,
unsigned long align_mask,
unsigned long align_offset)
{
unsigned long index, end, i;
again:
index = find_next_zero_bit(map, size, start);
/* Align allocation */
index = __ALIGN_MASK(index + align_offset, align_mask) - align_offset;
end = index + nr;
if (end > size)
return end;
i = find_next_bit(map, end, index);
if (i < end) {
start = i + 1;
goto again;
}
return index;
}
EXPORT_SYMBOL(bitmap_find_next_zero_area_off);
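
A standalone model of the align-and-retry search documented above, shrunk to a single 64-bit word so the control flow is easy to trace; the real function walks multi-word bitmaps with find_next_zero_bit()/find_next_bit() and supports an extra align_offset.

/* Single-word model of bitmap_find_next_zero_area(). */
#include <stdio.h>

#define BITS 64

static int test_bit(unsigned long map, unsigned int i)
{
	return (map >> i) & 1UL;
}

static unsigned long find_zero_area(unsigned long map, unsigned int nr,
				    unsigned long align_mask)
{
	unsigned long start = 0, index, i;
again:
	index = start;
	while (index < BITS && test_bit(map, index))	/* next zero bit */
		index++;
	index = (index + align_mask) & ~align_mask;	/* align up */
	if (index + nr > BITS)
		return BITS;				/* no room */
	for (i = index; i < index + nr; i++)
		if (test_bit(map, i)) {			/* collision: retry */
			start = i + 1;
			goto again;
		}
	return index;
}

int main(void)
{
	/* bits 0-4 busy; ask for 4 free bits aligned to 4 (mask 3) */
	printf("area starts at bit %lu\n", find_zero_area(0x1fUL, 4, 3));
	return 0;
}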
/*
* Bitmap printing & parsing functions: first version by Bill Irwin,
* second version by Paul Jackson, third by Joe Korty.

403
lib/memcopy.c Normal file
View File

@ -0,0 +1,403 @@
/*
* memcopy.c -- subroutines for memory copy functions.
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the Free
* Software Foundation; either version 2 of the License, or (at your option)
* any later version.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General
* Public License for more details.
*
* You should have received a copy of the GNU General Public License along
* with this program; if not, write to the Free Software Foundation, Inc.,
* 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
*
* The code is derived from the GNU C Library.
* Copyright (C) 1991, 1992, 1993, 1997, 2004 Free Software Foundation, Inc.
*/
/* BE VERY CAREFUL IF YOU CHANGE THIS CODE...! */
#include <linux/memcopy.h>
/*
* _wordcopy_fwd_aligned -- Copy block beginning at SRCP to block beginning
* at DSTP with LEN `op_t' words (not LEN bytes!).
* Both SRCP and DSTP should be aligned for memory operations on `op_t's.
*/
void _wordcopy_fwd_aligned (long int dstp, long int srcp, size_t len)
{
op_t a0, a1;
switch (len % 8) {
case 2:
a0 = ((op_t *) srcp)[0];
srcp -= 6 * OPSIZ;
dstp -= 7 * OPSIZ;
len += 6;
goto do1;
case 3:
a1 = ((op_t *) srcp)[0];
srcp -= 5 * OPSIZ;
dstp -= 6 * OPSIZ;
len += 5;
goto do2;
case 4:
a0 = ((op_t *) srcp)[0];
srcp -= 4 * OPSIZ;
dstp -= 5 * OPSIZ;
len += 4;
goto do3;
case 5:
a1 = ((op_t *) srcp)[0];
srcp -= 3 * OPSIZ;
dstp -= 4 * OPSIZ;
len += 3;
goto do4;
case 6:
a0 = ((op_t *) srcp)[0];
srcp -= 2 * OPSIZ;
dstp -= 3 * OPSIZ;
len += 2;
goto do5;
case 7:
a1 = ((op_t *) srcp)[0];
srcp -= 1 * OPSIZ;
dstp -= 2 * OPSIZ;
len += 1;
goto do6;
case 0:
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
return;
a0 = ((op_t *) srcp)[0];
srcp -= 0 * OPSIZ;
dstp -= 1 * OPSIZ;
goto do7;
case 1:
a1 = ((op_t *) srcp)[0];
srcp -=-1 * OPSIZ;
dstp -= 0 * OPSIZ;
len -= 1;
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
goto do0;
goto do8; /* No-op. */
}
do {
do8:
a0 = ((op_t *) srcp)[0];
((op_t *) dstp)[0] = a1;
do7:
a1 = ((op_t *) srcp)[1];
((op_t *) dstp)[1] = a0;
do6:
a0 = ((op_t *) srcp)[2];
((op_t *) dstp)[2] = a1;
do5:
a1 = ((op_t *) srcp)[3];
((op_t *) dstp)[3] = a0;
do4:
a0 = ((op_t *) srcp)[4];
((op_t *) dstp)[4] = a1;
do3:
a1 = ((op_t *) srcp)[5];
((op_t *) dstp)[5] = a0;
do2:
a0 = ((op_t *) srcp)[6];
((op_t *) dstp)[6] = a1;
do1:
a1 = ((op_t *) srcp)[7];
((op_t *) dstp)[7] = a0;
srcp += 8 * OPSIZ;
dstp += 8 * OPSIZ;
len -= 8;
} while (len != 0);
/*
* This is the right position for do0. Please don't move it into
* the loop.
*/
do0:
((op_t *) dstp)[0] = a1;
}
/*
* _wordcopy_fwd_dest_aligned -- Copy block beginning at SRCP to block
* beginning at DSTP with LEN `op_t' words (not LEN bytes!). DSTP should
* be aligned for memory operations on `op_t's, but SRCP must *not* be aligned.
*/
void _wordcopy_fwd_dest_aligned (long int dstp, long int srcp, size_t len)
{
op_t a0, a1, a2, a3;
int sh_1, sh_2;
/*
* Calculate how to shift a word read at the memory operation aligned
* srcp to make it aligned for copy.
*/
sh_1 = 8 * (srcp % OPSIZ);
sh_2 = 8 * OPSIZ - sh_1;
/*
* Make SRCP aligned by rounding it down to the beginning of the `op_t'
* it points in the middle of.
*/
srcp &= -OPSIZ;
switch (len % 4) {
case 2:
a1 = ((op_t *) srcp)[0];
a2 = ((op_t *) srcp)[1];
srcp -= 1 * OPSIZ;
dstp -= 3 * OPSIZ;
len += 2;
goto do1;
case 3:
a0 = ((op_t *) srcp)[0];
a1 = ((op_t *) srcp)[1];
srcp -= 0 * OPSIZ;
dstp -= 2 * OPSIZ;
len += 1;
goto do2;
case 0:
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
return;
a3 = ((op_t *) srcp)[0];
a0 = ((op_t *) srcp)[1];
srcp -=-1 * OPSIZ;
dstp -= 1 * OPSIZ;
len += 0;
goto do3;
case 1:
a2 = ((op_t *) srcp)[0];
a3 = ((op_t *) srcp)[1];
srcp -=-2 * OPSIZ;
dstp -= 0 * OPSIZ;
len -= 1;
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
goto do0;
goto do4; /* No-op. */
}
do {
do4:
a0 = ((op_t *) srcp)[0];
((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2);
do3:
a1 = ((op_t *) srcp)[1];
((op_t *) dstp)[1] = MERGE (a3, sh_1, a0, sh_2);
do2:
a2 = ((op_t *) srcp)[2];
((op_t *) dstp)[2] = MERGE (a0, sh_1, a1, sh_2);
do1:
a3 = ((op_t *) srcp)[3];
((op_t *) dstp)[3] = MERGE (a1, sh_1, a2, sh_2);
srcp += 4 * OPSIZ;
dstp += 4 * OPSIZ;
len -= 4;
} while (len != 0);
/*
* This is the right position for do0. Please don't move it into
* the loop.
*/
do0:
((op_t *) dstp)[0] = MERGE (a2, sh_1, a3, sh_2);
}
/*
* _wordcopy_bwd_aligned -- Copy block finishing right before
* SRCP to block finishing right before DSTP with LEN `op_t' words (not LEN
* bytes!). Both SRCP and DSTP should be aligned for memory operations
* on `op_t's.
*/
void _wordcopy_bwd_aligned (long int dstp, long int srcp, size_t len)
{
op_t a0, a1;
switch (len % 8) {
case 2:
srcp -= 2 * OPSIZ;
dstp -= 1 * OPSIZ;
a0 = ((op_t *) srcp)[1];
len += 6;
goto do1;
case 3:
srcp -= 3 * OPSIZ;
dstp -= 2 * OPSIZ;
a1 = ((op_t *) srcp)[2];
len += 5;
goto do2;
case 4:
srcp -= 4 * OPSIZ;
dstp -= 3 * OPSIZ;
a0 = ((op_t *) srcp)[3];
len += 4;
goto do3;
case 5:
srcp -= 5 * OPSIZ;
dstp -= 4 * OPSIZ;
a1 = ((op_t *) srcp)[4];
len += 3;
goto do4;
case 6:
srcp -= 6 * OPSIZ;
dstp -= 5 * OPSIZ;
a0 = ((op_t *) srcp)[5];
len += 2;
goto do5;
case 7:
srcp -= 7 * OPSIZ;
dstp -= 6 * OPSIZ;
a1 = ((op_t *) srcp)[6];
len += 1;
goto do6;
case 0:
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
return;
srcp -= 8 * OPSIZ;
dstp -= 7 * OPSIZ;
a0 = ((op_t *) srcp)[7];
goto do7;
case 1:
srcp -= 9 * OPSIZ;
dstp -= 8 * OPSIZ;
a1 = ((op_t *) srcp)[8];
len -= 1;
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
goto do0;
goto do8; /* No-op. */
}
do {
do8:
a0 = ((op_t *) srcp)[7];
((op_t *) dstp)[7] = a1;
do7:
a1 = ((op_t *) srcp)[6];
((op_t *) dstp)[6] = a0;
do6:
a0 = ((op_t *) srcp)[5];
((op_t *) dstp)[5] = a1;
do5:
a1 = ((op_t *) srcp)[4];
((op_t *) dstp)[4] = a0;
do4:
a0 = ((op_t *) srcp)[3];
((op_t *) dstp)[3] = a1;
do3:
a1 = ((op_t *) srcp)[2];
((op_t *) dstp)[2] = a0;
do2:
a0 = ((op_t *) srcp)[1];
((op_t *) dstp)[1] = a1;
do1:
a1 = ((op_t *) srcp)[0];
((op_t *) dstp)[0] = a0;
srcp -= 8 * OPSIZ;
dstp -= 8 * OPSIZ;
len -= 8;
} while (len != 0);
/*
* This is the right position for do0. Please don't move it into
* the loop.
*/
do0:
((op_t *) dstp)[7] = a1;
}
/*
* _wordcopy_bwd_dest_aligned -- Copy block finishing right before SRCP to
* block finishing right before DSTP with LEN `op_t' words (not LEN bytes!).
* DSTP should be aligned for memory operations on `op_t', but SRCP must *not*
* be aligned.
*/
void _wordcopy_bwd_dest_aligned (long int dstp, long int srcp, size_t len)
{
op_t a0, a1, a2, a3;
int sh_1, sh_2;
/*
* Calculate how to shift a word read at the memory operation aligned
* srcp to make it aligned for copy.
*/
sh_1 = 8 * (srcp % OPSIZ);
sh_2 = 8 * OPSIZ - sh_1;
/*
* Make srcp aligned by rounding it down to the beginning of the op_t
* it points in the middle of.
*/
srcp &= -OPSIZ;
srcp += OPSIZ;
switch (len % 4) {
case 2:
srcp -= 3 * OPSIZ;
dstp -= 1 * OPSIZ;
a2 = ((op_t *) srcp)[2];
a1 = ((op_t *) srcp)[1];
len += 2;
goto do1;
case 3:
srcp -= 4 * OPSIZ;
dstp -= 2 * OPSIZ;
a3 = ((op_t *) srcp)[3];
a2 = ((op_t *) srcp)[2];
len += 1;
goto do2;
case 0:
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
return;
srcp -= 5 * OPSIZ;
dstp -= 3 * OPSIZ;
a0 = ((op_t *) srcp)[4];
a3 = ((op_t *) srcp)[3];
goto do3;
case 1:
srcp -= 6 * OPSIZ;
dstp -= 4 * OPSIZ;
a1 = ((op_t *) srcp)[5];
a0 = ((op_t *) srcp)[4];
len -= 1;
if (OP_T_THRESHOLD <= 3 * OPSIZ && len == 0)
goto do0;
goto do4; /* No-op. */
}
do {
do4:
a3 = ((op_t *) srcp)[3];
((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2);
do3:
a2 = ((op_t *) srcp)[2];
((op_t *) dstp)[2] = MERGE (a3, sh_1, a0, sh_2);
do2:
a1 = ((op_t *) srcp)[1];
((op_t *) dstp)[1] = MERGE (a2, sh_1, a3, sh_2);
do1:
a0 = ((op_t *) srcp)[0];
((op_t *) dstp)[0] = MERGE (a1, sh_1, a2, sh_2);
srcp -= 4 * OPSIZ;
dstp -= 4 * OPSIZ;
len -= 4;
} while (len != 0);
/*
* This is the right position for do0. Please don't move it into
* the loop.
*/
do0:
((op_t *) dstp)[3] = MERGE (a0, sh_1, a1, sh_2);
}
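
Why the library carries both forward and backward copiers: with overlapping buffers the copy must run away from the overlap, which is what a memmove() built on these routines does. A standalone illustration, with plain byte loops standing in for the word copiers.

/* Overlapping copy: direction decides correctness. */
#include <stdio.h>

static void copy_fwd(char *dst, const char *src, unsigned int n)
{
	unsigned int i;

	for (i = 0; i < n; i++)
		dst[i] = src[i];
}

static void copy_bwd(char *dst, const char *src, unsigned int n)
{
	while (n--)
		dst[n] = src[n];
}

int main(void)
{
	char a[] = "abcdef";
	char b[] = "abcdef";

	copy_fwd(a + 2, a, 4);	/* dst overlaps ahead of src: corrupts */
	copy_bwd(b + 2, b, 4);	/* copying backwards is safe here */
	printf("forward: %s  backward: %s\n", a, b);	/* ababab ababcd */
	return 0;
}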

426
lib/memory_alloc.c Normal file
View File

@ -0,0 +1,426 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <asm/page.h>
#include <linux/io.h>
#include <linux/memory_alloc.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/err.h>
#include <linux/log2.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>
#define MAX_MEMPOOLS 8
struct mem_pool mpools[MAX_MEMPOOLS];
/* The tree contains all allocations over all memory pools */
static struct rb_root alloc_root;
static struct mutex alloc_mutex;
static void *s_start(struct seq_file *m, loff_t *pos)
__acquires(&alloc_mutex)
{
loff_t n = *pos;
struct rb_node *r;
mutex_lock(&alloc_mutex);
r = rb_first(&alloc_root);
while (n > 0 && r) {
n--;
r = rb_next(r);
}
if (!n)
return r;
return NULL;
}
static void *s_next(struct seq_file *m, void *p, loff_t *pos)
{
struct rb_node *r = p;
++*pos;
return rb_next(r);
}
static void s_stop(struct seq_file *m, void *p)
__releases(&alloc_mutex)
{
mutex_unlock(&alloc_mutex);
}
static int s_show(struct seq_file *m, void *p)
{
struct rb_node *r = p;
struct alloc *node = rb_entry(r, struct alloc, rb_node);
seq_printf(m, "0x%lx 0x%p %ld %u %pS\n", node->paddr, node->vaddr,
node->len, node->mpool->id, node->caller);
return 0;
}
static const struct seq_operations mempool_op = {
.start = s_start,
.next = s_next,
.stop = s_stop,
.show = s_show,
};
static int mempool_open(struct inode *inode, struct file *file)
{
return seq_open(file, &mempool_op);
}
static struct alloc *find_alloc(void *addr)
{
struct rb_root *root = &alloc_root;
struct rb_node *p = root->rb_node;
mutex_lock(&alloc_mutex);
while (p) {
struct alloc *node;
node = rb_entry(p, struct alloc, rb_node);
if (addr < node->vaddr)
p = p->rb_left;
else if (addr > node->vaddr)
p = p->rb_right;
else {
mutex_unlock(&alloc_mutex);
return node;
}
}
mutex_unlock(&alloc_mutex);
return NULL;
}
static int add_alloc(struct alloc *node)
{
struct rb_root *root = &alloc_root;
struct rb_node **p = &root->rb_node;
struct rb_node *parent = NULL;
mutex_lock(&alloc_mutex);
while (*p) {
struct alloc *tmp;
parent = *p;
tmp = rb_entry(parent, struct alloc, rb_node);
if (node->vaddr < tmp->vaddr)
p = &(*p)->rb_left;
else if (node->vaddr > tmp->vaddr)
p = &(*p)->rb_right;
else {
WARN(1, "memory at %p already allocated", tmp->vaddr);
mutex_unlock(&alloc_mutex);
return -EINVAL;
}
}
rb_link_node(&node->rb_node, parent, p);
rb_insert_color(&node->rb_node, root);
mutex_unlock(&alloc_mutex);
return 0;
}
static int remove_alloc(struct alloc *victim_node)
{
struct rb_root *root = &alloc_root;
if (!victim_node)
return -EINVAL;
mutex_lock(&alloc_mutex);
rb_erase(&victim_node->rb_node, root);
mutex_unlock(&alloc_mutex);
return 0;
}
static struct gen_pool *initialize_gpool(unsigned long start,
unsigned long size)
{
struct gen_pool *gpool;
gpool = gen_pool_create(PAGE_SHIFT, -1);
if (!gpool)
return NULL;
if (gen_pool_add(gpool, start, size, -1)) {
gen_pool_destroy(gpool);
return NULL;
}
return gpool;
}
static void *__alloc(struct mem_pool *mpool, unsigned long size,
unsigned long align, int cached, void *caller)
{
unsigned long paddr;
void __iomem *vaddr;
unsigned long aligned_size;
int log_align = ilog2(align);
struct alloc *node;
aligned_size = PFN_ALIGN(size);
paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
if (!paddr)
return NULL;
node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
if (!node)
goto out;
if (cached)
vaddr = ioremap_cached(paddr, aligned_size);
else
vaddr = ioremap(paddr, aligned_size);
if (!vaddr)
goto out_kfree;
node->vaddr = vaddr;
node->paddr = paddr;
node->len = aligned_size;
node->mpool = mpool;
node->caller = caller;
if (add_alloc(node))
goto out_kfree;
mpool->free -= aligned_size;
return vaddr;
out_kfree:
if (vaddr)
iounmap(vaddr);
kfree(node);
out:
gen_pool_free(mpool->gpool, paddr, aligned_size);
return NULL;
}
static void __free(void *vaddr, bool unmap)
{
struct alloc *node = find_alloc(vaddr);
if (!node)
return;
if (unmap)
iounmap(node->vaddr);
gen_pool_free(node->mpool->gpool, node->paddr, node->len);
node->mpool->free += node->len;
remove_alloc(node);
kfree(node);
}
static struct mem_pool *mem_type_to_memory_pool(int mem_type)
{
struct mem_pool *mpool = &mpools[mem_type];
if (!mpool->size)
return NULL;
mutex_lock(&mpool->pool_mutex);
if (!mpool->gpool)
mpool->gpool = initialize_gpool(mpool->paddr, mpool->size);
mutex_unlock(&mpool->pool_mutex);
if (!mpool->gpool)
return NULL;
return mpool;
}
struct mem_pool *initialize_memory_pool(unsigned long start,
unsigned long size, int mem_type)
{
int id = mem_type;
if (id >= MAX_MEMPOOLS || size <= PAGE_SIZE || size % PAGE_SIZE)
return NULL;
mutex_lock(&mpools[id].pool_mutex);
mpools[id].paddr = start;
mpools[id].size = size;
mpools[id].free = size;
mpools[id].id = id;
mutex_unlock(&mpools[id].pool_mutex);
pr_info("memory pool %d (start %lx size %lx) initialized\n",
id, start, size);
return &mpools[id];
}
EXPORT_SYMBOL_GPL(initialize_memory_pool);
void *allocate_contiguous_memory(unsigned long size,
int mem_type, unsigned long align, int cached)
{
unsigned long aligned_size = PFN_ALIGN(size);
struct mem_pool *mpool;
mpool = mem_type_to_memory_pool(mem_type);
if (!mpool)
return NULL;
return __alloc(mpool, aligned_size, align, cached,
__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory);
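/*
 * Usage sketch (illustrative, not part of this commit): allocate a
 * cached 64 KiB contiguous buffer and release it again. MEMTYPE_EBI0
 * is an assumed pool id; callers pass whatever id they handed to
 * initialize_memory_pool(). The alignment must be a power of two,
 * since __alloc() feeds it through ilog2().
 */
static int contig_demo(void)
{
	void *buf = allocate_contiguous_memory(0x10000, MEMTYPE_EBI0,
					       0x1000, 1);

	if (!buf)
		return -ENOMEM;
	/* ... use the cached kernel mapping in 'buf' ... */
	free_contiguous_memory(buf);
	return 0;
}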
unsigned long _allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align, void *caller)
{
unsigned long paddr;
unsigned long aligned_size;
struct alloc *node;
struct mem_pool *mpool;
int log_align = ilog2(align);
mpool = mem_type_to_memory_pool(mem_type);
if (!mpool || !mpool->gpool)
return 0;
aligned_size = PFN_ALIGN(size);
paddr = gen_pool_alloc_aligned(mpool->gpool, aligned_size, log_align);
if (!paddr)
return 0;
node = kmalloc(sizeof(struct alloc), GFP_KERNEL);
if (!node)
goto out;
node->paddr = paddr;
/* We search the tree using node->vaddr, so set
* it to something unique even though we don't
* use it for physical allocation nodes.
* The virtual and physical address ranges
* are disjoint, so there won't be any chance of
* a duplicate node->vaddr value.
*/
node->vaddr = (void *)paddr;
node->len = aligned_size;
node->mpool = mpool;
node->caller = caller;
if (add_alloc(node))
goto out_kfree;
mpool->free -= aligned_size;
return paddr;
out_kfree:
kfree(node);
out:
gen_pool_free(mpool->gpool, paddr, aligned_size);
return 0;
}
EXPORT_SYMBOL_GPL(_allocate_contiguous_memory_nomap);
unsigned long allocate_contiguous_memory_nomap(unsigned long size,
int mem_type, unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, mem_type, align,
__builtin_return_address(0));
}
EXPORT_SYMBOL_GPL(allocate_contiguous_memory_nomap);
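/*
 * Companion sketch for the nomap variant (illustrative): the caller
 * receives a bare physical address, no kernel mapping is created, and
 * the buffer must be released by physical address. Pool id assumed
 * as above.
 */
static int contig_nomap_demo(void)
{
	unsigned long pa = allocate_contiguous_memory_nomap(0x10000,
						MEMTYPE_EBI0, 0x1000);

	if (!pa)
		return -ENOMEM;
	/* hand 'pa' to hardware that performs its own mapping or DMA */
	free_contiguous_memory_by_paddr(pa);
	return 0;
}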
void free_contiguous_memory(void *addr)
{
if (!addr)
return;
__free(addr, true);
return;
}
EXPORT_SYMBOL_GPL(free_contiguous_memory);
void free_contiguous_memory_by_paddr(unsigned long paddr)
{
if (!paddr)
return;
__free((void *)paddr, false);
return;
}
EXPORT_SYMBOL_GPL(free_contiguous_memory_by_paddr);
unsigned long memory_pool_node_paddr(void *vaddr)
{
struct alloc *node = find_alloc(vaddr);
if (!node)
return -EINVAL;
return node->paddr;
}
EXPORT_SYMBOL_GPL(memory_pool_node_paddr);
unsigned long memory_pool_node_len(void *vaddr)
{
struct alloc *node = find_alloc(vaddr);
if (!node)
return -EINVAL;
return node->len;
}
EXPORT_SYMBOL_GPL(memory_pool_node_len);
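/*
 * Illustrative query helper (hypothetical, not in this commit):
 * translate a pool vaddr back to its physical address and length,
 * e.g. before handing the buffer to a DMA engine. Both lookups
 * return -EINVAL (as an unsigned long) for unknown addresses.
 */
static void contig_query_demo(void *buf)
{
	unsigned long pa = memory_pool_node_paddr(buf);
	unsigned long len = memory_pool_node_len(buf);

	pr_debug("buf %p -> paddr 0x%lx len 0x%lx\n", buf, pa, len);
}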
static const struct file_operations mempool_operations = {
.owner = THIS_MODULE,
.open = mempool_open,
.read = seq_read,
.llseek = seq_lseek,
.release = seq_release,	/* paired with seq_open(); no private data to free */
};
int __init memory_pool_init(void)
{
int i;
alloc_root = RB_ROOT;
mutex_init(&alloc_mutex);
for (i = 0; i < ARRAY_SIZE(mpools); i++) {
mutex_init(&mpools[i].pool_mutex);
mpools[i].gpool = NULL;
}
return 0;
}
static int __init debugfs_mempool_init(void)
{
struct dentry *entry, *dir = debugfs_create_dir("mempool", NULL);
if (!dir) {
pr_err("Cannot create /sys/kernel/debug/mempool");
return -EINVAL;
}
entry = debugfs_create_file("map", S_IRUSR, dir,
NULL, &mempool_operations);
if (!entry)
pr_err("Cannot create /sys/kernel/debug/mempool/map");
return entry ? 0 : -EINVAL;
}
module_init(debugfs_mempool_init);
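/*
 * Sample of the resulting debugfs output (values illustrative; the
 * format comes from s_show() above: paddr, vaddr, length, pool id,
 * caller):
 *
 *   # cat /sys/kernel/debug/mempool/map
 *   0x3b700000 0xe0900000 65536 0 contig_demo+0x18/0x44
 */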

View File

@ -276,7 +276,7 @@ out:
* chunks of ashmem regions LRU-wise one-at-a-time until we hit 'nr_to_scan'
* pages freed.
*/
-static int ashmem_shrink(int nr_to_scan, gfp_t gfp_mask)
+static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask)
{
struct ashmem_range *range, *next;
@ -595,8 +595,8 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
case ASHMEM_PURGE_ALL_CACHES:
ret = -EPERM;
if (capable(CAP_SYS_ADMIN)) {
-			ret = ashmem_shrink(0, GFP_KERNEL);
-			ashmem_shrink(ret, GFP_KERNEL);
+			ret = ashmem_shrink(&ashmem_shrinker, 0, GFP_KERNEL);
+			ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL);
}
break;
}
@ -604,6 +604,59 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
return ret;
}
static int is_ashmem_file(struct file *file)
{
	char fname[256], *name;

	name = dentry_path(file->f_dentry, fname, 256);
	/* dentry_path() returns an ERR_PTR if the path doesn't fit. */
	if (IS_ERR(name))
		return 0;
	return strcmp(name, "/ashmem") ? 0 : 1;
}
int get_ashmem_file(int fd, struct file **filp, struct file **vm_file,
unsigned long *len)
{
int ret = -1;
struct file *file = fget(fd);
*filp = NULL;
*vm_file = NULL;
if (unlikely(file == NULL)) {
pr_err("ashmem: %s: requested data from file "
"descriptor that doesn't exist.\n", __func__);
} else {
char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];
pr_debug("filp %p rdev %d pid %u(%s) file %p(%ld)"
" dev id: %d\n", filp,
file->f_dentry->d_inode->i_rdev,
current->pid, get_task_comm(currtask_name, current),
file, file_count(file),
MINOR(file->f_dentry->d_inode->i_rdev));
if (is_ashmem_file(file)) {
struct ashmem_area *asma = file->private_data;
*filp = file;
*vm_file = asma->file;
*len = asma->size;
ret = 0;
} else {
pr_err("file descriptor is not an ashmem "
"region fd: %d\n", fd);
fput(file);
}
}
return ret;
}
EXPORT_SYMBOL(get_ashmem_file);
void put_ashmem_file(struct file *file)
{
	char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1];

	/* Check the pointer before the pr_debug below dereferences it. */
	if (!file)
		return;
	pr_debug("rdev %d pid %u(%s) file %p(%ld) dev id: %d\n",
		file->f_dentry->d_inode->i_rdev, current->pid,
		get_task_comm(currtask_name, current), file,
		file_count(file), MINOR(file->f_dentry->d_inode->i_rdev));
	if (is_ashmem_file(file))
		fput(file);
}
EXPORT_SYMBOL(put_ashmem_file);
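/*
 * Usage sketch (hypothetical driver code, not in this commit): resolve
 * a userspace ashmem fd to its backing shmem file. get_ashmem_file()
 * takes a reference on success, which put_ashmem_file() drops.
 */
static int ashmem_pin_demo(int fd)
{
	struct file *filp, *vm_file;
	unsigned long len;

	if (get_ashmem_file(fd, &filp, &vm_file, &len))
		return -EINVAL;	/* not an ashmem region fd */
	/* ... operate on 'vm_file', bounded by 'len' bytes ... */
	put_ashmem_file(filp);
	return 0;
}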
static struct file_operations ashmem_fops = {
.owner = THIS_MODULE,
.open = ashmem_open,

View File

@ -121,7 +121,7 @@ static char * const zone_names[MAX_NR_ZONES] = {
"Movable",
};
-int min_free_kbytes = 1024;
+int min_free_kbytes = 5120;
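/* Raising min_free_kbytes from the 1024 default keeps a larger free-page
 * reserve, presumably so graphics/PMEM allocations are less likely to
 * stall under memory pressure (rationale assumed, not stated in the diff).
 */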
int min_free_order_shift = 1;
static unsigned long __meminitdata nr_kernel_pages;