[KGSL] add missing files for last commit

Shantanu Gupta 2012-05-14 02:47:02 +05:30 committed by milaq
parent 351d65fc0a
commit 9bb78d2a7f
12 changed files with 4051 additions and 0 deletions


@@ -0,0 +1 @@
#include <generated/asm-offsets.h>


@@ -0,0 +1,75 @@
/*
* arch/arm/include/asm/outercache.h
*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
};
#ifdef CONFIG_OUTER_CACHE
extern struct outer_cache_fns outer_cache;
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
#else
static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }
#endif
#ifdef CONFIG_OUTER_CACHE_SYNC
static inline void outer_sync(void)
{
if (outer_cache.sync)
outer_cache.sync();
}
#else
static inline void outer_sync(void)
{ }
#endif
#endif /* __ASM_OUTERCACHE_H */
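
For context on how these hooks are consumed: a minimal sketch of the usual clean-before-write / invalidate-after-read pattern around device DMA. The function and parameter names are illustrative; real drivers normally reach these hooks through the DMA mapping API rather than calling them directly.

/* Sketch only: maintain the outer (L2) cache around a DMA transfer. */
static void example_dma_to_device(unsigned long phys, unsigned long size)
{
        /* CPU filled the buffer: push dirty outer-cache lines to memory */
        outer_clean_range(phys, phys + size);
}

static void example_dma_from_device(unsigned long phys, unsigned long size)
{
        /* device wrote memory: drop stale outer-cache lines before reading */
        outer_inv_range(phys, phys + size);
}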


@@ -0,0 +1,63 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _INTERNAL_POWER_RAIL_H
#define _INTERNAL_POWER_RAIL_H
/* Clock power rail IDs */
#define PWR_RAIL_GRP_CLK 8
#define PWR_RAIL_GRP_2D_CLK 58
#define PWR_RAIL_MDP_CLK 14
#define PWR_RAIL_MFC_CLK 68
#define PWR_RAIL_ROTATOR_CLK 90
#define PWR_RAIL_VDC_CLK 39
#define PWR_RAIL_VFE_CLK 41
#define PWR_RAIL_VPE_CLK 76
enum rail_ctl_mode {
PWR_RAIL_CTL_AUTO = 0,
PWR_RAIL_CTL_MANUAL,
};
static inline int __maybe_unused internal_pwr_rail_ctl(unsigned rail_id,
bool enable)
{
/* Not yet implemented. */
return 0;
}
static inline int __maybe_unused internal_pwr_rail_mode(unsigned rail_id,
enum rail_ctl_mode mode)
{
/* Not yet implemented. */
return 0;
}
int internal_pwr_rail_ctl_auto(unsigned rail_id, bool enable);
#endif /* _INTERNAL_POWER_RAIL_H */
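
A minimal usage sketch for the one non-stub declaration above, assuming a caller that powers the 3D graphics ("GRP") rail; the wrapper name is hypothetical.

/* Sketch only: enable the graphics core rail using the IDs defined above. */
static int example_grp_rail_on(void)
{
        /* true = enable; per its name, this entry point presumably
         * selects the auto control mode */
        return internal_pwr_rail_ctl_auto(PWR_RAIL_GRP_CLK, true);
}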


@@ -0,0 +1,64 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* MSM hardware supports multiple flavors of physical memory.
* This file captures hardware-specific information about these types.
*/
#ifndef __ASM_ARCH_MSM_MEMTYPES_H
#define __ASM_ARCH_MSM_MEMTYPES_H
#include <mach/memory.h>
#include <linux/init.h>
int __init meminfo_init(unsigned int, unsigned int);
/* Redundant check to prevent this from being included outside of 7x30 */
#if defined(CONFIG_ARCH_MSM7X30)
unsigned int get_num_populated_chipselects(void);
#endif
unsigned int get_num_memory_banks(void);
unsigned int get_memory_bank_size(unsigned int);
unsigned int get_memory_bank_start(unsigned int);
int soc_change_memory_power(u64, u64, int);
enum {
MEMTYPE_NONE = -1,
MEMTYPE_SMI_KERNEL = 0,
MEMTYPE_SMI,
MEMTYPE_EBI0,
MEMTYPE_EBI1,
MEMTYPE_MAX,
};
void msm_reserve(void);
#define MEMTYPE_FLAGS_FIXED 0x1
#define MEMTYPE_FLAGS_1M_ALIGN 0x2
struct memtype_reserve {
unsigned long start;
unsigned long size;
unsigned long limit;
int flags;
};
struct reserve_info {
struct memtype_reserve *memtype_reserve_table;
void (*calculate_reserve_sizes)(void);
int (*paddr_to_memtype)(unsigned int);
unsigned long low_unstable_address;
unsigned long max_unstable_size;
unsigned long bank_size;
};
extern struct reserve_info *reserve_info;
#endif
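
A sketch of how a board file might wire up these hooks, assuming an EBI1-only target; every name prefixed example_ is hypothetical and the sizes are placeholders.

/* Sketch only: minimal reserve_info a board file could register. */
static struct memtype_reserve example_reserve_table[MEMTYPE_MAX] = {
        [MEMTYPE_EBI1] = { .flags = MEMTYPE_FLAGS_1M_ALIGN },
};

static void example_calculate_reserve_sizes(void)
{
        /* each subsystem adds what it needs from MEMTYPE_EBI1 */
        example_reserve_table[MEMTYPE_EBI1].size += SZ_2M;
}

static int example_paddr_to_memtype(unsigned int paddr)
{
        return MEMTYPE_EBI1;    /* single-memtype board */
}

static struct reserve_info example_reserve_info = {
        .memtype_reserve_table = example_reserve_table,
        .calculate_reserve_sizes = example_calculate_reserve_sizes,
        .paddr_to_memtype = example_paddr_to_memtype,
};

/* board init: reserve_info = &example_reserve_info; msm_reserve(); */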

drivers/gpu/msm/a2xx_reg.h

@@ -0,0 +1,418 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __A200_REG_H
#define __A200_REG_H
enum VGT_EVENT_TYPE {
VS_DEALLOC = 0,
PS_DEALLOC = 1,
VS_DONE_TS = 2,
PS_DONE_TS = 3,
CACHE_FLUSH_TS = 4,
CONTEXT_DONE = 5,
CACHE_FLUSH = 6,
VIZQUERY_START = 7,
VIZQUERY_END = 8,
SC_WAIT_WC = 9,
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
PERFCOUNTER_START = 23,
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
};
enum COLORFORMATX {
COLORX_4_4_4_4 = 0,
COLORX_1_5_5_5 = 1,
COLORX_5_6_5 = 2,
COLORX_8 = 3,
COLORX_8_8 = 4,
COLORX_8_8_8_8 = 5,
COLORX_S8_8_8_8 = 6,
COLORX_16_FLOAT = 7,
COLORX_16_16_FLOAT = 8,
COLORX_16_16_16_16_FLOAT = 9,
COLORX_32_FLOAT = 10,
COLORX_32_32_FLOAT = 11,
COLORX_32_32_32_32_FLOAT = 12,
COLORX_2_3_3 = 13,
COLORX_8_8_8 = 14,
};
enum SURFACEFORMAT {
FMT_1_REVERSE = 0,
FMT_1 = 1,
FMT_8 = 2,
FMT_1_5_5_5 = 3,
FMT_5_6_5 = 4,
FMT_6_5_5 = 5,
FMT_8_8_8_8 = 6,
FMT_2_10_10_10 = 7,
FMT_8_A = 8,
FMT_8_B = 9,
FMT_8_8 = 10,
FMT_Cr_Y1_Cb_Y0 = 11,
FMT_Y1_Cr_Y0_Cb = 12,
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
FMT_10_11_11 = 16,
FMT_11_11_10 = 17,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
FMT_24_8 = 22,
FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
FMT_16_EXPAND = 27,
FMT_16_16_EXPAND = 28,
FMT_16_16_16_16_EXPAND = 29,
FMT_16_FLOAT = 30,
FMT_16_16_FLOAT = 31,
FMT_16_16_16_16_FLOAT = 32,
FMT_32 = 33,
FMT_32_32 = 34,
FMT_32_32_32_32 = 35,
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
FMT_32_AS_8 = 39,
FMT_32_AS_8_8 = 40,
FMT_16_MPEG = 41,
FMT_16_16_MPEG = 42,
FMT_8_INTERLACED = 43,
FMT_32_AS_8_INTERLACED = 44,
FMT_32_AS_8_8_INTERLACED = 45,
FMT_16_INTERLACED = 46,
FMT_16_MPEG_INTERLACED = 47,
FMT_16_16_MPEG_INTERLACED = 48,
FMT_DXN = 49,
FMT_8_8_8_8_AS_16_16_16_16 = 50,
FMT_DXT1_AS_16_16_16_16 = 51,
FMT_DXT2_3_AS_16_16_16_16 = 52,
FMT_DXT4_5_AS_16_16_16_16 = 53,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
FMT_10_11_11_AS_16_16_16_16 = 55,
FMT_11_11_10_AS_16_16_16_16 = 56,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
FMT_DXT3A_AS_1_1_1_1 = 61
};
#define REG_PERF_MODE_CNT 0x0
#define REG_PERF_STATE_RESET 0x0
#define REG_PERF_STATE_ENABLE 0x1
#define REG_PERF_STATE_FREEZE 0x2
#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
#define RB_EDRAM_INFO_UNUSED0_SIZE 8
#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
struct rb_edram_info_t {
unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
};
union reg_rb_edram_info {
unsigned int val;
struct rb_edram_info_t f;
};
#define RBBM_READ_ERROR_UNUSED0_SIZE 2
#define RBBM_READ_ERROR_READ_ADDRESS_SIZE 15
#define RBBM_READ_ERROR_UNUSED1_SIZE 13
#define RBBM_READ_ERROR_READ_REQUESTER_SIZE 1
#define RBBM_READ_ERROR_READ_ERROR_SIZE 1
struct rbbm_read_error_t {
unsigned int unused0:RBBM_READ_ERROR_UNUSED0_SIZE;
unsigned int read_address:RBBM_READ_ERROR_READ_ADDRESS_SIZE;
unsigned int unused1:RBBM_READ_ERROR_UNUSED1_SIZE;
unsigned int read_requester:RBBM_READ_ERROR_READ_REQUESTER_SIZE;
unsigned int read_error:RBBM_READ_ERROR_READ_ERROR_SIZE;
};
union rbbm_read_error_u {
unsigned int val:32;
struct rbbm_read_error_t f;
};
#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
#define CP_RB_CNTL_UNUSED0_SIZE 2
#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
#define CP_RB_CNTL_UNUSED1_SIZE 2
#define CP_RB_CNTL_BUF_SWAP_SIZE 2
#define CP_RB_CNTL_UNUSED2_SIZE 2
#define CP_RB_CNTL_RB_POLL_EN_SIZE 1
#define CP_RB_CNTL_UNUSED3_SIZE 6
#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1
#define CP_RB_CNTL_UNUSED4_SIZE 3
#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
struct cp_rb_cntl_t {
unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
};
union reg_cp_rb_cntl {
unsigned int val:32;
struct cp_rb_cntl_t f;
};
#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004
#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL
#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L
#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L
#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L
#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L
#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L
#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L
#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L
#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L
#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L
#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L
#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L
#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L
#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L
#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L
#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L
#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L
#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L
#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L
#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
#define CP_INT_CNTL__SW_INT_MASK 0x00080000L
#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L
#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L
#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L
#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L
#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L
#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L
#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L
#define CP_INT_CNTL__RB_INT_MASK 0x80000000L
#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L
#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L
#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L
#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L
#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
#define REG_CP_CSQ_IB1_STAT 0x01FE
#define REG_CP_CSQ_IB2_STAT 0x01FF
#define REG_CP_CSQ_RB_STAT 0x01FD
#define REG_CP_DEBUG 0x01FC
#define REG_CP_IB1_BASE 0x0458
#define REG_CP_IB1_BUFSZ 0x0459
#define REG_CP_IB2_BASE 0x045A
#define REG_CP_IB2_BUFSZ 0x045B
#define REG_CP_INT_ACK 0x01F4
#define REG_CP_INT_CNTL 0x01F2
#define REG_CP_INT_STATUS 0x01F3
#define REG_CP_ME_CNTL 0x01F6
#define REG_CP_ME_RAM_DATA 0x01FA
#define REG_CP_ME_RAM_WADDR 0x01F8
#define REG_CP_ME_STATUS 0x01F7
#define REG_CP_PFP_UCODE_ADDR 0x00C0
#define REG_CP_PFP_UCODE_DATA 0x00C1
#define REG_CP_QUEUE_THRESHOLDS 0x01D5
#define REG_CP_RB_BASE 0x01C0
#define REG_CP_RB_CNTL 0x01C1
#define REG_CP_RB_RPTR 0x01C4
#define REG_CP_RB_RPTR_ADDR 0x01C3
#define REG_CP_RB_RPTR_WR 0x01C7
#define REG_CP_RB_WPTR 0x01C5
#define REG_CP_RB_WPTR_BASE 0x01C8
#define REG_CP_RB_WPTR_DELAY 0x01C6
#define REG_CP_STAT 0x047F
#define REG_CP_STATE_DEBUG_DATA 0x01ED
#define REG_CP_STATE_DEBUG_INDEX 0x01EC
#define REG_CP_ST_BASE 0x044D
#define REG_CP_ST_BUFSZ 0x044E
#define REG_CP_PERFMON_CNTL 0x0444
#define REG_CP_PERFCOUNTER_SELECT 0x0445
#define REG_CP_PERFCOUNTER_LO 0x0446
#define REG_CP_PERFCOUNTER_HI 0x0447
#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395
#define REG_RBBM_PERFCOUNTER1_HI 0x0398
#define REG_RBBM_PERFCOUNTER1_LO 0x0397
#define REG_MASTER_INT_SIGNAL 0x03B7
#define REG_PA_CL_VPORT_XSCALE 0x210F
#define REG_PA_CL_VPORT_ZOFFSET 0x2114
#define REG_PA_CL_VPORT_ZSCALE 0x2113
#define REG_PA_CL_CLIP_CNTL 0x2204
#define REG_PA_CL_VTE_CNTL 0x2206
#define REG_PA_SC_AA_MASK 0x2312
#define REG_PA_SC_LINE_CNTL 0x2300
#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E
#define REG_PA_SC_VIZ_QUERY 0x2293
#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44
#define REG_PA_SC_WINDOW_OFFSET 0x2080
#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082
#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
#define REG_PA_SU_FACE_DATA 0x0C86
#define REG_PA_SU_POINT_SIZE 0x2280
#define REG_PA_SU_LINE_CNTL 0x2282
#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
#define REG_PA_SU_SC_MODE_CNTL 0x2205
#define REG_PC_INDEX_OFFSET 0x2102
#define REG_RBBM_CNTL 0x003B
#define REG_RBBM_INT_ACK 0x03B6
#define REG_RBBM_INT_CNTL 0x03B4
#define REG_RBBM_INT_STATUS 0x03B5
#define REG_RBBM_PATCH_RELEASE 0x0001
#define REG_RBBM_PERIPHID1 0x03F9
#define REG_RBBM_PERIPHID2 0x03FA
#define REG_RBBM_DEBUG 0x039B
#define REG_RBBM_DEBUG_OUT 0x03A0
#define REG_RBBM_DEBUG_CNTL 0x03A1
#define REG_RBBM_PM_OVERRIDE1 0x039C
#define REG_RBBM_PM_OVERRIDE2 0x039D
#define REG_RBBM_READ_ERROR 0x03B3
#define REG_RBBM_SOFT_RESET 0x003C
#define REG_RBBM_STATUS 0x05D0
#define REG_RB_COLORCONTROL 0x2202
#define REG_RB_COLOR_DEST_MASK 0x2326
#define REG_RB_COLOR_MASK 0x2104
#define REG_RB_COPY_CONTROL 0x2318
#define REG_RB_DEPTHCONTROL 0x2200
#define REG_RB_EDRAM_INFO 0x0F02
#define REG_RB_MODECONTROL 0x2208
#define REG_RB_SURFACE_INFO 0x2000
#define REG_RB_SAMPLE_POS 0x220a
#define REG_SCRATCH_ADDR 0x01DD
#define REG_SCRATCH_REG0 0x0578
#define REG_SCRATCH_REG2 0x057A
#define REG_SCRATCH_UMSK 0x01DC
#define REG_SQ_CF_BOOLEANS 0x4900
#define REG_SQ_CF_LOOP 0x4908
#define REG_SQ_GPR_MANAGEMENT 0x0D00
#define REG_SQ_FLOW_CONTROL 0x0D01
#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
#define REG_SQ_INT_ACK 0x0D36
#define REG_SQ_INT_CNTL 0x0D34
#define REG_SQ_INT_STATUS 0x0D35
#define REG_SQ_PROGRAM_CNTL 0x2180
#define REG_SQ_PS_PROGRAM 0x21F6
#define REG_SQ_VS_PROGRAM 0x21F7
#define REG_SQ_WRAPPING_0 0x2183
#define REG_SQ_WRAPPING_1 0x2184
#define REG_VGT_ENHANCE 0x2294
#define REG_VGT_INDX_OFFSET 0x2102
#define REG_VGT_MAX_VTX_INDX 0x2100
#define REG_VGT_MIN_VTX_INDX 0x2101
#define REG_TP0_CHICKEN 0x0E1E
#define REG_TC_CNTL_STATUS 0x0E00
#define REG_PA_SC_AA_CONFIG 0x2301
#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
#define REG_SQ_INTERPOLATOR_CNTL 0x2182
#define REG_RB_DEPTH_INFO 0x2002
#define REG_COHER_DEST_BASE_0 0x2006
#define REG_RB_FOG_COLOR 0x2109
#define REG_RB_STENCILREFMASK_BF 0x210C
#define REG_PA_SC_LINE_STIPPLE 0x2283
#define REG_SQ_PS_CONST 0x2308
#define REG_RB_DEPTH_CLEAR 0x231D
#define REG_RB_SAMPLE_COUNT_CTL 0x2324
#define REG_SQ_CONSTANT_0 0x4000
#define REG_SQ_FETCH_0 0x4800
#define REG_COHER_BASE_PM4 0xA2A
#define REG_COHER_STATUS_PM4 0xA2B
#define REG_COHER_SIZE_PM4 0xA29
/*registers added in adreno220*/
#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209
#define REG_A220_GRAS_CONTROL 0x2210
#define REG_A220_VSC_BIN_SIZE 0x0C01
#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D
/*registers added in adreno225*/
#define REG_A225_RB_COLOR_INFO3 0x2005
#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103
#define REG_A225_GRAS_UCP0X 0x2340
#define REG_A225_GRAS_UCP_ENABLED 0x2360
#endif /* __A200_REG_H */
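
The bitfield unions above exist so driver code can compose register words without hand-written shift/mask arithmetic. A sketch, with illustrative field values rather than required hardware settings:

/* Sketch only: build a CP_RB_CNTL word for a hypothetical ringbuffer. */
static unsigned int example_build_rb_cntl(void)
{
        union reg_cp_rb_cntl cntl = { .val = 0 };

        cntl.f.rb_bufsz = 10;           /* example: log2 size in dwords */
        cntl.f.rb_blksz = 7;            /* example: rptr report granularity */
        cntl.f.rb_poll_en = 0;          /* interrupt driven, no polling */
        cntl.f.rb_no_update = 0;        /* let the CP update the rptr */
        return cntl.val;        /* caller writes this to REG_CP_RB_CNTL */
}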

File diff suppressed because it is too large


@@ -0,0 +1,766 @@
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno_ringbuffer.h"
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}
static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n",
pool->static_entries);
}
static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}
static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}
static struct kobj_attribute attr_ptpool_entries = {
.attr = { .name = "ptpool_entries", .mode = 0444 },
.show = sysfs_show_ptpool_entries,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_min = {
.attr = { .name = "ptpool_min", .mode = 0444 },
.show = sysfs_show_ptpool_min,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_chunks = {
.attr = { .name = "ptpool_chunks", .mode = 0444 },
.show = sysfs_show_ptpool_chunks,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_ptsize = {
.attr = { .name = "ptpool_ptsize", .mode = 0444 },
.show = sysfs_show_ptpool_ptsize,
.store = NULL,
};
static struct attribute *ptpool_attrs[] = {
&attr_ptpool_entries.attr,
&attr_ptpool_min.attr,
&attr_ptpool_chunks.attr,
&attr_ptpool_ptsize.attr,
NULL,
};
static struct attribute_group ptpool_attr_group = {
.attrs = ptpool_attrs,
};
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
struct kgsl_ptpool_chunk *chunk;
size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
BUG_ON(count == 0);
if (get_order(size) >= MAX_ORDER) {
KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
return -EINVAL;
}
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
return -ENOMEM;
}
chunk->size = size;
chunk->count = count;
chunk->dynamic = dynamic;
chunk->data = dma_alloc_coherent(NULL, size,
&chunk->phys, GFP_KERNEL);
if (chunk->data == NULL) {
KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
goto err;
}
chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
if (chunk->bitmap == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
BITS_TO_LONGS(count) * 4);
goto err_dma;
}
list_add_tail(&chunk->list, &pool->list);
pool->chunks++;
pool->entries += count;
if (!dynamic)
pool->static_entries += count;
return 0;
err_dma:
dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
kfree(chunk);
return -ENOMEM;
}
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
struct kgsl_ptpool_chunk *chunk;
list_for_each_entry(chunk, &pool->list, list) {
int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
if (bit >= chunk->count)
continue;
set_bit(bit, chunk->bitmap);
*physaddr = chunk->phys + (bit * pool->ptsize);
return chunk->data + (bit * pool->ptsize);
}
return NULL;
}
/**
* kgsl_ptpool_add
* @pool: A pointer to a ptpool structure
* @count: Number of entries to add
*
* Add static entries to the pagetable pool.
*/
static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
int ret = 0;
BUG_ON(count == 0);
mutex_lock(&pool->lock);
/* Only 4MB can be allocated in one chunk, so larger allocations
need to be split into multiple sections */
while (count) {
int entries = ((count * pool->ptsize) > SZ_4M) ?
SZ_4M / pool->ptsize : count;
/* Add the entries as static, i.e. they don't ever stand
a chance of being removed */
ret = _kgsl_ptpool_add_entries(pool, entries, 0);
if (ret)
break;
count -= entries;
}
mutex_unlock(&pool->lock);
return ret;
}
/**
* kgsl_ptpool_alloc
* @pool: A pointer to a ptpool structure
* @physaddr: A pointer to store the physical address of the pagetable
*
* Allocate a pagetable from the pool. Returns the virtual address
* of the pagetable; the physical address is returned in *physaddr.
*/
static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
unsigned int *physaddr)
{
void *addr = NULL;
int ret;
mutex_lock(&pool->lock);
addr = _kgsl_ptpool_get_entry(pool, physaddr);
if (addr)
goto done;
/* Add a chunk for 1 more pagetable and mark it as dynamic */
ret = _kgsl_ptpool_add_entries(pool, 1, 1);
if (ret)
goto done;
addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
mutex_unlock(&pool->lock);
return addr;
}
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
list_del(&chunk->list);
if (chunk->data)
dma_free_coherent(NULL, chunk->size, chunk->data,
chunk->phys);
kfree(chunk->bitmap);
kfree(chunk);
}
/**
* kgsl_ptpool_free
* @pool: A pointer to a ptpool structure
* @addr: A pointer to the virtual address to free
*
* Free a pagetable allocated from the pool
*/
static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
struct kgsl_ptpool_chunk *chunk, *tmp;
if (pool == NULL || addr == NULL)
return;
mutex_lock(&pool->lock);
list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
if (addr >= chunk->data &&
addr < chunk->data + chunk->size) {
int bit = ((unsigned long) (addr - chunk->data)) /
pool->ptsize;
clear_bit(bit, chunk->bitmap);
memset(addr, 0, pool->ptsize);
if (chunk->dynamic &&
bitmap_empty(chunk->bitmap, chunk->count))
_kgsl_ptpool_rm_chunk(chunk);
break;
}
}
mutex_unlock(&pool->lock);
}
void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
struct kgsl_ptpool_chunk *chunk, *tmp;
if (pool == NULL)
return;
mutex_lock(&pool->lock);
list_for_each_entry_safe(chunk, tmp, &pool->list, list)
_kgsl_ptpool_rm_chunk(chunk);
mutex_unlock(&pool->lock);
kfree(pool);
}
/**
* kgsl_gpummu_ptpool_init
* @ptsize: The size of each pagetable in the pool
* @entries: The number of initial entries to add to the pool
*
* Initialize a pool and allocate an initial chunk of entries.
*/
void *kgsl_gpummu_ptpool_init(int ptsize, int entries)
{
struct kgsl_ptpool *pool;
int ret = 0;
BUG_ON(ptsize == 0);
pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
if (!pool) {
KGSL_CORE_ERR("Failed to allocate memory "
"for ptpool\n");
return NULL;
}
pool->ptsize = ptsize;
mutex_init(&pool->lock);
INIT_LIST_HEAD(&pool->list);
if (entries) {
ret = kgsl_ptpool_add(pool, entries);
if (ret)
goto err_ptpool_remove;
}
ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
if (ret) {
KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
"statistics: %d\n", ret);
goto err_ptpool_remove;
}
return (void *)pool;
err_ptpool_remove:
kgsl_gpummu_ptpool_destroy(pool);
return NULL;
}
int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
struct kgsl_gpummu_pt *gpummu_pt;
/* check pt before dereferencing its priv pointer */
if (pt == NULL || !pt_base)
return 0;
gpummu_pt = pt->priv;
return gpummu_pt->base.gpuaddr == pt_base;
}
void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
mmu_specific_pt;
kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
gpummu_pt->base.hostptr);
kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
kfree(gpummu_pt->tlbflushfilter.base);
kfree(gpummu_pt);
}
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
return (va - va_base) >> PAGE_SHIFT;
}
static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
writel_relaxed(val, &baseptr[pte]);
}
static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
return readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
}
static unsigned int kgsl_gpummu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id)
{
unsigned int result = 0;
struct kgsl_gpummu_pt *gpummu_pt;
if (pt == NULL)
return 0;
gpummu_pt = pt->priv;
spin_lock(&pt->lock);
if (gpummu_pt->tlb_flags & (1<<id)) {
result = KGSL_MMUFLAGS_TLBFLUSH;
gpummu_pt->tlb_flags &= ~(1<<id);
}
spin_unlock(&pt->lock);
return result;
}
static void kgsl_gpummu_pagefault(struct kgsl_device *device)
{
unsigned int reg;
unsigned int ptbase;
kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
KGSL_MEM_CRIT(device,
"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
reg & ~(PAGE_SIZE - 1),
kgsl_mmu_get_ptname_from_ptbase(ptbase),
reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}
static void *kgsl_gpummu_create_pagetable(void)
{
struct kgsl_gpummu_pt *gpummu_pt;
gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
GFP_KERNEL);
if (!gpummu_pt)
return NULL;
gpummu_pt->tlb_flags = 0;
gpummu_pt->last_superpte = 0;
gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
gpummu_pt->tlbflushfilter.base = (unsigned int *)
kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
if (!gpummu_pt->tlbflushfilter.base) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
gpummu_pt->tlbflushfilter.size);
goto err_free_gpummu;
}
GSL_TLBFLUSH_FILTER_RESET();
gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
kgsl_driver.ptpool,
&gpummu_pt->base.physaddr);
if (gpummu_pt->base.hostptr == NULL)
goto err_flushfilter;
/* ptpool allocations are from coherent memory, so update the
device statistics accordingly */
KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;
return (void *)gpummu_pt;
err_flushfilter:
kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
kfree(gpummu_pt);
return NULL;
}
static void kgsl_gpummu_default_setstate(struct kgsl_device *device,
uint32_t flags)
{
struct kgsl_gpummu_pt *gpummu_pt;
if (!kgsl_mmu_enabled())
return;
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
gpummu_pt = device->mmu.hwpagetable->priv;
kgsl_regwrite(device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/* Invalidate all and tc */
kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
}
}
static void kgsl_gpummu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_gpummu_pt *gpummu_pt;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
*/
if (mmu->hwpagetable != pagetable) {
mmu->hwpagetable = pagetable;
spin_lock(&mmu->hwpagetable->lock);
gpummu_pt = mmu->hwpagetable->priv;
gpummu_pt->tlb_flags &= ~(1<<device->id);
spin_unlock(&mmu->hwpagetable->lock);
/* call device specific set page table */
kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
}
static int kgsl_gpummu_init(struct kgsl_device *device)
{
/*
* initialize device mmu
*
* call this with the global lock held
*/
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
mmu->device = device;
/* sub-client MMU lookups require address translation */
if ((mmu->config & ~0x1) > 0) {
/* make sure virtual address range is a multiple of 64KB */
if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
KGSL_CORE_ERR("Invalid pagetable size requested "
"for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
return -EINVAL;
}
/* allocate memory used for completing r/w operations that
* cannot be mapped by the MMU
*/
status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
if (!status)
kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
}
dev_info(device->dev, "|%s| MMU type set for device is GPUMMU\n",
__func__);
return status;
}
static int kgsl_gpummu_start(struct kgsl_device *device)
{
/*
* initialize device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_gpummu_pt *gpummu_pt;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
/* MMU not enabled */
if ((mmu->config & 0x1) == 0)
return 0;
/* setup MMU and sub-client behavior */
kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
/* idle device */
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
/* enable axi interrupts */
kgsl_regwrite(device, MH_INTERRUPT_MASK,
GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
* to complete transactions in case of an MMU fault. Note that
* we'll leave the bottom 32 bytes of the setstate_memory for other
* purposes (e.g. use it when dummy read cycles are needed
* for other blocks) */
kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
mmu->setstate_memory.physaddr + 32);
if (mmu->defaultpagetable == NULL)
mmu->defaultpagetable =
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
if (mmu->defaultpagetable == NULL)
return -ENOMEM;
mmu->hwpagetable = mmu->defaultpagetable;
gpummu_pt = mmu->hwpagetable->priv;
kgsl_regwrite(device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
kgsl_regwrite(device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
mmu->flags |= KGSL_FLAGS_STARTED;
return 0;
}
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc)
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
unsigned int range = memdesc->size;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions perturb the gpuaddr with an offset, so apply the
mask here to make sure we have the right address */
unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
numpages = (range >> PAGE_SHIFT);
if (range & (PAGE_SIZE - 1))
numpages++;
ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
ptelast = ptefirst + numpages;
superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
/* check if PTE exists */
if (!kgsl_pt_map_get(gpummu_pt, pte))
KGSL_CORE_ERR("pt entry %x is already "
"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
if (pte == superpte)
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
GSL_PT_SUPER_PTE);
}
/* Post all writes to the pagetable */
wmb();
return 0;
}
#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
static int
kgsl_gpummu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
unsigned int pte;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
struct scatterlist *s;
int flushtlb = 0;
int i;
pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
/* Flush the TLB if the first PTE isn't at the superpte boundary */
if (pte & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
unsigned int paddr = sg_phys(s);
unsigned int j;
/* Each sg entry might be multiple pages long */
for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
if (SUPERPTE_IS_DIRTY(pte))
flushtlb = 1;
kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
}
}
/* Flush the TLB if the last PTE isn't at the superpte boundary */
if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
wmb();
if (flushtlb) {
/*set all devices as needing flushing*/
gpummu_pt->tlb_flags = UINT_MAX;
GSL_TLBFLUSH_FILTER_RESET();
}
return 0;
}
static int kgsl_gpummu_stop(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
mmu->flags &= ~KGSL_FLAGS_STARTED;
return 0;
}
static int kgsl_gpummu_close(struct kgsl_device *device)
{
/*
* close device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->setstate_memory.gpuaddr)
kgsl_sharedmem_free(&mmu->setstate_memory);
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
return 0;
}
static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_device *device)
{
unsigned int ptbase;
kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
return ptbase;
}
struct kgsl_mmu_ops gpummu_ops = {
.mmu_init = kgsl_gpummu_init,
.mmu_close = kgsl_gpummu_close,
.mmu_start = kgsl_gpummu_start,
.mmu_stop = kgsl_gpummu_stop,
.mmu_setstate = kgsl_gpummu_setstate,
.mmu_device_setstate = kgsl_gpummu_default_setstate,
.mmu_pagefault = kgsl_gpummu_pagefault,
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
};
struct kgsl_mmu_pt_ops gpummu_pt_ops = {
.mmu_map = kgsl_gpummu_map,
.mmu_unmap = kgsl_gpummu_unmap,
.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
.mmu_pt_equal = kgsl_gpummu_pt_equal,
.mmu_pt_get_flags = kgsl_gpummu_pt_get_flags,
};
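
The two exported ptpool entry points bracket everything else in this file. A lifecycle sketch, assuming KGSL_PAGETABLE_SIZE from kgsl_mmu.h; individual pagetables are then carved out internally by kgsl_ptpool_alloc()/kgsl_ptpool_free() through gpummu_pt_ops.

/* Sketch only: create a pool with two static pagetables, then tear down. */
static int example_ptpool_lifecycle(void)
{
        void *pool = kgsl_gpummu_ptpool_init(KGSL_PAGETABLE_SIZE, 2);

        if (pool == NULL)
                return -ENOMEM;
        kgsl_driver.ptpool = pool;      /* as the core presumably does at init */
        /* ... pagetables come and go through gpummu_pt_ops ... */
        kgsl_gpummu_ptpool_destroy(pool);
        return 0;
}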


@@ -0,0 +1,85 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_GPUMMU_H
#define __KGSL_GPUMMU_H
#define GSL_PT_PAGE_BITS_MASK 0x00000007
#define GSL_PT_PAGE_ADDR_MASK PAGE_MASK
#define GSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)gpummu_pt->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(gpummu_pt->tlbflushfilter.base,\
0, gpummu_pt->tlbflushfilter.size)
extern struct kgsl_mmu_ops gpummu_ops;
extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
struct kgsl_gpummu_pt {
struct kgsl_memdesc base;
unsigned int last_superpte;
unsigned int tlb_flags;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
};
struct kgsl_ptpool_chunk {
size_t size;
unsigned int count;
int dynamic;
void *data;
unsigned int phys;
unsigned long *bitmap;
struct list_head list;
};
struct kgsl_ptpool {
size_t ptsize;
struct mutex lock;
struct list_head list;
int entries;
int static_entries;
int chunks;
};
void *kgsl_gpummu_ptpool_init(int ptsize,
int entries);
void kgsl_gpummu_ptpool_destroy(void *ptpool);
static inline unsigned int kgsl_pt_get_base_addr(struct kgsl_pagetable *pt)
{
struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
return gpummu_pt->base.gpuaddr;
}
#endif /* __KGSL_GPUMMU_H */
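
The flush-filter macros above pack one dirty bit per GSL_PT_SUPER_PTE-sized group of PTEs into a byte array. An open-coded sketch of what GSL_TLBFLUSH_FILTER_SETDIRTY expands to, taking the gpummu_pt pointer explicitly that the macros assume in scope:

/* Sketch only: superpte 19 lands in byte 19/8 = 2, bit 19%8 = 3. */
static void example_set_filter_dirty(struct kgsl_gpummu_pt *gpummu_pt,
        unsigned int superpte)
{
        unsigned char *entry = (unsigned char *)gpummu_pt->tlbflushfilter.base
                + superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS;

        *entry |= 1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS);
}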


@@ -0,0 +1,333 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <mach/iommu.h>
#include <linux/msm_kgsl.h>
#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"
struct kgsl_iommu {
struct device *iommu_user_dev;
int iommu_user_dev_attached;
struct device *iommu_priv_dev;
int iommu_priv_dev_attached;
};
static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
struct iommu_domain *domain;
/* check pt before dereferencing its priv pointer */
if (pt == NULL || !pt_base)
return 0;
domain = pt->priv;
return (unsigned int)domain == pt_base;
}
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
struct iommu_domain *domain = mmu_specific_pt;
if (domain)
iommu_domain_free(domain);
}
void *kgsl_iommu_create_pagetable(void)
{
struct iommu_domain *domain = iommu_domain_alloc(0);
if (!domain)
KGSL_CORE_ERR("Failed to create iommu domain\n");
return domain;
}
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct iommu_domain *domain;
struct kgsl_iommu *iommu = mmu->priv;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
domain = mmu->hwpagetable->priv;
if (iommu->iommu_user_dev_attached) {
iommu_detach_device(domain, iommu->iommu_user_dev);
iommu->iommu_user_dev_attached = 0;
KGSL_MEM_INFO(mmu->device,
"iommu %p detached from user dev of MMU: %p\n",
domain, mmu);
}
if (iommu->iommu_priv_dev_attached) {
iommu_detach_device(domain, iommu->iommu_priv_dev);
iommu->iommu_priv_dev_attached = 0;
KGSL_MEM_INFO(mmu->device,
"iommu %p detached from priv dev of MMU: %p\n",
domain, mmu);
}
}
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct iommu_domain *domain;
int ret = 0;
struct kgsl_iommu *iommu = mmu->priv;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
domain = mmu->hwpagetable->priv;
if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) {
ret = iommu_attach_device(domain, iommu->iommu_user_dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
"Failed to attach device, err %d\n", ret);
goto done;
}
iommu->iommu_user_dev_attached = 1;
KGSL_MEM_INFO(mmu->device,
"iommu %p attached to user dev of MMU: %p\n",
domain, mmu);
}
if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) {
ret = iommu_attach_device(domain, iommu->iommu_priv_dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
"Failed to attach device, err %d\n", ret);
iommu_detach_device(domain, iommu->iommu_user_dev);
iommu->iommu_user_dev_attached = 0;
goto done;
}
iommu->iommu_priv_dev_attached = 1;
KGSL_MEM_INFO(mmu->device,
"iommu %p attached to priv dev of MMU: %p\n",
domain, mmu);
}
done:
return ret;
}
static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu,
struct kgsl_device *device)
{
int status = 0;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
if (pdata_dev->iommu_user_ctx_name)
iommu->iommu_user_dev = msm_iommu_get_ctx(
pdata_dev->iommu_user_ctx_name);
if (pdata_dev->iommu_priv_ctx_name)
iommu->iommu_priv_dev = msm_iommu_get_ctx(
pdata_dev->iommu_priv_ctx_name);
if (!iommu->iommu_user_dev) {
KGSL_CORE_ERR("Failed to get user iommu dev handle for "
"device %s\n",
pdata_dev->iommu_user_ctx_name);
status = -EINVAL;
}
return status;
}
static void kgsl_iommu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
*/
if (mmu->hwpagetable != pagetable) {
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->hwpagetable = pagetable;
if (mmu->hwpagetable)
kgsl_attach_pagetable_iommu_domain(mmu);
}
}
}
static int kgsl_iommu_init(struct kgsl_device *device)
{
/*
* initialize device mmu
*
* call this with the global lock held
*/
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_iommu *iommu;
mmu->device = device;
iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
if (!iommu) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
sizeof(struct kgsl_iommu));
return -ENOMEM;
}
iommu->iommu_priv_dev_attached = 0;
iommu->iommu_user_dev_attached = 0;
status = kgsl_get_iommu_ctxt(iommu, device);
if (status) {
kfree(iommu);
iommu = NULL;
}
mmu->priv = iommu;
dev_info(device->dev, "|%s| MMU type set for device is IOMMU\n",
__func__);
return status;
}
static int kgsl_iommu_start(struct kgsl_device *device)
{
int status;
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
if (mmu->defaultpagetable == NULL)
mmu->defaultpagetable =
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
if (mmu->defaultpagetable == NULL)
return -ENOMEM;
mmu->hwpagetable = mmu->defaultpagetable;
status = kgsl_attach_pagetable_iommu_domain(mmu);
if (!status)
mmu->flags |= KGSL_FLAGS_STARTED;
return status;
}
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc)
{
int ret;
unsigned int range = memdesc->size;
struct iommu_domain *domain = (struct iommu_domain *)
mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions perturb the gpuaddr with an offset, so apply the
mask here to make sure we have the right address */
unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
if (range == 0 || gpuaddr == 0)
return 0;
ret = iommu_unmap_range(domain, gpuaddr, range);
if (ret)
KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
"with err: %d\n", domain, gpuaddr,
range, ret);
return 0;
}
static int
kgsl_iommu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
int ret;
unsigned int iommu_virt_addr;
struct iommu_domain *domain = mmu_specific_pt;
BUG_ON(NULL == domain);
iommu_virt_addr = memdesc->gpuaddr;
ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
memdesc->size, MSM_IOMMU_ATTR_NONCACHED);
if (ret) {
KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
"failed with err: %d\n", domain,
iommu_virt_addr, memdesc->sg, memdesc->size,
MSM_IOMMU_ATTR_NONCACHED, ret);
return ret;
}
return ret;
}
static int kgsl_iommu_stop(struct kgsl_device *device)
{
/*
* stop device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* detach iommu attachment */
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->flags &= ~KGSL_FLAGS_STARTED;
}
return 0;
}
static int kgsl_iommu_close(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
return 0;
}
static unsigned int
kgsl_iommu_get_current_ptbase(struct kgsl_device *device)
{
/* Current base is always the hwpagetables domain as we
* do not use per process pagetables right now for iommu.
* This will change when we switch to per process pagetables.
*/
return (unsigned int)device->mmu.hwpagetable->priv;
}
struct kgsl_mmu_ops iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
.mmu_start = kgsl_iommu_start,
.mmu_stop = kgsl_iommu_stop,
.mmu_setstate = kgsl_iommu_setstate,
.mmu_device_setstate = NULL,
.mmu_pagefault = NULL,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
.mmu_map = kgsl_iommu_map,
.mmu_unmap = kgsl_iommu_unmap,
.mmu_create_pagetable = kgsl_iommu_create_pagetable,
.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
.mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_pt_get_flags = NULL,
};
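
As with gpummu_ops, the core layer is expected to dispatch MMU work through this table. Note the NULL mmu_pagefault and mmu_device_setstate slots here; a guarding sketch (the wrapper name is hypothetical):

/* Sketch only: optional hooks must be NULL-checked before dispatch. */
static void example_dispatch_pagefault(struct kgsl_device *device,
        struct kgsl_mmu_ops *ops)
{
        if (ops->mmu_pagefault)         /* NULL for iommu_ops */
                ops->mmu_pagefault(device);
}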


@@ -0,0 +1,221 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/slab.h>
#include <linux/timer.h>
#include <linux/idle_stats_device.h>
#include <linux/cpufreq.h>
#include <linux/notifier.h>
#include <linux/cpumask.h>
#include <linux/tick.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#define MAX_CORES 4
struct _cpu_info {
spinlock_t lock;
struct notifier_block cpu_nb;
u64 start[MAX_CORES];
u64 end[MAX_CORES];
int curr_freq[MAX_CORES];
int max_freq[MAX_CORES];
};
struct idlestats_priv {
char name[32];
struct msm_idle_stats_device idledev;
struct kgsl_device *device;
struct msm_idle_pulse pulse;
struct _cpu_info cpu_info;
};
static int idlestats_cpufreq_notifier(
struct notifier_block *nb,
unsigned long val, void *data)
{
struct _cpu_info *cpu = container_of(nb,
struct _cpu_info, cpu_nb);
struct cpufreq_freqs *freq = data;
if (val != CPUFREQ_POSTCHANGE)
return 0;
spin_lock(&cpu->lock);
if (freq->cpu < num_possible_cpus())
cpu->curr_freq[freq->cpu] = freq->new / 1000;
spin_unlock(&cpu->lock);
return 0;
}
static void idlestats_get_sample(struct msm_idle_stats_device *idledev,
struct msm_idle_pulse *pulse)
{
struct kgsl_power_stats stats;
struct idlestats_priv *priv = container_of(idledev,
struct idlestats_priv, idledev);
struct kgsl_device *device = priv->device;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
mutex_lock(&device->mutex);
/* If the GPU is asleep, don't wake it up - assume that we
are idle */
if (!(device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))) {
device->ftbl->power_stats(device, &stats);
pulse->busy_start_time = pwr->time - stats.busy_time;
pulse->busy_interval = stats.busy_time;
} else {
pulse->busy_start_time = pwr->time;
pulse->busy_interval = 0;
}
pulse->wait_interval = 0;
mutex_unlock(&device->mutex);
}
static void idlestats_busy(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
int i, busy, nr_cpu = 1;
if (priv->pulse.busy_start_time != 0) {
priv->pulse.wait_interval = 0;
/* Calculate the total CPU busy time for this GPU pulse */
for (i = 0; i < num_possible_cpus(); i++) {
spin_lock(&priv->cpu_info.lock);
if (cpu_online(i)) {
priv->cpu_info.end[i] =
(u64)ktime_to_us(ktime_get()) -
get_cpu_idle_time_us(i, NULL);
busy = priv->cpu_info.end[i] -
priv->cpu_info.start[i];
/* Normalize the busy time by frequency */
busy = priv->cpu_info.curr_freq[i] *
(busy / priv->cpu_info.max_freq[i]);
priv->pulse.wait_interval += busy;
nr_cpu++;
}
spin_unlock(&priv->cpu_info.lock);
}
priv->pulse.wait_interval /= nr_cpu;
msm_idle_stats_idle_end(&priv->idledev, &priv->pulse);
}
priv->pulse.busy_start_time = ktime_to_us(ktime_get());
}
static void idlestats_idle(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
int i, nr_cpu;
struct kgsl_power_stats stats;
struct idlestats_priv *priv = pwrscale->priv;
/* This is called from within a mutex protected function, so
no additional locking required */
device->ftbl->power_stats(device, &stats);
/* If total_time is zero, then we don't have
any interesting statistics to store */
if (stats.total_time == 0) {
priv->pulse.busy_start_time = 0;
return;
}
priv->pulse.busy_interval = stats.busy_time;
nr_cpu = num_possible_cpus();
for (i = 0; i < nr_cpu; i++)
if (cpu_online(i))
priv->cpu_info.start[i] =
(u64)ktime_to_us(ktime_get()) -
get_cpu_idle_time_us(i, NULL);
msm_idle_stats_idle_start(&priv->idledev);
}
static void idlestats_sleep(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
priv->idledev.stats->event |= MSM_IDLE_STATS_EVENT_IDLE_TIMER_EXPIRED;
}
static int idlestats_init(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv;
struct cpufreq_policy cpu_policy;
int ret, i;
priv = pwrscale->priv = kzalloc(sizeof(struct idlestats_priv),
GFP_KERNEL);
if (pwrscale->priv == NULL)
return -ENOMEM;
snprintf(priv->name, sizeof(priv->name), "idle_stats_%s",
device->name);
priv->device = device;
priv->idledev.name = (const char *) priv->name;
priv->idledev.get_sample = idlestats_get_sample;
spin_lock_init(&priv->cpu_info.lock);
priv->cpu_info.cpu_nb.notifier_call =
idlestats_cpufreq_notifier;
ret = cpufreq_register_notifier(&priv->cpu_info.cpu_nb,
CPUFREQ_TRANSITION_NOTIFIER);
if (ret)
goto err;
for (i = 0; i < num_possible_cpus(); i++) {
cpufreq_frequency_table_cpuinfo(&cpu_policy,
cpufreq_frequency_get_table(i));
priv->cpu_info.max_freq[i] = cpu_policy.max / 1000;
priv->cpu_info.curr_freq[i] = cpu_policy.max / 1000;
}
ret = msm_idle_stats_register_device(&priv->idledev);
err:
if (ret) {
kfree(pwrscale->priv);
pwrscale->priv = NULL;
}
return ret;
}
static void idlestats_close(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct idlestats_priv *priv = pwrscale->priv;
if (pwrscale->priv == NULL)
return;
cpufreq_unregister_notifier(&priv->cpu_info.cpu_nb,
CPUFREQ_TRANSITION_NOTIFIER);
msm_idle_stats_deregister_device(&priv->idledev);
kfree(pwrscale->priv);
pwrscale->priv = NULL;
}
struct kgsl_pwrscale_policy kgsl_pwrscale_policy_idlestats = {
.name = "idlestats",
.init = idlestats_init,
.idle = idlestats_idle,
.busy = idlestats_busy,
.sleep = idlestats_sleep,
.close = idlestats_close
};
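
One numerical note on idlestats_busy(): the per-CPU scaling `curr_freq * (busy / max_freq)` divides first, so any busy interval shorter than max_freq truncates to zero. A sketch of a precision-preserving ordering, assuming microsecond-scale intervals that keep the product inside u64:

/* Sketch only: multiply before dividing when frequency-scaling busy time. */
static u64 example_normalize_busy(u64 busy_us, u64 curr_freq, u64 max_freq)
{
        if (max_freq == 0)
                return busy_us;
        return div64_u64(busy_us * curr_freq, max_freq); /* linux/math64.h */
}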


@@ -0,0 +1,197 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <mach/socinfo.h>
#include <mach/scm.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#define TZ_GOVERNOR_PERFORMANCE 0
#define TZ_GOVERNOR_ONDEMAND 1
struct tz_priv {
int governor;
unsigned int no_switch_cnt;
unsigned int skip_cnt;
};
#define SWITCH_OFF 200
#define SWITCH_OFF_RESET_TH 40
#define SKIP_COUNTER 500
#define TZ_RESET_ID 0x3
#define TZ_UPDATE_ID 0x4
#ifdef CONFIG_MSM_SCM
/* Trap into the TrustZone, and call funcs there. */
static int __secure_tz_entry(u32 cmd, u32 val)
{
__iowmb();
return scm_call_atomic1(SCM_SVC_IO, cmd, val);
}
#else
static int __secure_tz_entry(u32 cmd, u32 val)
{
return 0;
}
#endif /* CONFIG_MSM_SCM */
static ssize_t tz_governor_show(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
char *buf)
{
struct tz_priv *priv = pwrscale->priv;
int ret;
if (priv->governor == TZ_GOVERNOR_ONDEMAND)
ret = snprintf(buf, 10, "ondemand\n");
else
ret = snprintf(buf, 13, "performance\n");
return ret;
}
static ssize_t tz_governor_store(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale,
const char *buf, size_t count)
{
char str[20];
struct tz_priv *priv = pwrscale->priv;
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int ret;
ret = sscanf(buf, "%19s", str);
if (ret != 1)
return -EINVAL;
mutex_lock(&device->mutex);
if (!strncmp(str, "ondemand", 8))
priv->governor = TZ_GOVERNOR_ONDEMAND;
else if (!strncmp(str, "performance", 11))
priv->governor = TZ_GOVERNOR_PERFORMANCE;
if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
mutex_unlock(&device->mutex);
return count;
}
PWRSCALE_POLICY_ATTR(governor, 0644, tz_governor_show, tz_governor_store);
static struct attribute *tz_attrs[] = {
&policy_attr_governor.attr,
NULL
};
static struct attribute_group tz_attr_group = {
.attrs = tz_attrs,
};
static void tz_wake(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
struct tz_priv *priv = pwrscale->priv;
if (device->state != KGSL_STATE_NAP &&
priv->governor == TZ_GOVERNOR_ONDEMAND)
kgsl_pwrctrl_pwrlevel_change(device,
device->pwrctrl.thermal_pwrlevel);
}
static void tz_idle(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct tz_priv *priv = pwrscale->priv;
struct kgsl_power_stats stats;
int val;
/* In "performance" mode the clock speed always stays
the same */
if (priv->governor == TZ_GOVERNOR_PERFORMANCE)
return;
device->ftbl->power_stats(device, &stats);
if (stats.total_time == 0)
return;
/* If the GPU has stayed in turbo mode for a while,
 * stop writing out values. */
if (pwr->active_pwrlevel == 0) {
if (priv->no_switch_cnt > SWITCH_OFF) {
priv->skip_cnt++;
if (priv->skip_cnt > SKIP_COUNTER) {
priv->no_switch_cnt -= SWITCH_OFF_RESET_TH;
priv->skip_cnt = 0;
}
return;
}
priv->no_switch_cnt++;
} else {
priv->no_switch_cnt = 0;
}
val = __secure_tz_entry(TZ_UPDATE_ID,
stats.total_time - stats.busy_time);
if (val)
kgsl_pwrctrl_pwrlevel_change(device,
pwr->active_pwrlevel + val);
}
static void tz_sleep(struct kgsl_device *device,
struct kgsl_pwrscale *pwrscale)
{
struct tz_priv *priv = pwrscale->priv;
__secure_tz_entry(TZ_RESET_ID, 0);
priv->no_switch_cnt = 0;
}
static int tz_init(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
struct tz_priv *priv;
/* Trustzone is only valid for some SOCs */
if (!(cpu_is_msm8x60() || cpu_is_msm8960() || cpu_is_msm8930()))
return -EINVAL;
priv = pwrscale->priv = kzalloc(sizeof(struct tz_priv), GFP_KERNEL);
if (pwrscale->priv == NULL)
return -ENOMEM;
priv->governor = TZ_GOVERNOR_ONDEMAND;
kgsl_pwrscale_policy_add_files(device, pwrscale, &tz_attr_group);
return 0;
}
static void tz_close(struct kgsl_device *device, struct kgsl_pwrscale *pwrscale)
{
kgsl_pwrscale_policy_remove_files(device, pwrscale, &tz_attr_group);
kfree(pwrscale->priv);
pwrscale->priv = NULL;
}
struct kgsl_pwrscale_policy kgsl_pwrscale_policy_tz = {
.name = "trustzone",
.init = tz_init,
.idle = tz_idle,
.sleep = tz_sleep,
.wake = tz_wake,
.close = tz_close
};
EXPORT_SYMBOL(kgsl_pwrscale_policy_tz);
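
Assuming the attribute group registered by tz_init() lands in the policy's pwrscale sysfs directory (the exact path is device-specific and not shown in this commit), the governor can be switched from userspace with a plain write; a hedged sketch:

#include <fcntl.h>
#include <unistd.h>
/* Hypothetical sysfs path; adjust for the actual kgsl device node. */
#define TZ_GOVERNOR_PATH "/sys/class/kgsl/kgsl-3d0/pwrscale/trustzone/governor"
static int tz_set_performance(void)
{
	int fd = open(TZ_GOVERNOR_PATH, O_WRONLY);
	ssize_t n;
	if (fd < 0)
		return -1;
	/* tz_governor_store() matches the string with strncmp() */
	n = write(fd, "performance", 11);
	close(fd);
	return (n == 11) ? 0 : -1;
}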

include/drm/kgsl_drm.h
View File

@ -0,0 +1,221 @@
/* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef _KGSL_DRM_H_
#define _KGSL_DRM_H_
#include "drm.h"
#define DRM_KGSL_GEM_CREATE 0x00
#define DRM_KGSL_GEM_PREP 0x01
#define DRM_KGSL_GEM_SETMEMTYPE 0x02
#define DRM_KGSL_GEM_GETMEMTYPE 0x03
#define DRM_KGSL_GEM_MMAP 0x04
#define DRM_KGSL_GEM_ALLOC 0x05
#define DRM_KGSL_GEM_BIND_GPU 0x06
#define DRM_KGSL_GEM_UNBIND_GPU 0x07
#define DRM_KGSL_GEM_GET_BUFINFO 0x08
#define DRM_KGSL_GEM_SET_BUFCOUNT 0x09
#define DRM_KGSL_GEM_SET_ACTIVE 0x0A
#define DRM_KGSL_GEM_LOCK_HANDLE 0x0B
#define DRM_KGSL_GEM_UNLOCK_HANDLE 0x0C
#define DRM_KGSL_GEM_UNLOCK_ON_TS 0x0D
#define DRM_KGSL_GEM_CREATE_FD 0x0E
#define DRM_IOCTL_KGSL_GEM_CREATE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE, struct drm_kgsl_gem_create)
#define DRM_IOCTL_KGSL_GEM_PREP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_PREP, struct drm_kgsl_gem_prep)
#define DRM_IOCTL_KGSL_GEM_SETMEMTYPE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SETMEMTYPE, \
struct drm_kgsl_gem_memtype)
#define DRM_IOCTL_KGSL_GEM_GETMEMTYPE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GETMEMTYPE, \
struct drm_kgsl_gem_memtype)
#define DRM_IOCTL_KGSL_GEM_MMAP \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_MMAP, struct drm_kgsl_gem_mmap)
#define DRM_IOCTL_KGSL_GEM_ALLOC \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_ALLOC, struct drm_kgsl_gem_alloc)
#define DRM_IOCTL_KGSL_GEM_BIND_GPU \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_BIND_GPU, struct drm_kgsl_gem_bind_gpu)
#define DRM_IOCTL_KGSL_GEM_UNBIND_GPU \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNBIND_GPU, \
struct drm_kgsl_gem_bind_gpu)
#define DRM_IOCTL_KGSL_GEM_GET_BUFINFO \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_GET_BUFINFO, \
struct drm_kgsl_gem_bufinfo)
#define DRM_IOCTL_KGSL_GEM_SET_BUFCOUNT \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_BUFCOUNT, \
struct drm_kgsl_gem_bufcount)
#define DRM_IOCTL_KGSL_GEM_SET_ACTIVE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_SET_ACTIVE, \
struct drm_kgsl_gem_active)
#define DRM_IOCTL_KGSL_GEM_LOCK_HANDLE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_LOCK_HANDLE, \
struct drm_kgsl_gem_lock_handles)
#define DRM_IOCTL_KGSL_GEM_UNLOCK_HANDLE \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNLOCK_HANDLE, \
struct drm_kgsl_gem_unlock_handles)
#define DRM_IOCTL_KGSL_GEM_UNLOCK_ON_TS \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_UNLOCK_ON_TS, \
struct drm_kgsl_gem_unlock_on_ts)
#define DRM_IOCTL_KGSL_GEM_CREATE_FD \
DRM_IOWR(DRM_COMMAND_BASE + DRM_KGSL_GEM_CREATE_FD, \
struct drm_kgsl_gem_create_fd)
/* Maximum number of sub buffers per GEM object */
#define DRM_KGSL_GEM_MAX_BUFFERS 2
/* Memory types - these define the source and caching
 * policies of the GEM memory chunk */
/* Legacy definitions left for compatibility */
#define DRM_KGSL_GEM_TYPE_EBI 0
#define DRM_KGSL_GEM_TYPE_SMI 1
#define DRM_KGSL_GEM_TYPE_KMEM 2
#define DRM_KGSL_GEM_TYPE_KMEM_NOCACHE 3
#define DRM_KGSL_GEM_TYPE_MEM_MASK 0xF
/* Contiguous memory (PMEM) */
#define DRM_KGSL_GEM_TYPE_PMEM 0x000100
/* PMEM memory types */
#define DRM_KGSL_GEM_PMEM_EBI 0x001000
#define DRM_KGSL_GEM_PMEM_SMI 0x002000
/* Standard paged memory */
#define DRM_KGSL_GEM_TYPE_MEM 0x010000
/* Caching controls */
#define DRM_KGSL_GEM_CACHE_NONE 0x000000
#define DRM_KGSL_GEM_CACHE_WCOMBINE 0x100000
#define DRM_KGSL_GEM_CACHE_WTHROUGH 0x200000
#define DRM_KGSL_GEM_CACHE_WBACK 0x400000
#define DRM_KGSL_GEM_CACHE_WBACKWA 0x800000
#define DRM_KGSL_GEM_CACHE_MASK 0xF00000
/* FD based objects */
#define DRM_KGSL_GEM_TYPE_FD_FBMEM 0x1000000
#define DRM_KGSL_GEM_TYPE_FD_MASK 0xF000000
/* Timestamp types */
#define DRM_KGSL_GEM_TS_3D 0x00000430
#define DRM_KGSL_GEM_TS_2D 0x00000180
struct drm_kgsl_gem_create {
uint32_t size;
uint32_t handle;
};
struct drm_kgsl_gem_prep {
uint32_t handle;
uint32_t phys;
uint64_t offset;
};
struct drm_kgsl_gem_memtype {
uint32_t handle;
uint32_t type;
};
struct drm_kgsl_gem_mmap {
uint32_t handle;
uint32_t size;
uint32_t hostptr;
uint64_t offset;
};
struct drm_kgsl_gem_alloc {
uint32_t handle;
uint64_t offset;
};
struct drm_kgsl_gem_bind_gpu {
uint32_t handle;
uint32_t gpuptr;
};
struct drm_kgsl_gem_bufinfo {
uint32_t handle;
uint32_t count;
uint32_t active;
uint32_t offset[DRM_KGSL_GEM_MAX_BUFFERS];
uint32_t gpuaddr[DRM_KGSL_GEM_MAX_BUFFERS];
};
struct drm_kgsl_gem_bufcount {
uint32_t handle;
uint32_t bufcount;
};
struct drm_kgsl_gem_active {
uint32_t handle;
uint32_t active;
};
struct drm_kgsl_gem_lock_handles {
uint32_t num_handles;
uint32_t *handle_list;
uint32_t pid;
uint32_t lock_id; /* Returned lock id used for unlocking */
};
struct drm_kgsl_gem_unlock_handles {
uint32_t lock_id;
};
struct drm_kgsl_gem_unlock_on_ts {
uint32_t lock_id;
uint32_t timestamp; /* This field is a hw generated ts */
uint32_t type; /* Which pipe to check for ts generation */
};
struct drm_kgsl_gem_create_fd {
uint32_t fd;
uint32_t handle;
};
#endif
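
To show how these ioctls compose from userspace, here is a hedged sketch that creates a GEM object and requests a memory type; the device node path and the call ordering beyond what the struct layouts imply are assumptions, not something this header specifies:

#include <fcntl.h>
#include <string.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <drm/kgsl_drm.h>

static int kgsl_gem_demo(void)
{
	struct drm_kgsl_gem_create create;
	struct drm_kgsl_gem_memtype mt;
	int fd, ret;
	/* Hypothetical DRM node for the kgsl driver. */
	fd = open("/dev/dri/card0", O_RDWR);
	if (fd < 0)
		return -1;
	memset(&create, 0, sizeof(create));
	create.size = 4096;	/* one page */
	ret = ioctl(fd, DRM_IOCTL_KGSL_GEM_CREATE, &create);
	if (ret == 0) {
		/* Ask for write-combined, ordinarily paged backing. */
		mt.handle = create.handle;
		mt.type = DRM_KGSL_GEM_TYPE_MEM | DRM_KGSL_GEM_CACHE_WCOMBINE;
		ret = ioctl(fd, DRM_IOCTL_KGSL_GEM_SETMEMTYPE, &mt);
	}
	close(fd);
	return ret;
}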