drivers: gpu: backported kgsl from msm-3.0 kernel

tytung 2012-01-13 00:31:11 +08:00
parent a837104e8d
commit f861dded06
6 changed files with 56 additions and 95 deletions

View File

@ -17,7 +17,7 @@
*/
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/slab.h>
#include <linux/fb.h>
#include <linux/file.h>
#include <linux/fs.h>
@ -118,18 +118,16 @@ static void kgsl_clk_enable(void)
{
clk_set_rate(kgsl_driver.ebi1_clk, 128000000);
clk_enable(kgsl_driver.imem_clk);
if (kgsl_driver.grp_pclk)
clk_enable(kgsl_driver.grp_pclk);
clk_enable(kgsl_driver.grp_clk);
#ifdef CONFIG_ARCH_MSM7227
clk_enable(kgsl_driver.grp_pclk);
#endif
}
static void kgsl_clk_disable(void)
{
#ifdef CONFIG_ARCH_MSM7227
clk_disable(kgsl_driver.grp_pclk);
#endif
clk_disable(kgsl_driver.grp_clk);
if (kgsl_driver.grp_pclk)
clk_disable(kgsl_driver.grp_pclk);
clk_disable(kgsl_driver.imem_clk);
clk_set_rate(kgsl_driver.ebi1_clk, 0);
}
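The two hunks above drop the compile-time CONFIG_ARCH_MSM7227 guard around grp_pclk and gate it at run time instead: the pclk is only touched if the probe actually obtained it. A minimal sketch of that optional-clock pattern, assuming only the generic clk API (the struct and function names here are illustrative, not the driver's):

#include <linux/clk.h>
#include <linux/err.h>

struct example_clks {
	struct clk *grp_pclk;	/* NULL on SoCs that provide no grp pclk */
};

/* Probe side: a missing "grp_pclk" is not fatal, just remember NULL. */
static void example_get_pclk(struct device *dev, struct example_clks *c)
{
	struct clk *clk = clk_get(dev, "grp_pclk");

	c->grp_pclk = IS_ERR(clk) ? NULL : clk;
}

/* Enable/disable side: guard every use with a NULL check. */
static void example_pclk_enable(struct example_clks *c)
{
	if (c->grp_pclk)
		clk_enable(c->grp_pclk);
}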
@ -169,7 +167,7 @@ static void kgsl_hw_put_locked(bool start_timer)
{
if ((--kgsl_driver.active_cnt == 0) && start_timer) {
mod_timer(&kgsl_driver.standby_timer,
jiffies + msecs_to_jiffies(512));
jiffies + msecs_to_jiffies(20));
}
}
@ -192,6 +190,11 @@ static int kgsl_first_open_locked(void)
kgsl_clk_enable();
/* init memory apertures */
result = kgsl_sharedmem_init(&kgsl_driver.shmem);
if (result != 0)
goto done;
/* init devices */
result = kgsl_yamato_init(&kgsl_driver.yamato_device,
&kgsl_driver.yamato_config);
@ -218,6 +221,9 @@ static int kgsl_last_release_locked(void)
/* close devices */
kgsl_yamato_close(&kgsl_driver.yamato_device);
/* shutdown memory apertures */
kgsl_sharedmem_close(&kgsl_driver.shmem);
kgsl_clk_disable();
kgsl_driver.active = false;
wake_unlock(&kgsl_driver.wake_lock);
@ -1060,9 +1066,6 @@ static void kgsl_driver_cleanup(void)
kgsl_driver.interrupt_num = 0;
}
/* shutdown memory apertures */
kgsl_sharedmem_close(&kgsl_driver.shmem);
if (kgsl_driver.grp_clk) {
clk_put(kgsl_driver.grp_clk);
kgsl_driver.grp_clk = NULL;
@ -1097,9 +1100,6 @@ static int __devinit kgsl_platform_probe(struct platform_device *pdev)
BUG_ON(kgsl_driver.grp_clk != NULL);
BUG_ON(kgsl_driver.imem_clk != NULL);
BUG_ON(kgsl_driver.ebi1_clk != NULL);
#ifdef CONFIG_ARCH_MSM7227
BUG_ON(kgsl_driver.grp_pclk != NULL);
#endif
kgsl_driver.pdev = pdev;
@ -1114,6 +1114,13 @@ static int __devinit kgsl_platform_probe(struct platform_device *pdev)
}
kgsl_driver.grp_clk = clk;
clk = clk_get(&pdev->dev, "grp_pclk");
if (IS_ERR(clk)) {
KGSL_DRV_ERR("no grp_pclk, continuing\n");
clk = NULL;
}
kgsl_driver.grp_pclk = clk;
clk = clk_get(&pdev->dev, "imem_clk");
if (IS_ERR(clk)) {
result = PTR_ERR(clk);
@ -1130,15 +1137,6 @@ static int __devinit kgsl_platform_probe(struct platform_device *pdev)
}
kgsl_driver.ebi1_clk = clk;
#ifdef CONFIG_ARCH_MSM7227
clk = clk_get(&pdev->dev, "grp_pclk");
if (IS_ERR(clk)) {
result = PTR_ERR(clk);
KGSL_DRV_ERR("clk_get(grp_pclk) returned %d\n", result);
goto done;
}
kgsl_driver.grp_pclk = clk;
#endif
/*acquire interrupt */
kgsl_driver.interrupt_num = platform_get_irq(pdev, 0);
if (kgsl_driver.interrupt_num <= 0) {
@ -1172,9 +1170,6 @@ static int __devinit kgsl_platform_probe(struct platform_device *pdev)
kgsl_driver.shmem.physbase = res->start;
kgsl_driver.shmem.size = resource_size(res);
/* init memory apertures */
result = kgsl_sharedmem_init(&kgsl_driver.shmem);
done:
if (result)
kgsl_driver_cleanup();
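With kgsl_sharedmem_init() removed from probe and kgsl_sharedmem_close() removed from cleanup in the hunks above, the shared-memory aperture now only exists between the first open and the last release. A minimal sketch of that first-open/last-release pattern, using an assumed open counter (the real driver keeps its own count behind a mutex, hence the *_locked names):

static int example_open_count;	/* illustrative; caller holds the driver mutex */

static int example_first_open(void)
{
	int result = 0;

	if (example_open_count++ == 0)
		result = kgsl_sharedmem_init(&kgsl_driver.shmem);
	return result;
}

static void example_last_release(void)
{
	if (--example_open_count == 0)
		kgsl_sharedmem_close(&kgsl_driver.shmem);
}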

View File

@ -45,11 +45,10 @@ struct kgsl_driver {
int have_irq;
struct clk *grp_clk;
struct clk *grp_pclk;
struct clk *imem_clk;
struct clk *ebi1_clk;
#ifdef CONFIG_ARCH_MSM7227
struct clk *grp_pclk;
#endif
struct kgsl_devconfig yamato_config;
uint32_t flags_debug;

View File

@ -28,7 +28,6 @@
#include "kgsl_pm4types.h"
#include "kgsl_cmdstream.h"
//#define DISABLE_SHADOW_WRITES
/*
*
* Memory Map for Register, Constant & Instruction Shadow, and Command Buffers

View File

@ -17,6 +17,7 @@
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
@ -395,16 +396,6 @@ int kgsl_mmu_init(struct kgsl_device *device)
return -ENOMEM;
}
mmu->hwpagetable = mmu->defaultpagetable;
mmu->tlbflushfilter.size = (mmu->va_range /
(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
mmu->tlbflushfilter.base = (unsigned int *)
kzalloc(mmu->tlbflushfilter.size, GFP_KERNEL);
if (!mmu->tlbflushfilter.base) {
KGSL_MEM_ERR("Failed to create tlbflushfilter\n");
kgsl_mmu_close(device);
return -ENOMEM;
}
GSL_TLBFLUSH_FILTER_RESET();
kgsl_yamato_regwrite(device, REG_MH_MMU_PT_BASE,
mmu->hwpagetable->base.gpuaddr);
kgsl_yamato_regwrite(device, REG_MH_MMU_VA_RANGE,
@ -465,7 +456,7 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
unsigned int flags)
{
int numpages;
unsigned int pte, ptefirst, ptelast, physaddr;
unsigned int pte, superpte, ptefirst, ptelast, physaddr;
int flushtlb, alloc_size;
struct kgsl_mmu *mmu = NULL;
int phys_contiguous = flags & KGSL_MEMFLAGS_CONPHYS;
@ -474,9 +465,6 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
KGSL_MEM_VDBG("enter (pt=%p, physaddr=%08x, range=%08d, gpuaddr=%p)\n",
pagetable, address, range, gpuaddr);
#ifdef CONFIG_CACHE_L2X0
l2x0_cache_flush_all();
#endif
mmu = pagetable->mmu;
BUG_ON(mmu == NULL);
@ -526,11 +514,15 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
pte = ptefirst;
flushtlb = 0;
/* tlb needs to be flushed when the first and last pte are not at
* superpte boundaries */
if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
flushtlb = 1;
superpte = ptefirst & (GSL_PT_SUPER_PTE - 1);
for (pte = superpte; pte < ptefirst; pte++) {
/* tlb needs to be flushed only when a dirty superPTE
gets backed */
if (kgsl_pt_map_isdirty(pagetable, pte)) {
flushtlb = 1;
break;
}
}
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
@ -538,10 +530,8 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
flushtlb = 1;
if (kgsl_pt_map_isdirty(pagetable, pte))
flushtlb = 1;
/* mark pte as in use */
if (phys_contiguous)
physaddr = address;
@ -562,6 +552,17 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
address += KGSL_PAGESIZE;
}
/* set superpte to end of next superpte */
superpte = (ptelast + (GSL_PT_SUPER_PTE - 1))
& (GSL_PT_SUPER_PTE - 1);
for (pte = ptelast; pte < superpte; pte++) {
/* tlb needs to be flushed only when a dirty superPTE
gets backed */
if (kgsl_pt_map_isdirty(pagetable, pte)) {
flushtlb = 1;
break;
}
}
KGSL_MEM_INFO("pt %p p %08x g %08x pte f %d l %d n %d f %d\n",
pagetable, address, *gpuaddr, ptefirst, ptelast,
numpages, flushtlb);
@ -570,10 +571,8 @@ int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
/* Invalidate tlb only if current page table used by GPU is the
* pagetable that we used to allocate */
if (flushtlb && (pagetable == mmu->hwpagetable)) {
if (pagetable == mmu->hwpagetable)
kgsl_yamato_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH);
GSL_TLBFLUSH_FILTER_RESET();
}
KGSL_MEM_VDBG("return %d\n", 0);
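The map-path hunks above replace the coarse "flush whenever the mapping is not superPTE-aligned" rule with a check of the page-table entries themselves: unmap leaves PTEs marked GSL_PT_PAGE_DIRTY, and a flush is only requested when a new mapping re-backs a superPTE that still holds dirty entries. A minimal sketch of that decision, assuming GSL_PT_SUPER_PTE is a power of two and reusing the driver's kgsl_pt_map_isdirty() (the helper and macro names here are illustrative):

/* Return nonzero if any PTE in [start, end) is still marked dirty, i.e.
 * it was unmapped earlier without a TLB flush. */
static int example_range_dirty(struct kgsl_pagetable *pagetable,
			       unsigned int start, unsigned int end)
{
	unsigned int pte;

	for (pte = start; pte < end; pte++)
		if (kgsl_pt_map_isdirty(pagetable, pte))
			return 1;
	return 0;
}

/* Round a PTE index down/up to a superPTE boundary (power-of-two
 * GSL_PT_SUPER_PTE assumed). */
#define EXAMPLE_SUPERPTE_FLOOR(pte)	((pte) & ~(GSL_PT_SUPER_PTE - 1))
#define EXAMPLE_SUPERPTE_CEIL(pte)	(((pte) + GSL_PT_SUPER_PTE - 1) & \
					 ~(GSL_PT_SUPER_PTE - 1))

With these, the map path only has to scan EXAMPLE_SUPERPTE_FLOOR(ptefirst)..ptefirst and ptelast..EXAMPLE_SUPERPTE_CEIL(ptelast) and request a flush if either partial range still contains a dirty entry.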
@ -586,8 +585,7 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
int range)
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
struct kgsl_mmu *mmu = NULL;
unsigned int pte, ptefirst, ptelast;
KGSL_MEM_VDBG("enter (pt=%p, gpuaddr=0x%08x, range=%d)\n",
pagetable, gpuaddr, range);
@ -604,24 +602,22 @@ kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
KGSL_MEM_INFO("pt %p gpu %08x pte first %d last %d numpages %d\n",
pagetable, gpuaddr, ptefirst, ptelast, numpages);
mmu = pagetable->mmu;
superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
/* check if PTE exists */
BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
if (pte == superpte)
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
GSL_PT_SUPER_PTE);
}
dmb();
/* Invalidate tlb only if current page table used by GPU is the
* pagetable that we used to allocate */
if (pagetable == pagetable->mmu->hwpagetable)
kgsl_yamato_setstate(pagetable->mmu->device,
KGSL_MMUFLAGS_TLBFLUSH);
gen_pool_free(pagetable->pool, gpuaddr, range);
KGSL_MEM_VDBG("return %d\n", 0);
@ -655,12 +651,6 @@ int kgsl_mmu_close(struct kgsl_device *device)
if (mmu->dummyspace.gpuaddr)
kgsl_sharedmem_free(&mmu->dummyspace);
if (mmu->tlbflushfilter.base) {
mmu->tlbflushfilter.size = 0;
kfree(mmu->tlbflushfilter.base);
mmu->tlbflushfilter.base = NULL;
}
mmu->flags &= ~KGSL_FLAGS_STARTED;
mmu->flags &= ~KGSL_FLAGS_INITIALIZED;
mmu->flags &= ~KGSL_FLAGS_INITIALIZED0;

View File

@ -31,21 +31,6 @@
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)mmu->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(mmu->tlbflushfilter.base,\
0, mmu->tlbflushfilter.size)
extern unsigned int kgsl_cache_enable;
struct kgsl_device;
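For reference, the macros removed above implemented the old flush filter as a plain bitmap: one bit per superPTE, packed into bytes, which is also why kgsl_mmu_init() sized the allocation as va_range / (PAGE_SIZE * GSL_PT_SUPER_PTE * 8) + 1. A worked example under assumed numbers (4 KB pages, 8 PTEs per superPTE, 256 MB of GPU VA space): 256 MB / (4 KB * 8) = 8192 superPTEs, divided by 8 bits per byte = 1024 bytes, + 1 = 1025 bytes for the filter.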
@ -83,11 +68,6 @@ struct kgsl_pagetable {
struct gen_pool *pool;
};
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
struct kgsl_mmu {
unsigned int refcnt;
uint32_t flags;
@ -101,8 +81,6 @@ struct kgsl_mmu {
/* current page table object being used by device mmu */
struct kgsl_pagetable *defaultpagetable;
struct kgsl_pagetable *hwpagetable;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
};

View File

@ -802,7 +802,7 @@ int kgsl_yamato_idle(struct kgsl_device *device, unsigned int timeout)
struct kgsl_mmu_debug mmu_dbg;
unsigned int rbbm_status;
int idle_count = 0;
#define IDLE_COUNT_MAX 1500000
#define IDLE_COUNT_MAX 1000000
KGSL_DRV_VDBG("enter (device=%p, timeout=%d)\n", device, timeout);