[KGSL] remove the older driver along with some

apparently unused and unneeded binaries.
This commit is contained in:
Shantanu Gupta 2012-05-03 12:08:24 +05:30
parent 4df58bdbe5
commit d1aeebac34
32 changed files with 0 additions and 7924 deletions

View File

@ -1407,8 +1407,6 @@ CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_LCDC=y
# CONFIG_FB_MSM_TVOUT is not set
# CONFIG_GPU_MSM_KGSL is not set
# CONFIG_GPU_MSM_KGSL_MMU is not set
# CONFIG_MSM_HDMI is not set
CONFIG_FB_MSM_LOGO=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set

View File

@ -55,22 +55,6 @@ config MSM_ROTATOR_USE_IMEM
block. Or some systems may want the iMem to be dedicated to a
different function.
config GPU_MSM_KGSL_MMU
bool "Turn on MMU for graphics driver "
depends on GPU_MSM_KGSL && MMU
default n
help
If enabled, the GPU driver will allocate memory from vmalloc
and enable the use of GPU MMU, instead of using pmem.
config MSM_KGSL_PER_FD_PAGETABLE
bool "Turn on per-fd pagetable for MMU of graphics driver "
depends on MSM_KGSL_MMU && MMU
default n
help
If enabled, the MMU unit of the GPU driver will use separate
pagetables for each file descriptor
config MSM_HDMI
bool "Support for HDMI in QCT platform"
depends on MSM_MDP31

View File

@ -33,5 +33,3 @@ obj-y += mddi_client_novb9f6_5582.o
obj-$(CONFIG_FB_MSM_LCDC) += mdp_lcdc.o
obj-$(CONFIG_FB_MSM_TVOUT) += tvenc.o tvfb.o
# Yamato GL driver
obj-$(CONFIG_GPU_MSM_KGSL) += gpu/kgsl/

View File

@ -1,12 +0,0 @@
---- CONFIG_MSM_KGSL_MMU Matches (11 in 6 files) ----
kgsl.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_device.h (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_log.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_log.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_mmu.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifndef CONFIG_MSM_KGSL_MMU
kgsl_mmu.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_mmu.h (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_yamato.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU
kgsl_yamato.c (\\jupiter\xinwang\hd2\kernel\msm8x50\20120427A\dorimanx-Dorimanx-HD2-2.6.32.X-69084da_hwa_mix\drivers\video\msm\gpu\kgsl):#ifdef CONFIG_MSM_KGSL_MMU

View File

@ -1,11 +0,0 @@
# Object files linked into the msm_kgsl module; the module itself is
# built only when CONFIG_GPU_MSM_KGSL is enabled (y or m).
msm_kgsl-objs = \
kgsl_drawctxt.o \
kgsl_cmdstream.o \
kgsl.o \
kgsl_log.o \
kgsl_mmu.o \
kgsl_ringbuffer.o \
kgsl_sharedmem.o \
kgsl_yamato.o
obj-$(CONFIG_GPU_MSM_KGSL) += msm_kgsl.o

File diff suppressed because it is too large Load Diff

View File

@ -1,84 +0,0 @@
/*
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef _GSL_DRIVER_H
#define _GSL_DRIVER_H
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#include <linux/miscdevice.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/mutex.h>
#include <linux/wait.h>
#include <linux/timer.h>
#include <linux/wakelock.h>
#include <asm/atomic.h>
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#define DRIVER_NAME "kgsl"
/* Top-level driver state: one global instance (kgsl_driver below) holds
 * the misc char device, clocks, shared memory pool and the single Yamato
 * GPU device. */
struct kgsl_driver {
struct miscdevice misc;
struct platform_device *pdev;
/* number of open file descriptors on the misc device */
atomic_t open_count;
/* serializes ioctl/open/release paths against each other */
struct mutex mutex;
int interrupt_num;
int have_irq;
/* graphics core, AHB, instruction-memory and EBI1 bus clocks */
struct clk *grp_clk;
struct clk *grp_pclk;
struct clk *imem_clk;
struct clk *ebi1_clk;
struct kgsl_devconfig yamato_config;
uint32_t flags_debug;
struct kgsl_sharedmem shmem;
struct kgsl_device yamato_device;
/* list of per-fd private structures */
struct list_head client_list;
/* power/idle bookkeeping: active flag, nesting count, inactivity timer
 * and a wakelock held while the GPU is in use */
bool active;
int active_cnt;
struct timer_list standby_timer;
struct wake_lock wake_lock;
};
extern struct kgsl_driver kgsl_driver;
/* One GPU memory allocation (either pmem-backed via pmem_file, or
 * vmalloc-backed).  Lives on a per-client list and, when freed on a
 * timestamp, on the ringbuffer memqueue via free_list. */
struct kgsl_mem_entry {
struct kgsl_memdesc memdesc;
struct file *pmem_file;
struct list_head list;
struct list_head free_list;
/* timestamp after which this entry may actually be freed */
uint32_t free_timestamp;
/* back pointer to private structure under whose context this
 * allocation is made */
struct kgsl_file_private *priv;
};
void kgsl_remove_mem_entry(struct kgsl_mem_entry *entry);
#endif /* _GSL_DRIVER_H */

View File

@ -1,105 +0,0 @@
/*
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_cmdstream.h"
#include "kgsl_sharedmem.h"
/* No per-device command-stream state needs setting up; always succeeds. */
int kgsl_cmdstream_init(struct kgsl_device *device)
{
return 0;
}
/* Counterpart to kgsl_cmdstream_init; nothing to tear down. */
int kgsl_cmdstream_close(struct kgsl_device *device)
{
return 0;
}
/*
 * Read the GPU's current timestamp of the requested type:
 * KGSL_TIMESTAMP_CONSUMED = start-of-pipe (commands fetched),
 * KGSL_TIMESTAMP_RETIRED  = end-of-pipe (commands fully executed).
 * Any other type returns 0.
 */
uint32_t
kgsl_cmdstream_readtimestamp(struct kgsl_device *device,
enum kgsl_timestamp_type type)
{
uint32_t timestamp = 0;
KGSL_CMD_VDBG("enter (device_id=%d, type=%d)\n", device->id, type);
if (type == KGSL_TIMESTAMP_CONSUMED)
KGSL_CMDSTREAM_GET_SOP_TIMESTAMP(device,
(unsigned int *)&timestamp);
else if (type == KGSL_TIMESTAMP_RETIRED)
KGSL_CMDSTREAM_GET_EOP_TIMESTAMP(device,
(unsigned int *)&timestamp);
/* read barrier: order the timestamp load before any subsequent
 * reads of memory the GPU wrote prior to that timestamp */
rmb();
KGSL_CMD_VDBG("return %d\n", timestamp);
return timestamp;
}
/*
 * Return nonzero if the given timestamp has been retired by the GPU,
 * using the wrap-safe comparison in timestamp_cmp().
 */
int kgsl_cmdstream_check_timestamp(struct kgsl_device *device,
unsigned int timestamp)
{
unsigned int ts_processed;
ts_processed = kgsl_cmdstream_readtimestamp(device,
KGSL_TIMESTAMP_RETIRED);
return timestamp_cmp(ts_processed, timestamp);
}
/*
 * Walk the ringbuffer's deferred-free queue and release every entry whose
 * free_timestamp has been retired by the GPU.  Stops at the first entry
 * that is not yet retired (see the in-line note about list ordering).
 */
void kgsl_cmdstream_memqueue_drain(struct kgsl_device *device)
{
struct kgsl_mem_entry *entry, *entry_tmp;
uint32_t ts_processed;
struct kgsl_ringbuffer *rb = &device->ringbuffer;
/* get current EOP timestamp */
ts_processed =
kgsl_cmdstream_readtimestamp(device, KGSL_TIMESTAMP_RETIRED);
list_for_each_entry_safe(entry, entry_tmp, &rb->memqueue, free_list) {
/*NOTE: this assumes that the free list is sorted by
 * timestamp, but I'm not yet sure that it is a valid
 * assumption
 */
if (!timestamp_cmp(ts_processed, entry->free_timestamp))
break;
KGSL_MEM_DBG("ts_processed %d ts_free %d gpuaddr %x)\n",
ts_processed, entry->free_timestamp,
entry->memdesc.gpuaddr);
kgsl_remove_mem_entry(entry);
}
}
/*
 * Queue a memory entry for deferred freeing: it is appended to the
 * ringbuffer memqueue and released by kgsl_cmdstream_memqueue_drain()
 * once the GPU retires 'timestamp'.  Appending (list_add_tail) is what
 * keeps the queue timestamp-ordered, as drain() assumes.
 */
int
kgsl_cmdstream_freememontimestamp(struct kgsl_device *device,
struct kgsl_mem_entry *entry,
uint32_t timestamp,
enum kgsl_timestamp_type type)
{
struct kgsl_ringbuffer *rb = &device->ringbuffer;
KGSL_MEM_DBG("enter (dev %p gpuaddr %x ts %d)\n",
device, entry->memdesc.gpuaddr, timestamp);
(void)type; /* unref. For now just use EOP timestamp */
list_add_tail(&entry->free_list, &rb->memqueue);
entry->free_timestamp = timestamp;
return 0;
}

View File

@ -1,54 +0,0 @@
#ifndef __KGSL_CMDSTREAM_H
#define __KGSL_CMDSTREAM_H
#include <linux/msm_kgsl.h>
#include "kgsl_device.h"
#include "kgsl_log.h"
#ifdef KGSL_DEVICE_SHADOW_MEMSTORE_TO_USER
#define KGSL_CMDSTREAM_USE_MEM_TIMESTAMP
#endif /* KGSL_DEVICE_SHADOW_MEMSTORE_TO_USER */
/* Start-of-pipe timestamp: read from the shared memstore when the
 * memstore is shadowed to user space, otherwise directly from the
 * CP timestamp register. */
#ifdef KGSL_CMDSTREAM_USE_MEM_TIMESTAMP
#define KGSL_CMDSTREAM_GET_SOP_TIMESTAMP(device, data) \
kgsl_sharedmem_read(&device->memstore, (data), \
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp), 4)
#else
#define KGSL_CMDSTREAM_GET_SOP_TIMESTAMP(device, data) \
kgsl_yamato_regread(device, REG_CP_TIMESTAMP, (data))
#endif /* KGSL_CMDSTREAM_USE_MEM_TIMESTAMP */
/* End-of-pipe timestamp always comes from the shared memstore. */
#define KGSL_CMDSTREAM_GET_EOP_TIMESTAMP(device, data) \
kgsl_sharedmem_read(&device->memstore, (data), \
KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), 4)
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
int kgsl_cmdstream_init(struct kgsl_device *device);
int kgsl_cmdstream_close(struct kgsl_device *device);
void kgsl_cmdstream_memqueue_drain(struct kgsl_device *device);
uint32_t
kgsl_cmdstream_readtimestamp(struct kgsl_device *device,
enum kgsl_timestamp_type type);
int kgsl_cmdstream_check_timestamp(struct kgsl_device *device,
unsigned int timestamp);
int
kgsl_cmdstream_freememontimestamp(struct kgsl_device *device,
struct kgsl_mem_entry *entry,
uint32_t timestamp,
enum kgsl_timestamp_type type);
/*
 * Wrap-safe timestamp comparison: true when 'new' is at or after 'old'.
 * The unsigned subtraction wraps modulo 2^32 and is then viewed as a
 * signed distance; the "< -20000" clause also accepts the case where
 * 'old' leads 'new' by a very large margin, treating it as wraparound.
 * NOTE(review): converting an out-of-range unsigned difference to int is
 * implementation-defined in ISO C (well-defined two's complement on the
 * kernel's supported compilers) — confirm if ever reused elsewhere.
 */
static inline bool timestamp_cmp(unsigned int new, unsigned int old)
{
int ts_diff = new - old;
return (ts_diff >= 0) || (ts_diff < -20000);
}
#endif /* __KGSL_CMDSTREAM_H */

View File

@ -1,141 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef _KGSL_DEVICE_H
#define _KGSL_DEVICE_H
#include <asm/atomic.h>
#include <linux/types.h>
#include <linux/irqreturn.h>
#include <linux/wait.h>
#include <linux/msm_kgsl.h>
#include "kgsl_drawctxt.h"
#include "kgsl_mmu.h"
#include "kgsl_ringbuffer.h"
/* maximum number of simultaneously open draw contexts per device */
#define KGSL_CONTEXT_MAX 8
#define KGSL_TIMEOUT_NONE 0
#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
/* device lifecycle flag bits (struct kgsl_device.flags) */
#define KGSL_DEV_FLAGS_INITIALIZED0 0x00000001
#define KGSL_DEV_FLAGS_INITIALIZED 0x00000002
#define KGSL_DEV_FLAGS_STARTED 0x00000004
#define KGSL_DEV_FLAGS_ACTIVE 0x00000008
#define KGSL_CHIPID_YAMATODX_REV21 0x20100
#define KGSL_CHIPID_YAMATODX_REV211 0x20101
/* Private memory flags for use with memdesc->priv field */
#define KGSL_MEMFLAGS_MEM_REQUIRES_FLUSH 0x00000001
#define KGSL_MEMFLAGS_VMALLOC_MEM 0x00000002
#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
struct kgsl_device;
struct platform_device;
/* A mapped MMIO/GPU memory region: CPU virtual base, physical base,
 * GPU-visible base address and size. */
struct kgsl_memregion {
unsigned char *mmio_virt_base;
unsigned int mmio_phys_base;
uint32_t gpu_base;
unsigned int sizebytes;
};
/* Per-GPU device state: register space, shared memstore, MMU, GMEM,
 * ringbuffer and the set of draw contexts. */
struct kgsl_device {
unsigned int refcnt;
uint32_t flags;
enum kgsl_deviceid id;
unsigned int chip_id;
struct kgsl_memregion regspace;
struct kgsl_memdesc memstore;
struct kgsl_mmu mmu;
struct kgsl_memregion gmemspace;
struct kgsl_ringbuffer ringbuffer;
unsigned int drawctxt_count;
/* currently bound context, or NULL */
struct kgsl_drawctxt *drawctxt_active;
struct kgsl_drawctxt drawctxt[KGSL_CONTEXT_MAX];
/* waiters blocked on indirect buffer (IB1) completion */
wait_queue_head_t ib1_wq;
};
/* Boot-time configuration handed to kgsl_yamato_init(): register space,
 * MMU/MPU setup and GMEM placement. */
struct kgsl_devconfig {
struct kgsl_memregion regspace;
unsigned int mmu_config;
uint32_t mpu_base;
int mpu_range;
uint32_t va_base;
unsigned int va_range;
struct kgsl_memregion gmemspace;
};
int kgsl_yamato_start(struct kgsl_device *device, uint32_t flags);
int kgsl_yamato_stop(struct kgsl_device *device);
bool kgsl_yamato_is_idle(struct kgsl_device *device);
int kgsl_yamato_idle(struct kgsl_device *device, unsigned int timeout);
int kgsl_yamato_getproperty(struct kgsl_device *device,
enum kgsl_property_type type, void *value,
unsigned int sizebytes);
int kgsl_yamato_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value);
int kgsl_yamato_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value);
int kgsl_yamato_waittimestamp(struct kgsl_device *device,
unsigned int timestamp, unsigned int timeout);
int kgsl_yamato_init(struct kgsl_device *, struct kgsl_devconfig *);
int kgsl_yamato_close(struct kgsl_device *device);
int kgsl_yamato_runpending(struct kgsl_device *device);
int __init kgsl_yamato_config(struct kgsl_devconfig *,
struct platform_device *pdev);
void kgsl_register_dump(struct kgsl_device *device);
int kgsl_yamato_setup_pt(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
int kgsl_yamato_cleanup_pt(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
/* setstate flushes MMU state to the GPU; a no-op stub when the GPU MMU
 * is compiled out. */
#ifdef CONFIG_GPU_MSM_KGSL_MMU
int kgsl_yamato_setstate(struct kgsl_device *device, uint32_t flags);
#else
static inline int kgsl_yamato_setstate(struct kgsl_device *device, uint32_t flags)
{ return 0; }
#endif
irqreturn_t kgsl_yamato_isr(int irq, void *data);
#endif /* _KGSL_DEVICE_H */

File diff suppressed because it is too large Load Diff

View File

@ -1,120 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef __GSL_DRAWCTXT_H
#define __GSL_DRAWCTXT_H
/* Flags */
#define CTXT_FLAGS_NOT_IN_USE 0x00000000
#define CTXT_FLAGS_IN_USE 0x00000001
/* state shadow memory allocated */
#define CTXT_FLAGS_STATE_SHADOW 0x00000010
/* gmem shadow memory allocated */
#define CTXT_FLAGS_GMEM_SHADOW 0x00000100
/* gmem must be copied to shadow */
#define CTXT_FLAGS_GMEM_SAVE 0x00000200
/* gmem can be restored from shadow */
#define CTXT_FLAGS_GMEM_RESTORE 0x00000400
/* shader must be copied to shadow */
#define CTXT_FLAGS_SHADER_SAVE 0x00002000
/* shader can be restored from shadow */
#define CTXT_FLAGS_SHADER_RESTORE 0x00004000
#include "kgsl_sharedmem.h"
#include "yamato_reg.h"
#define KGSL_MAX_GMEM_SHADOW_BUFFERS 2
struct kgsl_device;
/* types */
/* draw context */
/* Shadow copy of on-chip GMEM used to save/restore a context's render
 * target across context switches.  The gmem_save/gmem_restore triples
 * are IB (indirect buffer) descriptors pointing at the pre-built
 * command sequences. */
struct gmem_shadow_t {
struct kgsl_memdesc gmemshadow; /* Shadow buffer address */
/* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
 * 256 rows. */
/* width & height must be a multiples of 32, in case tiled textures
 * are used. */
enum COLORFORMATX format;
unsigned int size; /* Size of surface used to store GMEM */
unsigned int width; /* Width of surface used to store GMEM */
unsigned int height; /* Height of surface used to store GMEM */
unsigned int pitch; /* Pitch of surface used to store GMEM */
int offset;
unsigned int offset_x;
unsigned int offset_y;
unsigned int gmem_offset_x;
unsigned int gmem_offset_y;
unsigned int gmem_pitch; /* Pitch value used for GMEM */
unsigned int *gmem_save_commands;
unsigned int *gmem_restore_commands;
unsigned int gmem_save[3];
unsigned int gmem_restore[3];
struct kgsl_memdesc quad_vertices;
struct kgsl_memdesc quad_texcoords;
};
/* One GPU draw context: pre-built register/shader save-restore command
 * buffers plus the GMEM shadow(s) above. */
struct kgsl_drawctxt {
uint32_t flags;
struct kgsl_pagetable *pagetable;
struct kgsl_memdesc gpustate;
unsigned int reg_save[3];
unsigned int reg_restore[3];
unsigned int shader_save[3];
unsigned int shader_fixup[3];
unsigned int shader_restore[3];
unsigned int chicken_restore[3];
unsigned int bin_base_offset;
/* Information of the GMEM shadow that is created in context create */
struct gmem_shadow_t context_gmem_shadow;
/* User defined GMEM shadow buffers */
struct gmem_shadow_t user_gmem_shadow[KGSL_MAX_GMEM_SHADOW_BUFFERS];
};
int kgsl_drawctxt_create(struct kgsl_device *, struct kgsl_pagetable *,
unsigned int flags,
unsigned int *drawctxt_id);
int kgsl_drawctxt_destroy(struct kgsl_device *device, unsigned int drawctxt_id);
int kgsl_drawctxt_init(struct kgsl_device *device);
int kgsl_drawctxt_close(struct kgsl_device *device);
void kgsl_drawctxt_switch(struct kgsl_device *device,
struct kgsl_drawctxt *drawctxt,
unsigned int flags);
int kgsl_drawctxt_bind_gmem_shadow(struct kgsl_device *device,
unsigned int drawctxt_id,
const struct kgsl_gmem_desc *gmem_desc,
unsigned int shadow_x,
unsigned int shadow_y,
const struct kgsl_buffer_desc
*shadow_buffer, unsigned int buffer_id);
int kgsl_drawctxt_set_bin_base_offset(struct kgsl_device *device,
unsigned int drawctxt_id,
unsigned int offset);
#endif /* __GSL_DRAWCTXT_H */

View File

@ -1,292 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2008
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/debugfs.h>
#include "kgsl_log.h"
#include "kgsl_ringbuffer.h"
#include "kgsl_device.h"
#include "kgsl.h"
/*default log levels is error for everything*/
#define KGSL_LOG_LEVEL_DEFAULT 3
#define KGSL_LOG_LEVEL_MAX 7
unsigned int kgsl_drv_log = KGSL_LOG_LEVEL_DEFAULT;
unsigned int kgsl_cmd_log = KGSL_LOG_LEVEL_DEFAULT;
unsigned int kgsl_ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
unsigned int kgsl_mem_log = KGSL_LOG_LEVEL_DEFAULT;
unsigned int kgsl_cache_enable;
#ifdef CONFIG_DEBUG_FS
/* Common setter for the per-subsystem log levels exposed in debugfs;
 * clamps the written value to KGSL_LOG_LEVEL_MAX. */
static int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
{
*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
return 0;
}
/* driver-core log level (log_level_drv) */
static int kgsl_drv_log_set(void *data, u64 val)
{
return kgsl_log_set(&kgsl_drv_log, data, val);
}
static int kgsl_drv_log_get(void *data, u64 *val)
{
*val = kgsl_drv_log;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_drv_log_fops, kgsl_drv_log_get,
kgsl_drv_log_set, "%llu\n");
/* command-stream log level (log_level_cmd) */
static int kgsl_cmd_log_set(void *data, u64 val)
{
return kgsl_log_set(&kgsl_cmd_log, data, val);
}
static int kgsl_cmd_log_get(void *data, u64 *val)
{
*val = kgsl_cmd_log;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_cmd_log_fops, kgsl_cmd_log_get,
kgsl_cmd_log_set, "%llu\n");
/* draw-context log level (log_level_ctxt) */
static int kgsl_ctxt_log_set(void *data, u64 val)
{
return kgsl_log_set(&kgsl_ctxt_log, data, val);
}
static int kgsl_ctxt_log_get(void *data, u64 *val)
{
*val = kgsl_ctxt_log;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_ctxt_log_fops, kgsl_ctxt_log_get,
kgsl_ctxt_log_set, "%llu\n");
/* memory-subsystem log level (log_level_mem) */
static int kgsl_mem_log_set(void *data, u64 val)
{
return kgsl_log_set(&kgsl_mem_log, data, val);
}
static int kgsl_mem_log_get(void *data, u64 *val)
{
*val = kgsl_mem_log;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_mem_log_fops, kgsl_mem_log_get,
kgsl_mem_log_set, "%llu\n");
#ifdef DEBUG
/*
 * debugfs open hook for the "rb_regs" ringbuffer register-dump file.
 *
 * Stashes the inode's private data (set when the file was created with
 * debugfs_create_file) on the struct file so the read hook can use it.
 * Always succeeds.
 *
 * Fix: struct file_operations.open returns int, not ssize_t; declare
 * the handler with the matching prototype so the .open assignment does
 * not rely on an incompatible function-pointer conversion.
 */
static int rb_regs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * debugfs read hook: snapshot the Yamato ringbuffer / CP registers via
 * kgsl_ringbuffer_debug() and format them into a text report.
 * NOTE(review): 'buffer' is a function-local static, so concurrent
 * readers share (and may race on) the same 4 KB scratch buffer.
 */
static ssize_t rb_regs_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
const int debug_bufmax = 4096;
static char buffer[4096];
int n = 0;
struct kgsl_device *device = NULL;
struct kgsl_ringbuffer *rb = NULL;
struct kgsl_rb_debug rb_debug;
device = &kgsl_driver.yamato_device;
rb = &device->ringbuffer;
kgsl_ringbuffer_debug(rb, &rb_debug);
n += scnprintf(buffer + n, debug_bufmax - n,
"rbbm_status %08x mem_rptr %08x mem_wptr_poll %08x\n",
rb_debug.rbbm_status,
rb_debug.mem_rptr,
rb_debug.mem_wptr_poll);
n += scnprintf(buffer + n, debug_bufmax - n,
"rb_base %08x rb_cntl %08x rb_rptr_addr %08x"
" rb_rptr %08x rb_rptr_wr %08x\n",
rb_debug.cp_rb_base,
rb_debug.cp_rb_cntl,
rb_debug.cp_rb_rptr_addr,
rb_debug.cp_rb_rptr,
rb_debug.cp_rb_rptr_wr);
n += scnprintf(buffer + n, debug_bufmax - n,
"rb_wptr %08x rb_wptr_delay %08x rb_wptr_base %08x"
" ib1_base %08x ib1_bufsz %08x\n",
rb_debug.cp_rb_wptr,
rb_debug.cp_rb_wptr_delay,
rb_debug.cp_rb_wptr_base,
rb_debug.cp_ib1_base,
rb_debug.cp_ib1_bufsz);
n += scnprintf(buffer + n, debug_bufmax - n,
"ib2_base %08x ib2_bufsz %08x st_base %08x"
" st_bufsz %08x cp_me_cntl %08x cp_me_status %08x\n",
rb_debug.cp_ib2_base,
rb_debug.cp_ib2_bufsz,
rb_debug.cp_st_base,
rb_debug.cp_st_bufsz,
rb_debug.cp_me_cntl,
rb_debug.cp_me_status);
n += scnprintf(buffer + n, debug_bufmax - n,
"csq_cp_rb %08x csq_cp_ib1 %08x csq_cp_ib2 %08x\n",
rb_debug.cp_csq_rb_stat,
rb_debug.cp_csq_ib1_stat,
rb_debug.cp_csq_ib2_stat);
n += scnprintf(buffer + n, debug_bufmax - n,
"cp_debug %08x cp_stat %08x cp_int_status %08x"
" cp_int_cntl %08x\n",
rb_debug.cp_debug,
rb_debug.cp_stat,
rb_debug.cp_int_status,
rb_debug.cp_int_cntl);
n += scnprintf(buffer + n, debug_bufmax - n,
"sop_timestamp: %0d eop_timestamp: %d\n",
rb_debug.sop_timestamp,
rb_debug.eop_timestamp);
/* n is bumped past the formatted text so the NUL written below is
 * included in the bytes handed to simple_read_from_buffer() */
n++;
buffer[n] = 0;
return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
static struct file_operations kgsl_rb_regs_fops = {
.read = rb_regs_read,
.open = rb_regs_open,
};
#endif /*DEBUG*/
#ifdef DEBUG
/*
 * debugfs open hook for the "mmu_regs" register-dump file.
 *
 * Stashes the inode's private data (set when the file was created with
 * debugfs_create_file) on the struct file so the read hook can use it.
 * Always succeeds.
 *
 * Fix: struct file_operations.open returns int, not ssize_t; declare
 * the handler with the matching prototype so the .open assignment does
 * not rely on an incompatible function-pointer conversion.
 */
static int mmu_regs_open(struct inode *inode, struct file *file)
{
	file->private_data = inode->i_private;
	return 0;
}
/*
 * debugfs read hook: snapshot the GPU MMU registers via kgsl_mmu_debug()
 * and format them into a text report.
 * NOTE(review): like rb_regs_read, uses a shared static scratch buffer.
 */
static ssize_t mmu_regs_read(struct file *file, char __user *buf, size_t count,
loff_t *ppos)
{
const int debug_bufmax = 4096;
static char buffer[4096];
int n = 0;
struct kgsl_device *device = NULL;
struct kgsl_mmu *mmu = NULL;
struct kgsl_mmu_debug mmu_debug;
device = &kgsl_driver.yamato_device;
mmu = &device->mmu;
kgsl_mmu_debug(mmu, &mmu_debug);
n += scnprintf(buffer + n, debug_bufmax - n,
"config %08x mpu_base %08x mpu_end %08x\n",
mmu_debug.config,
mmu_debug.mpu_base,
mmu_debug.mpu_end);
n += scnprintf(buffer + n, debug_bufmax - n,
"va_range %08x pt_base %08x\n",
mmu_debug.va_range,
mmu_debug.pt_base);
n += scnprintf(buffer + n, debug_bufmax - n,
"page_fault %08x trans_error %08x axi_error %08x\n",
mmu_debug.page_fault,
mmu_debug.trans_error,
mmu_debug.axi_error);
n += scnprintf(buffer + n, debug_bufmax - n,
"interrupt_mask %08x interrupt_status %08x\n",
mmu_debug.interrupt_mask,
mmu_debug.interrupt_status);
/* include the trailing NUL in the reported length (see rb_regs_read) */
n++;
buffer[n] = 0;
return simple_read_from_buffer(buf, count, ppos, buffer, n);
}
static struct file_operations kgsl_mmu_regs_fops = {
.read = mmu_regs_read,
.open = mmu_regs_open,
};
#endif /*DEBUG*/
#ifdef CONFIG_GPU_MSM_KGSL_MMU
/* debugfs "cache_enable" attribute: any nonzero write enables the
 * kgsl_cache_enable flag, zero disables it. */
static int kgsl_cache_enable_set(void *data, u64 val)
{
kgsl_cache_enable = (val != 0);
return 0;
}
static int kgsl_cache_enable_get(void *data, u64 *val)
{
*val = kgsl_cache_enable;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_cache_enable_fops, kgsl_cache_enable_get,
kgsl_cache_enable_set, "%llu\n");
#endif
#endif /* CONFIG_DEBUG_FS */
/*
 * Create the "kgsl" debugfs directory and its log-level / register-dump
 * files.  Always returns 0: debugfs entries are best-effort and their
 * absence is not an error.
 */
int kgsl_debug_init(void)
{
#ifdef CONFIG_DEBUG_FS
struct dentry *dent;
/* 0 == NULL parent -> create at the debugfs root */
dent = debugfs_create_dir("kgsl", 0);
/* NOTE(review): only IS_ERR is checked; a NULL return (debugfs
 * compiled out) would fall through and create files at the root —
 * confirm whether that is intended on this kernel. */
if (IS_ERR(dent))
return 0;
debugfs_create_file("log_level_cmd", 0644, dent, 0,
&kgsl_cmd_log_fops);
debugfs_create_file("log_level_ctxt", 0644, dent, 0,
&kgsl_ctxt_log_fops);
debugfs_create_file("log_level_drv", 0644, dent, 0,
&kgsl_drv_log_fops);
debugfs_create_file("log_level_mem", 0644, dent, 0,
&kgsl_mem_log_fops);
/* register dumps are read-only and only built in DEBUG kernels */
#ifdef DEBUG
debugfs_create_file("rb_regs", 0444, dent, 0,
&kgsl_rb_regs_fops);
#endif
#ifdef DEBUG
debugfs_create_file("mmu_regs", 0444, dent, 0,
&kgsl_mmu_regs_fops);
#endif
#ifdef CONFIG_GPU_MSM_KGSL_MMU
debugfs_create_file("cache_enable", 0644, dent, 0,
&kgsl_cache_enable_fops);
#endif
#endif /* CONFIG_DEBUG_FS */
return 0;
}

View File

@ -1,105 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2008
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef _GSL_LOG_H
#define _GSL_LOG_H
#include <linux/bug.h>
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#include <linux/device.h>
/* runtime-adjustable log levels (see kgsl_debug_init / debugfs) */
extern unsigned int kgsl_drv_log;
extern unsigned int kgsl_cmd_log;
extern unsigned int kgsl_ctxt_log;
extern unsigned int kgsl_mem_log;
struct device *kgsl_driver_getdevnode(void);
int kgsl_debug_init(void);
/* Level-gated logging: each macro fires only when the subsystem's level
 * (lvl) reaches its threshold — 7 for vdbg/dbg, 6 info, 4 warn, 3 err,
 * 2 fatal — and prefixes the message with the calling function name. */
#define KGSL_LOG_VDBG(lvl, fmt, args...) \
do { \
if ((lvl) >= 7) \
dev_vdbg(kgsl_driver_getdevnode(), "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_DBG(lvl, fmt, args...) \
do { \
if ((lvl) >= 7) \
dev_dbg(kgsl_driver_getdevnode(), "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_INFO(lvl, fmt, args...) \
do { \
if ((lvl) >= 6) \
dev_info(kgsl_driver_getdevnode(), "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_WARN(lvl, fmt, args...) \
do { \
if ((lvl) >= 4) \
dev_warn(kgsl_driver_getdevnode(), "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_ERR(lvl, fmt, args...) \
do { \
if ((lvl) >= 3) \
dev_err(kgsl_driver_getdevnode(), "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_FATAL(lvl, fmt, args...) \
do { \
if ((lvl) >= 2) \
dev_crit(kgsl_driver_getdevnode(), "|%s| " fmt, \
__func__, ##args);\
} while (0)
/* per-subsystem shorthands bound to the matching log-level variable */
#define KGSL_DRV_VDBG(fmt, args...) KGSL_LOG_VDBG(kgsl_drv_log, fmt, ##args)
#define KGSL_DRV_DBG(fmt, args...) KGSL_LOG_DBG(kgsl_drv_log, fmt, ##args)
#define KGSL_DRV_INFO(fmt, args...) KGSL_LOG_INFO(kgsl_drv_log, fmt, ##args)
#define KGSL_DRV_WARN(fmt, args...) KGSL_LOG_WARN(kgsl_drv_log, fmt, ##args)
#define KGSL_DRV_ERR(fmt, args...) KGSL_LOG_ERR(kgsl_drv_log, fmt, ##args)
#define KGSL_DRV_FATAL(fmt, args...) KGSL_LOG_FATAL(kgsl_drv_log, fmt, ##args)
#define KGSL_CMD_VDBG(fmt, args...) KGSL_LOG_VDBG(kgsl_cmd_log, fmt, ##args)
#define KGSL_CMD_DBG(fmt, args...) KGSL_LOG_DBG(kgsl_cmd_log, fmt, ##args)
#define KGSL_CMD_INFO(fmt, args...) KGSL_LOG_INFO(kgsl_cmd_log, fmt, ##args)
#define KGSL_CMD_WARN(fmt, args...) KGSL_LOG_WARN(kgsl_cmd_log, fmt, ##args)
#define KGSL_CMD_ERR(fmt, args...) KGSL_LOG_ERR(kgsl_cmd_log, fmt, ##args)
#define KGSL_CMD_FATAL(fmt, args...) KGSL_LOG_FATAL(kgsl_cmd_log, fmt, ##args)
#define KGSL_CTXT_VDBG(fmt, args...) KGSL_LOG_VDBG(kgsl_ctxt_log, fmt, ##args)
#define KGSL_CTXT_DBG(fmt, args...) KGSL_LOG_DBG(kgsl_ctxt_log, fmt, ##args)
#define KGSL_CTXT_INFO(fmt, args...) KGSL_LOG_INFO(kgsl_ctxt_log, fmt, ##args)
#define KGSL_CTXT_WARN(fmt, args...) KGSL_LOG_WARN(kgsl_ctxt_log, fmt, ##args)
#define KGSL_CTXT_ERR(fmt, args...) KGSL_LOG_ERR(kgsl_ctxt_log, fmt, ##args)
#define KGSL_CTXT_FATAL(fmt, args...) KGSL_LOG_FATAL(kgsl_ctxt_log, fmt, ##args)
#define KGSL_MEM_VDBG(fmt, args...) KGSL_LOG_VDBG(kgsl_mem_log, fmt, ##args)
#define KGSL_MEM_DBG(fmt, args...) KGSL_LOG_DBG(kgsl_mem_log, fmt, ##args)
#define KGSL_MEM_INFO(fmt, args...) KGSL_LOG_INFO(kgsl_mem_log, fmt, ##args)
#define KGSL_MEM_WARN(fmt, args...) KGSL_LOG_WARN(kgsl_mem_log, fmt, ##args)
#define KGSL_MEM_ERR(fmt, args...) KGSL_LOG_ERR(kgsl_mem_log, fmt, ##args)
#define KGSL_MEM_FATAL(fmt, args...) KGSL_LOG_FATAL(kgsl_mem_log, fmt, ##args)
#endif /* _GSL_LOG_H */

View File

@ -1,672 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include "kgsl_mmu.h"
#include "kgsl.h"
#include "kgsl_log.h"
#include "yamato_reg.h"
/* Bitfield view of one page-table entry: R/W/dirty flags in the low
 * bits, 20-bit physical page number in the high bits. */
struct kgsl_pte_debug {
unsigned int read:1;
unsigned int write:1;
unsigned int dirty:1;
unsigned int reserved:9;
unsigned int phyaddr:20;
};
#define GSL_PTE_SIZE 4
#define GSL_PT_EXTRA_ENTRIES 16
/* low 3 bits of a PTE carry the flag bits; the rest is the page address */
#define GSL_PT_PAGE_BITS_MASK 0x00000007
#define GSL_PT_PAGE_ADDR_MASK (~(KGSL_PAGESIZE - 1))
/* MH interrupts we enable: AXI read/write errors (page fault is handled
 * but not listed here — see kgsl_mh_intrcallback) */
#define GSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
/* Index of the PTE covering GPU virtual address 'va' in pagetable 'pt'. */
uint32_t kgsl_pt_entry_get(struct kgsl_pagetable *pt, uint32_t va)
{
return (va - pt->va_base) >> KGSL_PAGESIZE_SHIFT;
}
/* Raw read of PTE 'pte' from the pagetable's host-mapped backing store. */
uint32_t kgsl_pt_map_get(struct kgsl_pagetable *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
return baseptr[pte];
}
/* Raw write of PTE 'pte'. */
void kgsl_pt_map_set(struct kgsl_pagetable *pt, uint32_t pte, uint32_t val)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
baseptr[pte] = val;
}
/* NOTE(review): this macro calls gsl_pt_map_get (lowercase), which is
 * not defined here — the helper above is kgsl_pt_map_get.  The macro
 * appears unused; it would fail to compile if ever expanded. */
#define GSL_PT_MAP_DEBUG(pte) ((struct kgsl_pte_debug *) \
&gsl_pt_map_get(pagetable, pte))
/* OR flag bits into a PTE. */
void kgsl_pt_map_setbits(struct kgsl_pagetable *pt, uint32_t pte, uint32_t bits)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
baseptr[pte] |= bits;
}
/* Replace only the page-address portion of a PTE, preserving flag bits. */
void kgsl_pt_map_setaddr(struct kgsl_pagetable *pt, uint32_t pte,
uint32_t pageaddr)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
uint32_t val = baseptr[pte];
val &= ~GSL_PT_PAGE_ADDR_MASK;
val |= (pageaddr & GSL_PT_PAGE_ADDR_MASK);
baseptr[pte] = val;
}
/* Clear everything in a PTE except the dirty flag. */
void kgsl_pt_map_resetall(struct kgsl_pagetable *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
baseptr[pte] &= GSL_PT_PAGE_DIRTY;
}
/* Clear the requested flag bits (address bits are never cleared here). */
void kgsl_pt_map_resetbits(struct kgsl_pagetable *pt, uint32_t pte,
uint32_t bits)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
baseptr[pte] &= ~(bits & GSL_PT_PAGE_BITS_MASK);
}
/* Nonzero if the PTE's dirty flag is set. */
int kgsl_pt_map_isdirty(struct kgsl_pagetable *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
return baseptr[pte] & GSL_PT_PAGE_DIRTY;
}
/* Page-address portion of a PTE (flag bits masked off). */
uint32_t kgsl_pt_map_getaddr(struct kgsl_pagetable *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}
/*
 * Memory-hub (MH) interrupt handler: reads the MH interrupt status,
 * logs AXI read/write errors and MMU page faults (dumping MMU state when
 * built with DEBUG), then acknowledges whatever bits were set.  Only the
 * first matching condition is reported per invocation (else-if chain).
 */
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
unsigned int status = 0;
unsigned int reg;
unsigned int axi_error;
struct kgsl_mmu_debug dbg;
KGSL_MEM_VDBG("enter (device=%p)\n", device);
kgsl_yamato_regread(device, REG_MH_INTERRUPT_STATUS, &status);
if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR) {
kgsl_yamato_regread(device, REG_MH_AXI_ERROR, &axi_error);
KGSL_MEM_FATAL("axi read error interrupt (%08x)\n", axi_error);
kgsl_mmu_debug(&device->mmu, &dbg);
} else if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR) {
kgsl_yamato_regread(device, REG_MH_AXI_ERROR, &axi_error);
KGSL_MEM_FATAL("axi write error interrupt (%08x)\n", axi_error);
kgsl_mmu_debug(&device->mmu, &dbg);
} else if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT) {
kgsl_yamato_regread(device, REG_MH_MMU_PAGE_FAULT, &reg);
KGSL_MEM_FATAL("mmu page fault interrupt: %08x\n", reg);
kgsl_mmu_debug(&device->mmu, &dbg);
} else {
KGSL_MEM_DBG("bad bits in REG_MH_INTERRUPT_STATUS %08x\n",
status);
}
/* ack every pending bit, including any unrecognized ones */
kgsl_yamato_regwrite(device, REG_MH_INTERRUPT_CLEAR, status);
/*TODO: figure out how to handle error interrupts.
 * specifically, page faults should probably nuke the client that
 * caused them, but we don't have enough info to figure that out yet.
 */
KGSL_MEM_VDBG("return\n");
}
#ifdef DEBUG
/*
 * Snapshot every MH/MMU register into @regs and log the lot.
 * Compiled only for DEBUG builds; release builds get an empty stub
 * from the header.
 */
void kgsl_mmu_debug(struct kgsl_mmu *mmu, struct kgsl_mmu_debug *regs)
{
	memset(regs, 0, sizeof(struct kgsl_mmu_debug));

	kgsl_yamato_regread(mmu->device, REG_MH_MMU_CONFIG,
			    &regs->config);
	kgsl_yamato_regread(mmu->device, REG_MH_MMU_MPU_BASE,
			    &regs->mpu_base);
	kgsl_yamato_regread(mmu->device, REG_MH_MMU_MPU_END,
			    &regs->mpu_end);
	kgsl_yamato_regread(mmu->device, REG_MH_MMU_VA_RANGE,
			    &regs->va_range);
	kgsl_yamato_regread(mmu->device, REG_MH_MMU_PT_BASE,
			    &regs->pt_base);
	kgsl_yamato_regread(mmu->device, REG_MH_MMU_PAGE_FAULT,
			    &regs->page_fault);
	kgsl_yamato_regread(mmu->device, REG_MH_MMU_TRAN_ERROR,
			    &regs->trans_error);
	kgsl_yamato_regread(mmu->device, REG_MH_AXI_ERROR,
			    &regs->axi_error);
	kgsl_yamato_regread(mmu->device, REG_MH_INTERRUPT_MASK,
			    &regs->interrupt_mask);
	kgsl_yamato_regread(mmu->device, REG_MH_INTERRUPT_STATUS,
			    &regs->interrupt_status);

	KGSL_MEM_DBG("mmu config %08x mpu_base %08x mpu_end %08x\n",
		     regs->config, regs->mpu_base, regs->mpu_end);
	KGSL_MEM_DBG("mmu va_range %08x pt_base %08x \n",
		     regs->va_range, regs->pt_base);
	KGSL_MEM_DBG("mmu page_fault %08x tran_err %08x\n",
		     regs->page_fault, regs->trans_error);
	KGSL_MEM_DBG("mmu int mask %08x int status %08x\n",
		     regs->interrupt_mask, regs->interrupt_status);
}
#endif
/*
 * Allocate and initialize a pagetable object for @mmu: the kzalloc'd
 * descriptor, a gen_pool covering the GPU virtual range, and physically
 * contiguous memory for the PTE array (zeroed, i.e. all entries start
 * not-dirty).  Returns the new pagetable or NULL; on failure all
 * partially acquired resources are released.
 */
struct kgsl_pagetable *kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu)
{
	int status = 0;
	struct kgsl_pagetable *pagetable = NULL;
	uint32_t flags;

	KGSL_MEM_VDBG("enter (mmu=%p)\n", mmu);

	pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
	if (pagetable == NULL) {
		KGSL_MEM_ERR("Unable to allocate pagetable object.\n");
		return NULL;
	}

	pagetable->mmu = mmu;
	pagetable->va_base = mmu->va_base;
	pagetable->va_range = mmu->va_range;
	pagetable->last_superpte = 0;
	/* one PTE per page plus a few spare entries */
	pagetable->max_entries = (mmu->va_range >> KGSL_PAGESIZE_SHIFT)
				 + GSL_PT_EXTRA_ENTRIES;

	pagetable->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (pagetable->pool == NULL) {
		KGSL_MEM_ERR("Unable to allocate virtualaddr pool.\n");
		goto err_gen_pool_create;
	}

	if (gen_pool_add(pagetable->pool, pagetable->va_base,
			 pagetable->va_range, -1)) {
		/* fix: this failure comes from gen_pool_add, not _create */
		KGSL_MEM_ERR("gen_pool_add failed for pagetable %p\n",
			     pagetable);
		goto err_gen_pool_add;
	}

	/* allocate page table memory */
	flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
		 | KGSL_MEMFLAGS_STRICTREQUEST);
	status = kgsl_sharedmem_alloc(flags,
				      pagetable->max_entries * GSL_PTE_SIZE,
				      &pagetable->base);
	if (status) {
		KGSL_MEM_ERR("cannot alloc page tables\n");
		goto err_kgsl_sharedmem_alloc;
	}

	/* reset page table entries
	 * -- all pte's are marked as not dirty initially
	 */
	kgsl_sharedmem_set(&pagetable->base, 0, 0, pagetable->base.size);
	pagetable->base.gpuaddr = pagetable->base.physaddr;

	KGSL_MEM_VDBG("return %p\n", pagetable);
	return pagetable;

err_kgsl_sharedmem_alloc:
err_gen_pool_add:
	gen_pool_destroy(pagetable->pool);
err_gen_pool_create:
	kfree(pagetable);
	return NULL;
}
/*
 * Tear down a pagetable created by kgsl_mmu_createpagetableobject().
 * NULL is tolerated; always returns 0.
 */
int kgsl_mmu_destroypagetableobject(struct kgsl_pagetable *pagetable)
{
	KGSL_MEM_VDBG("enter (pagetable=%p)\n", pagetable);

	if (pagetable == NULL)
		goto done;

	/* gpuaddr is only set once the PTE memory was allocated */
	if (pagetable->base.gpuaddr)
		kgsl_sharedmem_free(&pagetable->base);

	if (pagetable->pool != NULL) {
		gen_pool_destroy(pagetable->pool);
		pagetable->pool = NULL;
	}

	kfree(pagetable);

done:
	KGSL_MEM_VDBG("return 0x%08x\n", 0);
	return 0;
}
/*
 * Point the device MMU at @pagetable.  A no-op unless the MMU has been
 * started; the hardware is only reprogrammed (pagetable update + TLB
 * flush) when the requested pagetable differs from the current one.
 */
int kgsl_mmu_setstate(struct kgsl_device *device,
				struct kgsl_pagetable *pagetable)
{
	struct kgsl_mmu *mmu = &device->mmu;
	int result = 0;

	KGSL_MEM_VDBG("enter (device=%p, pagetable=%p)\n", device, pagetable);

	if (mmu->flags & KGSL_FLAGS_STARTED) {
		KGSL_MEM_INFO("from %p to %p\n", mmu->hwpagetable, pagetable);
		if (pagetable != mmu->hwpagetable) {
			/* switch the hardware pagetable and flush stale
			 * translations */
			mmu->hwpagetable = pagetable;
			result = kgsl_yamato_setstate(mmu->device,
						      KGSL_MMUFLAGS_TLBFLUSH |
						      KGSL_MMUFLAGS_PTUPDATE);
		}
	}

	KGSL_MEM_VDBG("return %d\n", result);
	return result;
}
/*
 * kgsl_mmu_init - bring up the device MMU.
 *
 * Programs REG_MH_MMU_CONFIG and the MH interrupt mask, and -- when the
 * config enables translation -- allocates the dummy/TRAN_ERROR scratch
 * memory, the default pagetable, and the TLB flush filter, then points
 * the hardware at the default pagetable.  Returns 0 on success or a
 * negative errno; on failure kgsl_mmu_close() undoes any partial setup.
 */
int kgsl_mmu_init(struct kgsl_device *device)
{
	/*
	 * intialize device mmu
	 *
	 * call this with the global lock held
	 */
	int status;
	uint32_t flags;
	struct kgsl_mmu *mmu = &device->mmu;
#ifdef _DEBUG
	struct kgsl_mmu_debug regs;
#endif /* _DEBUG */

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	/* INITIALIZED0 marks that this function already ran */
	if (mmu->flags & KGSL_FLAGS_INITIALIZED0) {
		KGSL_MEM_INFO("MMU already initialized.\n");
		return 0;
	}

	mmu->device = device;

#ifndef CONFIG_GPU_MSM_KGSL_MMU
	/* MMU support compiled out: force the hardware MMU off */
	mmu->config = 0x00000000;
#endif

	/* setup MMU and sub-client behavior */
	kgsl_yamato_regwrite(device, REG_MH_MMU_CONFIG, mmu->config);

	/* enable axi interrupts */
	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
		     GSL_MMU_INT_MASK);
	kgsl_yamato_regwrite(device, REG_MH_INTERRUPT_MASK, GSL_MMU_INT_MASK);

	mmu->flags |= KGSL_FLAGS_INITIALIZED0;

	/* MMU not enabled */
	if ((mmu->config & 0x1) == 0) {
		KGSL_MEM_VDBG("return %d\n", 0);
		return 0;
	}

	/* idle device */
	kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);

	/* make sure aligned to pagesize */
	BUG_ON(mmu->mpu_base & (KGSL_PAGESIZE - 1));
	BUG_ON((mmu->mpu_base + mmu->mpu_range) & (KGSL_PAGESIZE - 1));

	/* define physical memory range accessible by the core */
	kgsl_yamato_regwrite(device, REG_MH_MMU_MPU_BASE,
			     mmu->mpu_base);
	kgsl_yamato_regwrite(device, REG_MH_MMU_MPU_END,
			     mmu->mpu_base + mmu->mpu_range);

	/* enable axi interrupts, now including page faults */
	KGSL_MEM_DBG("enabling mmu interrupts mask=0x%08lx\n",
		     GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
	kgsl_yamato_regwrite(device, REG_MH_INTERRUPT_MASK,
			     GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);

	mmu->flags |= KGSL_FLAGS_INITIALIZED;

	/* sub-client MMU lookups require address translation */
	if ((mmu->config & ~0x1) > 0) {
		/*make sure virtual address range is a multiple of 64Kb */
		BUG_ON(mmu->va_range & ((1 << 16) - 1));

		/* allocate memory used for completing r/w operations that
		 * cannot be mapped by the MMU
		 */
		flags = (KGSL_MEMFLAGS_ALIGN4K | KGSL_MEMFLAGS_CONPHYS
			 | KGSL_MEMFLAGS_STRICTREQUEST);
		status = kgsl_sharedmem_alloc(flags, 64, &mmu->dummyspace);
		if (status != 0) {
			KGSL_MEM_ERR
			    ("Unable to allocate dummy space memory.\n");
			kgsl_mmu_close(device);
			return status;
		}

		kgsl_sharedmem_set(&mmu->dummyspace, 0, 0,
				   mmu->dummyspace.size);

		/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
		 * to complete transactions in case of an MMU fault. Note that
		 * we'll leave the bottom 32 bytes of the dummyspace for other
		 * purposes (e.g. use it when dummy read cycles are needed
		 * for other blocks */
		kgsl_yamato_regwrite(device,
				     REG_MH_MMU_TRAN_ERROR,
				     mmu->dummyspace.physaddr + 32);

		mmu->defaultpagetable = kgsl_mmu_createpagetableobject(mmu);
		if (!mmu->defaultpagetable) {
			KGSL_MEM_ERR("Failed to create global page table\n");
			kgsl_mmu_close(device);
			return -ENOMEM;
		}
		mmu->hwpagetable = mmu->defaultpagetable;

		/* one filter bit per superpte, eight bits per byte */
		mmu->tlbflushfilter.size = (mmu->va_range /
				(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
		mmu->tlbflushfilter.base = (unsigned int *)
			kzalloc(mmu->tlbflushfilter.size, GFP_KERNEL);
		if (!mmu->tlbflushfilter.base) {
			KGSL_MEM_ERR("Failed to create tlbflushfilter\n");
			kgsl_mmu_close(device);
			return -ENOMEM;
		}
		GSL_TLBFLUSH_FILTER_RESET();

		/* point the hardware at the default pagetable and flush */
		kgsl_yamato_regwrite(device, REG_MH_MMU_PT_BASE,
				     mmu->hwpagetable->base.gpuaddr);
		kgsl_yamato_regwrite(device, REG_MH_MMU_VA_RANGE,
				     (mmu->hwpagetable->va_base |
				      (mmu->hwpagetable->va_range >> 16)));
		status = kgsl_yamato_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
		if (status) {
			kgsl_mmu_close(device);
			return status;
		}

		mmu->flags |= KGSL_FLAGS_STARTED;
	}

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;
}
#ifdef CONFIG_GPU_MSM_KGSL_MMU
/*
 * Walk the current process's page tables and return a pointer to the PTE
 * mapping @vaddr, or NULL if any level of the walk is absent/bad.
 *
 * NOTE(review): the PTE is mapped with pte_offset_map(); the caller is
 * presumably responsible for the matching pte_unmap() -- verify at the
 * call sites.
 */
pte_t *kgsl_get_pte_from_vaddr(unsigned int vaddr)
{
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL;

	pgd_ptr = pgd_offset(current->mm, vaddr);
	/* fix: dereference pgd_ptr -- the original used an undeclared
	 * identifier `pgd`, which cannot compile */
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		KGSL_MEM_ERR
		    ("Invalid pgd entry found while trying to convert virtual "
		     "address to physical\n");
		return NULL;
	}

	pmd_ptr = pmd_offset(pgd_ptr, vaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		KGSL_MEM_ERR
		    ("Invalid pmd entry found while trying to convert virtual "
		     "address to physical\n");
		return NULL;
	}

	pte_ptr = pte_offset_map(pmd_ptr, vaddr);
	if (!pte_ptr) {
		KGSL_MEM_ERR
		    ("Unable to map pte entry while trying to convert virtual "
		     "address to physical\n");
		return NULL;
	}

	return pte_ptr;
}
/*
 * Map @range bytes starting at @address into @pagetable's GPU address
 * space, returning the chosen GPU virtual address in *gpuaddr.
 *
 * @address is either a physically contiguous address (when @flags has
 * KGSL_MEMFLAGS_CONPHYS) or a vmalloc-space CPU virtual address whose
 * backing pages are resolved one at a time via vmalloc_to_pfn().
 * @protflags must be a combination of GSL_PT_PAGE_RV / GSL_PT_PAGE_WV.
 * Returns 0 on success or a negative errno.
 */
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
				unsigned int address,
				int range,
				unsigned int protflags,
				unsigned int *gpuaddr,
				unsigned int flags)
{
	int numpages;
	unsigned int pte, ptefirst, ptelast, physaddr;
	int flushtlb, alloc_size;
	struct kgsl_mmu *mmu = NULL;
	int phys_contiguous = flags & KGSL_MEMFLAGS_CONPHYS;
	unsigned int align = flags & KGSL_MEMFLAGS_ALIGN_MASK;

	KGSL_MEM_VDBG("enter (pt=%p, physaddr=%08x, range=%08d, gpuaddr=%p)\n",
		      pagetable, address, range, gpuaddr);

	mmu = pagetable->mmu;

	BUG_ON(mmu == NULL);
	BUG_ON(protflags & ~(GSL_PT_PAGE_RV | GSL_PT_PAGE_WV));
	BUG_ON(protflags == 0);
	BUG_ON(range <= 0);

	/* Only support 4K and 8K alignment for now */
	if (align != KGSL_MEMFLAGS_ALIGN8K && align != KGSL_MEMFLAGS_ALIGN4K) {
		KGSL_MEM_ERR("Cannot map memory according to "
			     "requested flags: %08x\n", flags);
		return -EINVAL;
	}

	/* Make sure address being mapped is at 4K boundary */
	if (!IS_ALIGNED(address, KGSL_PAGESIZE) || range & ~KGSL_PAGEMASK) {
		KGSL_MEM_ERR("Cannot map address not aligned "
			     "at page boundary: address: %08x, range: %08x\n",
			     address, range);
		return -EINVAL;
	}

	/* over-allocate one page for 8K alignment so we can trim below */
	alloc_size = range;
	if (align == KGSL_MEMFLAGS_ALIGN8K)
		alloc_size += KGSL_PAGESIZE;

	*gpuaddr = gen_pool_alloc(pagetable->pool, alloc_size);
	if (*gpuaddr == 0) {
		KGSL_MEM_ERR("gen_pool_alloc failed: %d\n", alloc_size);
		return -ENOMEM;
	}

	if (align == KGSL_MEMFLAGS_ALIGN8K) {
		if (*gpuaddr & ((1 << 13) - 1)) {
			/* Not 8k aligned, align it */
			gen_pool_free(pagetable->pool, *gpuaddr, KGSL_PAGESIZE);
			*gpuaddr = *gpuaddr + KGSL_PAGESIZE;
		} else
			/* already aligned: return the spare trailing page */
			gen_pool_free(pagetable->pool, *gpuaddr + range,
				      KGSL_PAGESIZE);
	}

	numpages = (range >> KGSL_PAGESIZE_SHIFT);

	ptefirst = kgsl_pt_entry_get(pagetable, *gpuaddr);
	ptelast = ptefirst + numpages;

	pte = ptefirst;
	flushtlb = 0;

	/* tlb needs to be flushed when the first and last pte are not at
	 * superpte boundaries */
	if ((ptefirst & (GSL_PT_SUPER_PTE - 1)) != 0 ||
	    ((ptelast + 1) & (GSL_PT_SUPER_PTE-1)) != 0)
		flushtlb = 1;

	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		uint32_t val = kgsl_pt_map_getaddr(pagetable, pte);
		BUG_ON(val != 0 && val != GSL_PT_PAGE_DIRTY);
#endif
		/* a dirty superpte means stale TLB entries: flush before reuse */
		if ((pte & (GSL_PT_SUPER_PTE-1)) == 0)
			if (GSL_TLBFLUSH_FILTER_ISDIRTY(pte / GSL_PT_SUPER_PTE))
				flushtlb = 1;
		/* mark pte as in use */
		if (phys_contiguous)
			physaddr = address;
		else {
			physaddr = vmalloc_to_pfn((void *)address);
			physaddr <<= PAGE_SHIFT;
		}

		if (physaddr)
			kgsl_pt_map_set(pagetable, pte, physaddr | protflags);
		else {
			KGSL_MEM_ERR
			("Unable to find physaddr for vmallloc address: %x\n",
			     address);
			kgsl_mmu_unmap(pagetable, *gpuaddr, range);
			return -EFAULT;
		}
		address += KGSL_PAGESIZE;
	}

	/* NOTE(review): `address` was advanced through the loop above, so
	 * this log prints the end of the region, not its start. */
	KGSL_MEM_INFO("pt %p p %08x g %08x pte f %d l %d n %d f %d\n",
		      pagetable, address, *gpuaddr, ptefirst, ptelast,
		      numpages, flushtlb);

	/* make the PTE writes visible before any TLB flush */
	dmb();

	/* Invalidate tlb only if current page table used by GPU is the
	 * pagetable that we used to allocate */
	if (flushtlb && (pagetable == mmu->hwpagetable)) {
		kgsl_yamato_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH);
		GSL_TLBFLUSH_FILTER_RESET();
	}

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;
}
/*
 * Unmap @range bytes at @gpuaddr from @pagetable: every covered PTE is
 * reset to GSL_PT_PAGE_DIRTY (so a later map knows a TLB flush is
 * needed) and the virtual range is returned to the pool.  The TLB is
 * NOT flushed here; dirty superptes are recorded in the flush filter.
 */
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable, unsigned int gpuaddr,
	       int range)
{
	unsigned int numpages;
	unsigned int pte, ptefirst, ptelast, superpte;
	struct kgsl_mmu *mmu = NULL;

	KGSL_MEM_VDBG("enter (pt=%p, gpuaddr=0x%08x, range=%d)\n",
		      pagetable, gpuaddr, range);

	BUG_ON(range <= 0);

	/* round the page count up for a partial trailing page */
	numpages = (range >> KGSL_PAGESIZE_SHIFT);
	if (range & (KGSL_PAGESIZE - 1))
		numpages++;

	ptefirst = kgsl_pt_entry_get(pagetable, gpuaddr);
	ptelast = ptefirst + numpages;

	KGSL_MEM_INFO("pt %p gpu %08x pte first %d last %d numpages %d\n",
		      pagetable, gpuaddr, ptefirst, ptelast, numpages);

	mmu = pagetable->mmu;

	/* mark the superpte containing the first entry dirty */
	superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
	GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
	for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
		/* check if PTE exists */
		BUG_ON(!kgsl_pt_map_getaddr(pagetable, pte));
#endif
		kgsl_pt_map_set(pagetable, pte, GSL_PT_PAGE_DIRTY);
		/* mark each superpte boundary we cross as dirty too */
		superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
		if (pte == superpte)
			GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
					GSL_PT_SUPER_PTE);
	}

	/* make the PTE writes visible before the range can be reused */
	dmb();

	gen_pool_free(pagetable->pool, gpuaddr, range);

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;
}
#endif
/*
 * kgsl_mmu_close - shut down the device MMU and release everything
 * kgsl_mmu_init() acquired: interrupts masked, MMU disabled, dummy
 * space, TLB flush filter and default pagetable freed, state flags
 * cleared.  Safe to call after a partial init; always returns 0.
 *
 * (Removed an unused `#ifdef _DEBUG int i;` local -- nothing in this
 * function referenced it.)
 */
int kgsl_mmu_close(struct kgsl_device *device)
{
	/*
	 * close device mmu
	 *
	 * call this with the global lock held
	 */
	struct kgsl_mmu *mmu = &device->mmu;

	KGSL_MEM_VDBG("enter (device=%p)\n", device);

	if (mmu->flags & KGSL_FLAGS_INITIALIZED0) {
		/* disable mh interrupts */
		KGSL_MEM_DBG("disabling mmu interrupts\n");
		kgsl_yamato_regwrite(device, REG_MH_INTERRUPT_MASK, 0);

		/* disable MMU */
		kgsl_yamato_regwrite(device, REG_MH_MMU_CONFIG, 0x00000000);

		if (mmu->dummyspace.gpuaddr)
			kgsl_sharedmem_free(&mmu->dummyspace);

		if (mmu->tlbflushfilter.base) {
			mmu->tlbflushfilter.size = 0;
			kfree(mmu->tlbflushfilter.base);
			mmu->tlbflushfilter.base = NULL;
		}

		mmu->flags &= ~KGSL_FLAGS_STARTED;
		mmu->flags &= ~KGSL_FLAGS_INITIALIZED;
		mmu->flags &= ~KGSL_FLAGS_INITIALIZED0;

		kgsl_mmu_destroypagetableobject(mmu->defaultpagetable);
		mmu->defaultpagetable = NULL;
	}

	KGSL_MEM_VDBG("return %d\n", 0);
	return 0;
}

View File

@ -1,169 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef __GSL_MMU_H
#define __GSL_MMU_H
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#define GSL_PT_SUPER_PTE 8
#define GSL_PT_PAGE_WV 0x00000001
#define GSL_PT_PAGE_RV 0x00000002
#define GSL_PT_PAGE_DIRTY 0x00000004
/* MMU Flags */
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
/* Macros to manage TLB flushing */
/*
 * The flush filter is a bitmap with one bit per superpte (group of
 * GSL_PT_SUPER_PTE entries).  A set bit means that superpte was unmapped
 * since the last TLB flush and stale translations may exist for it.
 * NOTE: these macros expect a variable named `mmu` to be in scope at
 * the expansion site.
 */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
	(*((unsigned char *) \
	(((unsigned int)mmu->tlbflushfilter.base) \
	+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
	(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
	(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
	(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
	(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(mmu->tlbflushfilter.base,\
	0, mmu->tlbflushfilter.size)
extern unsigned int kgsl_cache_enable;
struct kgsl_device;
struct kgsl_mmu_debug {
unsigned int config;
unsigned int mpu_base;
unsigned int mpu_end;
unsigned int va_range;
unsigned int pt_base;
unsigned int page_fault;
unsigned int trans_error;
unsigned int axi_error;
unsigned int interrupt_mask;
unsigned int interrupt_status;
};
struct kgsl_ptstats {
int64_t maps;
int64_t unmaps;
int64_t superpteallocs;
int64_t superptefrees;
int64_t ptswitches;
int64_t tlbflushes[KGSL_DEVICE_MAX];
};
/* A GPU pagetable: one flat PTE array covering [va_base, va_base+va_range). */
struct kgsl_pagetable {
	unsigned int refcnt;		/* presumably a reference count -- not managed in kgsl_mmu.c; verify */
	struct kgsl_mmu *mmu;		/* owning MMU */
	struct kgsl_memdesc base;	/* memory backing the PTE array */
	uint32_t va_base;		/* first GPU virtual address served */
	unsigned int va_range;		/* size of the served range, in bytes */
	unsigned int last_superpte;
	unsigned int max_entries;	/* PTE count (va_range pages + extras) */
	struct gen_pool *pool;		/* allocator handing out GPU virtual ranges */
};
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
/* Per-device MMU state, embedded in struct kgsl_device. */
struct kgsl_mmu {
	unsigned int refcnt;		/* presumably a reference count -- not managed here; verify */
	uint32_t flags;			/* KGSL_FLAGS_INITIALIZED0/INITIALIZED/STARTED */
	struct kgsl_device *device;	/* back-pointer to the owning device */
	unsigned int config;		/* REG_MH_MMU_CONFIG value; bit 0 enables the MMU */
	uint32_t mpu_base;		/* physical window the core may access */
	int mpu_range;
	uint32_t va_base;		/* GPU virtual range served by pagetables */
	unsigned int va_range;
	struct kgsl_memdesc dummyspace;	/* scratch for transactions completed on MMU fault */
	/* current page table object being used by device mmu */
	struct kgsl_pagetable *defaultpagetable;	/* global/default pagetable */
	struct kgsl_pagetable *hwpagetable;	/* pagetable the hardware points at */
	/* Maintain filter to manage tlb flushing */
	struct kgsl_tlbflushfilter tlbflushfilter;
};
/* Nonzero once kgsl_mmu_init() has fully started the MMU. */
static inline int
kgsl_mmu_isenabled(struct kgsl_mmu *mmu)
{
	if (mmu->flags & KGSL_FLAGS_STARTED)
		return 1;
	return 0;
}
int kgsl_mmu_init(struct kgsl_device *device);
int kgsl_mmu_close(struct kgsl_device *device);
struct kgsl_pagetable *kgsl_mmu_createpagetableobject(struct kgsl_mmu *mmu);
int kgsl_mmu_destroypagetableobject(struct kgsl_pagetable *pagetable);
int kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
/*
 * Mapping API: with CONFIG_GPU_MSM_KGSL_MMU the real implementations in
 * kgsl_mmu.c are used; without it the inline stubs below make the GPU
 * address identical to the supplied address and unmap a no-op.
 */
#ifdef CONFIG_GPU_MSM_KGSL_MMU
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
		 unsigned int address,
		 int range,
		 unsigned int protflags,
		 unsigned int *gpuaddr,
		 unsigned int flags);

int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
		   unsigned int gpuaddr, int range);

pte_t *kgsl_get_pte_from_vaddr(unsigned int vaddr);
#else
/* identity mapping: the GPU address is simply the supplied address */
static inline int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
			       unsigned int address,
			       int range,
			       unsigned int protflags,
			       unsigned int *gpuaddr,
			       unsigned int flags)
{
	*gpuaddr = address;
	return 0;
}

static inline int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
				 unsigned int gpuaddr, int range) { return 0; }

static inline pte_t *kgsl_get_pte_from_vaddr(unsigned int vaddr) {return NULL;}
#endif
int kgsl_mmu_querystats(struct kgsl_pagetable *pagetable,
struct kgsl_ptstats *stats);
void kgsl_mh_intrcallback(struct kgsl_device *device);
#ifdef DEBUG
void kgsl_mmu_debug(struct kgsl_mmu *, struct kgsl_mmu_debug*);
#else
static inline void kgsl_mmu_debug(struct kgsl_mmu *mmu,
struct kgsl_mmu_debug *mmu_debug) { }
#endif /* DEBUG */
#endif /* __GSL_MMU_H */

View File

@ -1,182 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef __GSL_PM4TYPES_H
#define __GSL_PM4TYPES_H
#define PM4_PKT_MASK 0xc0000000
#define PM4_TYPE0_PKT ((unsigned int)0 << 30)
#define PM4_TYPE1_PKT ((unsigned int)1 << 30)
#define PM4_TYPE2_PKT ((unsigned int)2 << 30)
#define PM4_TYPE3_PKT ((unsigned int)3 << 30)
/* type3 packets */
/* initialize CP's micro-engine */
#define PM4_ME_INIT 0x48
/* skip N 32-bit words to get to the next packet */
#define PM4_NOP 0x10
/* indirect buffer dispatch. prefetch parser uses this packet type to determine
* whether to pre-fetch the IB
*/
#define PM4_INDIRECT_BUFFER 0x3f
/* indirect buffer dispatch. same as IB, but init is pipelined */
#define PM4_INDIRECT_BUFFER_PFD 0x37
/* wait for the IDLE state of the engine */
#define PM4_WAIT_FOR_IDLE 0x26
/* wait until a register or memory location is a specific value */
#define PM4_WAIT_REG_MEM 0x3c
/* wait until a register location is equal to a specific value */
#define PM4_WAIT_REG_EQ 0x52
/* wait until a register location is >= a specific value */
/* NOTE(review): "WAT" looks like a typo for "WAIT" (cf. PM4_WAIT_REG_EQ
 * above); left unchanged because renaming would break existing users. */
#define PM4_WAT_REG_GTE 0x53
/* wait until a read completes */
#define PM4_WAIT_UNTIL_READ 0x5c
/* wait until all base/size writes from an IB_PFD packet have completed */
#define PM4_WAIT_IB_PFD_COMPLETE 0x5d
/* register read/modify/write */
#define PM4_REG_RMW 0x21
/* reads register in chip and writes to memory */
#define PM4_REG_TO_MEM 0x3e
/* write N 32-bit words to memory */
#define PM4_MEM_WRITE 0x3d
/* write CP_PROG_COUNTER value to memory */
#define PM4_MEM_WRITE_CNTR 0x4f
/* conditional execution of a sequence of packets */
#define PM4_COND_EXEC 0x44
/* conditional write to memory or register */
#define PM4_COND_WRITE 0x45
/* generate an event that creates a write to memory when completed */
#define PM4_EVENT_WRITE 0x46
/* generate a VS|PS_done event */
#define PM4_EVENT_WRITE_SHD 0x58
/* generate a cache flush done event */
#define PM4_EVENT_WRITE_CFL 0x59
/* generate a z_pass done event */
#define PM4_EVENT_WRITE_ZPD 0x5b
/* initiate fetch of index buffer and draw */
#define PM4_DRAW_INDX 0x22
/* draw using supplied indices in packet */
#define PM4_DRAW_INDX_2 0x36
/* initiate fetch of index buffer and binIDs and draw */
#define PM4_DRAW_INDX_BIN 0x34
/* initiate fetch of bin IDs and draw using supplied indices */
#define PM4_DRAW_INDX_2_BIN 0x35
/* begin/end initiator for viz query extent processing */
#define PM4_VIZ_QUERY 0x23
/* fetch state sub-blocks and initiate shader code DMAs */
#define PM4_SET_STATE 0x25
/* load constant into chip and to memory */
#define PM4_SET_CONSTANT 0x2d
/* load sequencer instruction memory (pointer-based) */
#define PM4_IM_LOAD 0x27
/* load sequencer instruction memory (code embedded in packet) */
#define PM4_IM_LOAD_IMMEDIATE 0x2b
/* load constants from a location in memory */
#define PM4_LOAD_CONSTANT_CONTEXT 0x2e
/* selective invalidation of state pointers */
#define PM4_INVALIDATE_STATE 0x3b
/* dynamically changes shader instruction memory partition */
#define PM4_SET_SHADER_BASES 0x4A
/* sets the 64-bit BIN_MASK register in the PFP */
#define PM4_SET_BIN_MASK 0x50
/* sets the 64-bit BIN_SELECT register in the PFP */
#define PM4_SET_BIN_SELECT 0x51
/* updates the current context, if needed */
#define PM4_CONTEXT_UPDATE 0x5e
/* generate interrupt from the command stream */
#define PM4_INTERRUPT 0x40
/* copy sequencer instruction memory to system memory */
#define PM4_IM_STORE 0x2c
/* program an offset that will added to the BIN_BASE value of
* the 3D_DRAW_INDX_BIN packet */
#define PM4_SET_BIN_BASE_OFFSET 0x4B
#define PM4_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
/* packet header building macros */
/* type0 header: count field in bits 16+, register index in the low 15 bits */
#define pm4_type0_packet(regindx, cnt) \
	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))

/* fix: the original expansion had an unbalanced extra '(' and could not
 * compile if ever expanded; bit 15 selects "write same register" mode */
#define pm4_type0_packet_for_sameregister(regindx, cnt) \
	(PM4_TYPE0_PKT | (((cnt)-1) << 16) | (1 << 15) | \
		((regindx) & 0x7FFF))

#define pm4_type1_packet(reg0, reg1) \
	 (PM4_TYPE1_PKT | ((reg1) << 12) | (reg0))

/* type3 header: count in bits 16+, opcode in bits 8-15 */
#define pm4_type3_packet(opcode, cnt) \
	(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))

/* bit 0 set = predicated on the current viz-query state */
#define pm4_predicated_type3_packet(opcode, cnt) \
	(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)

#define pm4_nop_packet(cnt) \
	(PM4_TYPE3_PKT | (((cnt)-1) << 16) | (PM4_NOP << 8))
/* packet headers */
#define PM4_HDR_ME_INIT pm4_type3_packet(PM4_ME_INIT, 18)
#define PM4_HDR_INDIRECT_BUFFER_PFD pm4_type3_packet(PM4_INDIRECT_BUFFER_PFD, 2)
#define PM4_HDR_INDIRECT_BUFFER pm4_type3_packet(PM4_INDIRECT_BUFFER, 2)
#endif /* __GSL_PM4TYPES_H */

View File

@ -1,837 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/firmware.h>
#include <linux/io.h>
#include <linux/sched.h>
#include <linux/wait.h>
#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_log.h"
#include "kgsl_pm4types.h"
#include "kgsl_ringbuffer.h"
#include "kgsl_cmdstream.h"
#include "yamato_reg.h"
#define GSL_RB_NOP_SIZEDWORDS 2
/* protected mode error checking below register address 0x800
* note: if CP_INTERRUPT packet is used then checking needs
* to change to below register address 0x7C8
*/
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2
#define GSL_CP_INT_MASK \
(CP_INT_CNTL__SW_INT_MASK | \
CP_INT_CNTL__T0_PACKET_IN_IB_MASK | \
CP_INT_CNTL__OPCODE_ERROR_MASK | \
CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK | \
CP_INT_CNTL__RESERVED_BIT_ERROR_MASK | \
CP_INT_CNTL__IB_ERROR_MASK | \
CP_INT_CNTL__IB2_INT_MASK | \
CP_INT_CNTL__IB1_INT_MASK | \
CP_INT_CNTL__RB_INT_MASK)
#define YAMATO_PFP_FW "yamato_pfp.fw"
#define YAMATO_PM4_FW "yamato_pm4.fw"
/* ringbuffer size log2 quadwords equivalent */
/* ringbuffer size log2 quadwords equivalent */
/*
 * Returns floor(log2(sizedwords / 2)), i.e. the ringbuffer size expressed
 * as a log2 count of quadwords (2 dwords each); 0 for sizedwords < 4.
 *
 * Fix: the original was declared plain `inline` with no `static`/`extern`
 * companion; under C99/C11 inline semantics that provides no external
 * definition, which fails to link whenever the compiler does not inline
 * the call (e.g. at -O0).  A normal external function avoids the pitfall
 * while keeping the same name and signature for callers.
 */
unsigned int kgsl_ringbuffer_sizelog2quadwords(unsigned int sizedwords)
{
	unsigned int sizelog2quadwords = 0;
	int i = sizedwords >> 1;

	while (i >>= 1)
		sizelog2quadwords++;

	return sizelog2quadwords;
}
/* functions */
/*
 * Interrupt callback for the CP (command processor).
 *
 * RB interrupts reset the memstore timestamp-compare state and wake
 * ib1_wq waiters.  T0-packet / opcode / protected-mode / reserved-bit /
 * IB errors are fatal: further CP interrupts are masked off and the
 * ringbuffer is dumped.  Only bits in GSL_CP_INT_MASK are acknowledged.
 */
void kgsl_cp_intrcallback(struct kgsl_device *device)
{
	unsigned int status = 0;
	struct kgsl_ringbuffer *rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter (device=%p)\n", device);

	kgsl_yamato_regread(device, REG_CP_INT_STATUS, &status);

	if (status & CP_INT_CNTL__RB_INT_MASK) {
		/* signal intr completion event */
		int init_reftimestamp = 0x7fffffff;
		int enableflag = 0;
		kgsl_sharedmem_write(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable),
			&enableflag, 4);
		kgsl_sharedmem_write(&rb->device->memstore,
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts),
			&init_reftimestamp, 4);
		KGSL_CMD_WARN("ringbuffer rb interrupt\n");
	}

	if (status & (CP_INT_CNTL__IB1_INT_MASK | CP_INT_CNTL__RB_INT_MASK)) {
		KGSL_CMD_WARN("ringbuffer ib1/rb interrupt\n");
		wake_up_interruptible_all(&device->ib1_wq);
	}

	/* each fatal condition masks off CP interrupts and dumps the rb */
	if (status & CP_INT_CNTL__T0_PACKET_IN_IB_MASK) {
		KGSL_CMD_FATAL("ringbuffer TO packet in IB interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__OPCODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer opcode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer protected mode error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__RESERVED_BIT_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer reserved bit error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}
	if (status & CP_INT_CNTL__IB_ERROR_MASK) {
		KGSL_CMD_FATAL("ringbuffer IB error interrupt\n");
		kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, 0);
		kgsl_ringbuffer_dump(rb);
	}

	if (status & CP_INT_CNTL__SW_INT_MASK)
		KGSL_CMD_DBG("ringbuffer software interrupt\n");

	if (status & CP_INT_CNTL__IB2_INT_MASK)
		KGSL_CMD_DBG("ringbuffer ib2 interrupt\n");

	if (status & (~GSL_CP_INT_MASK))
		KGSL_CMD_DBG("bad bits in REG_CP_INT_STATUS %08x\n", status);

	/* only ack bits we understand */
	status &= GSL_CP_INT_MASK;
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, status);

	KGSL_CMD_VDBG("return\n");
}
/*
 * Periodic watchdog check for the ringbuffer.
 *
 * Compares the CP read pointer against the sample taken on the previous
 * interval: if the ringbuffer is non-empty and rptr has not advanced,
 * the core is declared hung.
 *
 * Fix: the original only armed the watchdog (KGSL_FLAGS_ACTIVE) and
 * saved the rptr sample inside the `if (flags & KGSL_FLAGS_ACTIVE)`
 * branch, so the flag could never be set in the first place and the
 * hang check never fired.  Arming/sampling now happens on every
 * interval in which the ringbuffer is non-empty.
 */
void kgsl_ringbuffer_watchdog()
{
	struct kgsl_device *device = NULL;
	struct kgsl_ringbuffer *rb = NULL;

	device = &kgsl_driver.yamato_device;
	BUG_ON(device == NULL);

	rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter\n");

	if ((rb->flags & KGSL_FLAGS_STARTED) == 0) {
		KGSL_CMD_VDBG("not started\n");
		return;
	}

	GSL_RB_GET_READPTR(rb, &rb->rptr);

	if (rb->rptr == rb->wptr) {
		/* clear rptr sample for interval n */
		rb->watchdog.flags &= ~KGSL_FLAGS_ACTIVE;
		goto done;
	}

	/* ringbuffer is currently not empty */
	/* and a rptr sample was taken during interval n-1 */
	if (rb->watchdog.flags & KGSL_FLAGS_ACTIVE) {
		/* and the rptr did not advance between
		 * interval n-1 and n */
		if (rb->rptr == rb->watchdog.rptr_sample) {
			/* then the core has hung */
			KGSL_CMD_FATAL("Watchdog detected core hung.\n");
			goto done;
		}
	}

	/* save rptr sample for interval n */
	rb->watchdog.flags |= KGSL_FLAGS_ACTIVE;
	rb->watchdog.rptr_sample = rb->rptr;

done:
	KGSL_CMD_VDBG("return\n");
}
/*
 * Publish rb->wptr to the hardware so the CP starts fetching the newly
 * written commands.  The barrier sequence before the register write is
 * deliberate; do not reorder it.
 */
static void kgsl_ringbuffer_submit(struct kgsl_ringbuffer *rb)
{
	BUG_ON(rb->wptr == 0);

	GSL_RB_UPDATE_WPTR_POLLING(rb);

	/* Drain write buffer and data memory barrier */
	dsb();
	dmb();

	/* Memory fence to ensure all data has posted.  On some systems,
	 * like 7x27, the register block is not allocated as strongly ordered
	 * memory.  Adding a memory fence ensures ordering during ringbuffer
	 * submits.*/
	mb();

	kgsl_yamato_regwrite(rb->device, REG_CP_RB_WPTR, rb->wptr);

	rb->flags |= KGSL_FLAGS_ACTIVE;
}
/*
 * Block until at least @numcmds dwords are free in the ringbuffer.
 *
 * When @wptr_ahead, the write pointer is near the end of the buffer: the
 * remainder is filled with a NOP packet, submitted, and wptr wraps to 0.
 * Then the CP read pointer is busy-polled until enough space opens up.
 * freecmds == 0 means rptr == wptr (buffer empty), which also satisfies
 * the request.  Always returns 0.
 *
 * NOTE(review): the poll loop has no timeout -- if the CP stops
 * consuming, this spins forever.
 */
static int
kgsl_ringbuffer_waitspace(struct kgsl_ringbuffer *rb, unsigned int numcmds,
			  int wptr_ahead)
{
	int nopcount;
	unsigned int freecmds;
	unsigned int *cmds;

	KGSL_CMD_VDBG("enter (rb=%p, numcmds=%d, wptr_ahead=%d)\n",
		      rb, numcmds, wptr_ahead);

	/* if wptr ahead, fill the remaining with NOPs */
	if (wptr_ahead) {
		/* -1 for header */
		nopcount = rb->sizedwords - rb->wptr - 1;

		cmds = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		GSL_RB_WRITE(cmds, pm4_nop_packet(nopcount));
		rb->wptr++;

		kgsl_ringbuffer_submit(rb);

		rb->wptr = 0;
	}

	/* wait for space in ringbuffer */
	do {
		GSL_RB_GET_READPTR(rb, &rb->rptr);

		freecmds = rb->rptr - rb->wptr;
	} while ((freecmds != 0) && (freecmds < numcmds));

	KGSL_CMD_VDBG("return %d\n", 0);
	return 0;
}
/*
 * Reserve @numcmds dwords in the ringbuffer and return a host pointer to
 * the reserved region (NULL on failure).  May block in waitspace() until
 * the CP frees space.  GSL_RB_NOP_SIZEDWORDS dwords are always held in
 * reserve so a wrap NOP packet can be written at the end of the buffer.
 */
static unsigned int *kgsl_ringbuffer_allocspace(struct kgsl_ringbuffer *rb,
						unsigned int numcmds)
{
	unsigned int *ptr = NULL;
	int status = 0;

	BUG_ON(numcmds >= rb->sizedwords);

	/* check for available space */
	if (rb->wptr >= rb->rptr) {
		/* wptr ahead or equal to rptr */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = kgsl_ringbuffer_waitspace(rb, numcmds, 1);
	} else {
		/* wptr behind rptr */
		if ((rb->wptr + numcmds) >= rb->rptr)
			status = kgsl_ringbuffer_waitspace(rb, numcmds, 0);
		/* check for remaining space */
		/* reserve dwords for nop packet */
		if ((rb->wptr + numcmds) > (rb->sizedwords -
				GSL_RB_NOP_SIZEDWORDS))
			status = kgsl_ringbuffer_waitspace(rb, numcmds, 1);
	}

	if (status == 0) {
		ptr = (unsigned int *)rb->buffer_desc.hostptr + rb->wptr;
		rb->wptr += numcmds;
	}

	return ptr;
}
/*
 * Load the PM4 micro-engine ucode into the CP.  Firmware layout: one
 * version word followed by 3-word chunks written to REG_CP_ME_RAM_DATA.
 * Returns 0 on success or a negative errno.
 *
 * Fixes: fw->size is size_t, so log it with %zu instead of %d (wrong
 * format specifier is undefined behavior); loop index made size_t to
 * avoid the signed/unsigned comparison against fw_word_size.
 */
static int kgsl_ringbuffer_load_pm4_ucode(struct kgsl_device *device)
{
	int status = 0;
	size_t i;
	const struct firmware *fw = NULL;
	unsigned int *fw_ptr = NULL;
	size_t fw_word_size = 0;

	status = request_firmware(&fw, YAMATO_PM4_FW,
				  kgsl_driver.misc.this_device);
	if (status != 0) {
		KGSL_DRV_ERR("request_firmware failed for %s with error %d\n",
			     YAMATO_PM4_FW, status);
		goto done;
	}
	/*this firmware must come in 3 word chunks. plus 1 word of version*/
	if ((fw->size % (sizeof(uint32_t)*3)) != 4) {
		KGSL_DRV_ERR("bad firmware size %zu.\n", fw->size);
		status = -EINVAL;
		goto done;
	}
	fw_ptr = (unsigned int *)fw->data;
	fw_word_size = fw->size/sizeof(uint32_t);

	KGSL_DRV_INFO("loading pm4 ucode version: %d\n", fw_ptr[0]);

	kgsl_yamato_regwrite(device, REG_CP_DEBUG, 0x02000000);
	kgsl_yamato_regwrite(device, REG_CP_ME_RAM_WADDR, 0);
	/* skip the version word at index 0 */
	for (i = 1; i < fw_word_size; i++)
		kgsl_yamato_regwrite(device, REG_CP_ME_RAM_DATA, fw_ptr[i]);
done:
	/* release_firmware(NULL) is a no-op, so this is safe on all paths */
	release_firmware(fw);
	return status;
}
/*
 * Load the PFP (prefetch parser) firmware into the CP.
 *
 * The image is one version word followed by single-word entries; word 0 is
 * skipped when streaming into REG_CP_PFP_UCODE_DATA.  Uses the same
 * goto-cleanup shape as kgsl_ringbuffer_load_pm4_ucode() for consistency.
 * Returns 0 on success or a negative errno.
 */
static int kgsl_ringbuffer_load_pfp_ucode(struct kgsl_device *device)
{
	int status = 0;
	int i;
	const struct firmware *fw = NULL;
	unsigned int *fw_ptr = NULL;
	size_t fw_word_size = 0;

	status = request_firmware(&fw, YAMATO_PFP_FW,
				  kgsl_driver.misc.this_device);
	if (status != 0) {
		KGSL_DRV_ERR("request_firmware for %s failed with error %d\n",
			     YAMATO_PFP_FW, status);
		/* nothing acquired yet, no cleanup needed */
		return status;
	}
	/*this firmware must come in 1 word chunks. */
	if ((fw->size % sizeof(uint32_t)) != 0) {
		/* %zu: fw->size is a size_t (was %d, a format mismatch) */
		KGSL_DRV_ERR("bad firmware size %zu.\n", fw->size);
		status = -EINVAL;
		goto done;
	}
	fw_ptr = (unsigned int *)fw->data;
	fw_word_size = fw->size/sizeof(uint32_t);
	KGSL_DRV_INFO("loading pfp ucode version: %d\n", fw_ptr[0]);
	kgsl_yamato_regwrite(device, REG_CP_PFP_UCODE_ADDR, 0);
	for (i = 1; i < fw_word_size; i++)
		kgsl_yamato_regwrite(device, REG_CP_PFP_UCODE_DATA, fw_ptr[i]);
done:
	release_firmware(fw);
	return status;
}
/*
 * Bring the ringbuffer and CP to a running state: scrub the ring and
 * memptrs, program the RB registers, load the PM4/PFP microcode, release
 * the micro engine and submit the 19-dword ME_INIT packet.
 *
 * Idempotent: returns 0 immediately if KGSL_FLAGS_STARTED is already set.
 * Returns 0 on success or a negative error from firmware loading or from
 * kgsl_yamato_idle().
 */
static int kgsl_ringbuffer_start(struct kgsl_ringbuffer *rb)
{
	int status;
	/*cp_rb_cntl_u cp_rb_cntl; */
	union reg_cp_rb_cntl cp_rb_cntl;
	unsigned int *cmds, rb_cntl;
	struct kgsl_device *device = rb->device;

	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);
	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_VDBG("return %d\n", 0);
		return 0;
	}
	/* zero the memptrs; fill the ring with a recognizable 0xAA pattern */
	kgsl_sharedmem_set(&rb->memptrs_desc, 0, 0,
			   sizeof(struct kgsl_rbmemptrs));
	kgsl_sharedmem_set(&rb->buffer_desc, 0, 0xAA,
			   (rb->sizedwords << 2));
	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_BASE,
			     (rb->memptrs_desc.gpuaddr
			      + GSL_RB_MEMPTRS_WPTRPOLL_OFFSET));
	/* setup WPTR delay */
	kgsl_yamato_regwrite(device, REG_CP_RB_WPTR_DELAY, 0 /*0x70000010 */);
	/*setup REG_CP_RB_CNTL */
	kgsl_yamato_regread(device, REG_CP_RB_CNTL, &rb_cntl);
	cp_rb_cntl.val = rb_cntl;
	/* size of ringbuffer */
	cp_rb_cntl.f.rb_bufsz =
		kgsl_ringbuffer_sizelog2quadwords(rb->sizedwords);
	/* quadwords to read before updating mem RPTR */
	cp_rb_cntl.f.rb_blksz = rb->blksizequadwords;
	cp_rb_cntl.f.rb_poll_en = GSL_RB_CNTL_POLL_EN; /* WPTR polling */
	/* mem RPTR writebacks */
	cp_rb_cntl.f.rb_no_update = GSL_RB_CNTL_NO_UPDATE;
	kgsl_yamato_regwrite(device, REG_CP_RB_CNTL, cp_rb_cntl.val);
	kgsl_yamato_regwrite(device, REG_CP_RB_BASE, rb->buffer_desc.gpuaddr);
	kgsl_yamato_regwrite(device, REG_CP_RB_RPTR_ADDR,
			     rb->memptrs_desc.gpuaddr +
			     GSL_RB_MEMPTRS_RPTR_OFFSET);
	/* explicitly clear all cp interrupts */
	kgsl_yamato_regwrite(device, REG_CP_INT_ACK, 0xFFFFFFFF);
	/* setup scratch/timestamp */
	kgsl_yamato_regwrite(device, REG_SCRATCH_ADDR,
			     device->memstore.gpuaddr +
			     KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp));
	kgsl_yamato_regwrite(device, REG_SCRATCH_UMSK,
			     GSL_RB_MEMPTRS_SCRATCH_MASK);
	/* load the CP ucode */
	status = kgsl_ringbuffer_load_pm4_ucode(device);
	if (status != 0) {
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pm4_ucode failed %d\n",
			     status);
		return status;
	}
	/* load the prefetch parser ucode */
	status = kgsl_ringbuffer_load_pfp_ucode(device);
	if (status != 0) {
		/* bugfix: this path used to report the pm4 loader */
		KGSL_DRV_ERR("kgsl_ringbuffer_load_pfp_ucode failed %d\n",
			     status);
		return status;
	}
	kgsl_yamato_regwrite(device, REG_CP_QUEUE_THRESHOLDS, 0x000C0804);
	rb->rptr = 0;
	rb->wptr = 0;
	rb->timestamp = 0;
	GSL_RB_INIT_TIMESTAMP(rb);
	INIT_LIST_HEAD(&rb->memqueue);
	/* clear ME_HALT to start micro engine */
	kgsl_yamato_regwrite(device, REG_CP_ME_CNTL, 0);
	/* ME_INIT: exactly 19 dwords — must match the writes below */
	cmds = kgsl_ringbuffer_allocspace(rb, 19);
	GSL_RB_WRITE(cmds, PM4_HDR_ME_INIT);
	/* All fields present (bits 9:0) */
	GSL_RB_WRITE(cmds, 0x000003ff);
	/* Disable/Enable Real-Time Stream processing (present but ignored) */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Enable (2D <-> 3D) implicit synchronization (present but ignored) */
	GSL_RB_WRITE(cmds, 0x00000000);
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_RB_SURFACE_INFO));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_WINDOW_OFFSET));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_VGT_MAX_VTX_INDX));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_SQ_PROGRAM_CNTL));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_RB_DEPTHCONTROL));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POINT_SIZE));
	GSL_RB_WRITE(cmds, GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SC_LINE_CNTL));
	GSL_RB_WRITE(cmds,
		GSL_HAL_SUBBLOCK_OFFSET(REG_PA_SU_POLY_OFFSET_FRONT_SCALE));
	/* Vertex and Pixel Shader Start Addresses in instructions
	 * (3 DWORDS per instruction) */
	GSL_RB_WRITE(cmds, 0x80000180);
	/* Maximum Contexts */
	GSL_RB_WRITE(cmds, 0x00000001);
	/* Write Confirm Interval and The CP will wait the
	 * wait_interval * 16 clocks between polling */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* NQ and External Memory Swap */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Protected mode error checking */
	GSL_RB_WRITE(cmds, GSL_RB_PROTECTED_MODE_CONTROL);
	/* Disable header dumping and Header dump address */
	GSL_RB_WRITE(cmds, 0x00000000);
	/* Header dump size */
	GSL_RB_WRITE(cmds, 0x00000000);
	kgsl_ringbuffer_submit(rb);
	/* idle device to validate ME INIT */
	status = kgsl_yamato_idle(device, KGSL_TIMEOUT_DEFAULT);
	KGSL_CMD_DBG("enabling CP interrupts: mask %08lx\n", GSL_CP_INT_MASK);
	kgsl_yamato_regwrite(rb->device, REG_CP_INT_CNTL, GSL_CP_INT_MASK);
	if (status == 0)
		rb->flags |= KGSL_FLAGS_STARTED;
	KGSL_CMD_VDBG("return %d\n", status);
	return status;
}
/*
 * Quiesce the ringbuffer: mask CP interrupts, halt the micro engine and
 * clear KGSL_FLAGS_STARTED.  A ring that was never started is left alone.
 * Always returns 0.
 */
static int kgsl_ringbuffer_stop(struct kgsl_ringbuffer *rb)
{
	struct kgsl_device *device = rb->device;

	KGSL_CMD_VDBG("enter (rb=%p)\n", rb);
	if (rb->flags & KGSL_FLAGS_STARTED) {
		KGSL_CMD_DBG("disabling CP interrupts: mask %08x\n", 0);
		kgsl_yamato_regwrite(device, REG_CP_INT_CNTL, 0);
		/* ME_HALT */
		kgsl_yamato_regwrite(device, REG_CP_ME_CNTL, 0x10000000);
		rb->flags &= ~KGSL_FLAGS_STARTED;
		kgsl_ringbuffer_dump(rb);
	}
	KGSL_CMD_VDBG("return %d\n", 0);
	return 0;
}
/*
 * Allocate and start the device ringbuffer.
 *
 * Allocates the ring itself and the rptr/wptr-poll shadow from contiguous
 * shared memory, maps everything into the default MMU pagetable, then
 * starts the CP.  On any failure the partially-built ring is torn down
 * with kgsl_ringbuffer_close() and the error is returned.
 */
int kgsl_ringbuffer_init(struct kgsl_device *device)
{
	int status;
	uint32_t flags;
	struct kgsl_ringbuffer *rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter (device=%p)\n", device);
	rb->device = device;
	rb->sizedwords = (2 << kgsl_cfg_rb_sizelog2quadwords);
	rb->blksizequadwords = kgsl_cfg_rb_blksizequadwords;

	/* ring memory: double-octword aligned in practice (page aligned),
	 * physically contiguous, and the request must be honored exactly */
	flags = (KGSL_MEMFLAGS_ALIGNPAGE | KGSL_MEMFLAGS_CONPHYS |
		 KGSL_MEMFLAGS_STRICTREQUEST);
	status = kgsl_sharedmem_alloc(flags, (rb->sizedwords << 2),
				      &rb->buffer_desc);
	if (status != 0)
		goto error;

	/* polling/timestamp shadow: only needs 4-byte alignment, but page
	 * alignment lets it be mapped through the MMU */
	flags = (KGSL_MEMFLAGS_ALIGNPAGE | KGSL_MEMFLAGS_CONPHYS);
	status = kgsl_sharedmem_alloc(flags, sizeof(struct kgsl_rbmemptrs),
				      &rb->memptrs_desc);
	if (status != 0)
		goto error;

	/* all init-time allocations exist now; map them into the MMU */
	status = kgsl_yamato_setup_pt(device, device->mmu.defaultpagetable);
	if (status != 0)
		goto error;

	/* overlay structure on memptrs memory */
	rb->memptrs = (struct kgsl_rbmemptrs *) rb->memptrs_desc.hostptr;
	rb->flags |= KGSL_FLAGS_INITIALIZED;

	status = kgsl_ringbuffer_start(rb);
	if (status != 0)
		goto error;

	KGSL_CMD_VDBG("return %d\n", 0);
	return 0;

error:
	kgsl_ringbuffer_close(rb);
	KGSL_CMD_VDBG("return %d\n", status);
	return status;
}
int kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb)
{
KGSL_CMD_VDBG("enter (rb=%p)\n", rb);
kgsl_cmdstream_memqueue_drain(rb->device);
kgsl_ringbuffer_stop(rb);
/* this must happen before first sharedmem_free */
kgsl_yamato_cleanup_pt(rb->device, rb->device->mmu.defaultpagetable);
if (rb->buffer_desc.hostptr)
kgsl_sharedmem_free(&rb->buffer_desc);
if (rb->memptrs_desc.hostptr)
kgsl_sharedmem_free(&rb->memptrs_desc);
rb->flags &= ~KGSL_FLAGS_INITIALIZED;
memset(rb, 0, sizeof(struct kgsl_ringbuffer));
KGSL_CMD_VDBG("return %d\n", 0);
return 0;
}
/*
 * Write sizedwords of caller commands plus driver bookkeeping into the
 * ringbuffer, submit, and return the timestamp assigned to the batch.
 *
 * Per submission the driver appends: a start-of-pipeline timestamp write
 * (scratch register), a CACHE_FLUSH_TS event writing the end-of-pipeline
 * timestamp into memstore, and — unless KGSL_CMD_FLAGS_NO_TS_CMP — a
 * conditional interrupt that only fires when a waiter has armed
 * ts_cmp_enable/ref_wait_ts.  KGSL_CMD_FLAGS_PMODE brackets the caller's
 * commands with packets that disable/re-enable protected-mode checking.
 */
static uint32_t
kgsl_ringbuffer_addcmds(struct kgsl_ringbuffer *rb,
			int flags, unsigned int *cmds,
			int sizedwords)
{
	unsigned int *ringcmds;
	unsigned int timestamp;
	/* +6: the always-appended timestamp packets below */
	unsigned int total_sizedwords = sizedwords + 6;

	/* reserve space to temporarily turn off protected mode
	 * error checking if needed
	 */
	total_sizedwords += flags & KGSL_CMD_FLAGS_PMODE ? 4 : 0;
	total_sizedwords += !(flags & KGSL_CMD_FLAGS_NO_TS_CMP) ? 9 : 0;
	/* NOTE(review): return value not checked for NULL; with the current
	 * waitspace implementation allocspace cannot fail — confirm. */
	ringcmds = kgsl_ringbuffer_allocspace(rb, total_sizedwords);
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* disable protected mode error checking */
		*ringcmds++ = pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1);
		*ringcmds++ = 0;
	}
	memcpy(ringcmds, cmds, (sizedwords << 2));
	ringcmds += sizedwords;
	if (flags & KGSL_CMD_FLAGS_PMODE) {
		/* re-enable protected mode error checking */
		*ringcmds++ = pm4_type3_packet(PM4_SET_PROTECTED_MODE, 1);
		*ringcmds++ = 1;
	}
	rb->timestamp++;
	timestamp = rb->timestamp;
	/* start-of-pipeline and end-of-pipeline timestamps */
	*ringcmds++ = pm4_type0_packet(REG_CP_TIMESTAMP, 1);
	*ringcmds++ = rb->timestamp;
	*ringcmds++ = pm4_type3_packet(PM4_EVENT_WRITE, 3);
	*ringcmds++ = CACHE_FLUSH_TS;
	*ringcmds++ =
		(rb->device->memstore.gpuaddr +
		 KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp));
	*ringcmds++ = rb->timestamp;
	if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) {
		/* Add idle packet so avoid RBBM errors */
		*ringcmds++ = pm4_type3_packet(PM4_WAIT_FOR_IDLE, 1);
		*ringcmds++ = 0x00000000;
		/* Conditional execution based on memory values */
		*ringcmds++ = pm4_type3_packet(PM4_COND_EXEC, 4);
		*ringcmds++ = (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2;
		*ringcmds++ = (rb->device->memstore.gpuaddr +
			KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2;
		*ringcmds++ = rb->timestamp;
		/* # of conditional command DWORDs */
		*ringcmds++ = 2;
		*ringcmds++ = pm4_type3_packet(PM4_INTERRUPT, 1);
		*ringcmds++ = CP_INT_CNTL__RB_INT_MASK;
	}
	kgsl_ringbuffer_submit(rb);
	GSL_RB_STATS(rb->stats.words_total += sizedwords);
	GSL_RB_STATS(rb->stats.issues++);
	KGSL_CMD_VDBG("return %d\n", timestamp);
	/* return timestamp of issued commands */
	return timestamp;
}
/*
 * Thin wrapper over kgsl_ringbuffer_addcmds() for the device ringbuffer.
 * Returns the timestamp assigned to the submitted commands.
 */
uint32_t
kgsl_ringbuffer_issuecmds(struct kgsl_device *device,
			  int flags,
			  unsigned int *cmds,
			  int sizedwords)
{
	unsigned int timestamp;
	struct kgsl_ringbuffer *rb = &device->ringbuffer;

	KGSL_CMD_VDBG("enter (device->id=%d, flags=%d, cmds=%p, "
		      "sizedwords=%d)\n", device->id, flags, cmds, sizedwords);
	timestamp = kgsl_ringbuffer_addcmds(rb, flags, cmds, sizedwords);
	/* bugfix: format string was "return %d\n)" with a stray paren */
	KGSL_CMD_VDBG("return %d\n", timestamp);
	return timestamp;
}
/*
 * Submit an indirect buffer (IB) of GPU commands.
 *
 * Builds a 3-dword INDIRECT_BUFFER_PFD packet referencing ibaddr, switches
 * to the requested draw context, and queues the packet through
 * kgsl_ringbuffer_addcmds(); the assigned timestamp is returned through
 * *timestamp.  Returns 0 on success or -EINVAL if the ring is not started.
 */
int
kgsl_ringbuffer_issueibcmds(struct kgsl_device *device,
			    int drawctxt_index,
			    uint32_t ibaddr,
			    int sizedwords,
			    uint32_t *timestamp,
			    unsigned int flags)
{
	unsigned int ibcmds[3];

	KGSL_CMD_VDBG("enter (device_id=%d, drawctxt_index=%d, ibaddr=0x%08x,"
		      " sizedwords=%d, timestamp=%p)\n",
		      device->id, drawctxt_index, ibaddr,
		      sizedwords, timestamp);
	if (!(device->ringbuffer.flags & KGSL_FLAGS_STARTED)) {
		KGSL_CMD_VDBG("return %d\n", -EINVAL);
		return -EINVAL;
	}
	BUG_ON(ibaddr == 0);
	BUG_ON(sizedwords == 0);
	/* IB packet: header, GPU address of the buffer, length in dwords */
	ibcmds[0] = PM4_HDR_INDIRECT_BUFFER_PFD;
	ibcmds[1] = ibaddr;
	ibcmds[2] = sizedwords;
	kgsl_drawctxt_switch(device, &device->drawctxt[drawctxt_index], flags);
	*timestamp = kgsl_ringbuffer_addcmds(&device->ringbuffer,
					     0, ibcmds, 3);
	KGSL_CMD_INFO("ctxt %d g %08x sd %d ts %d\n",
		      drawctxt_index, ibaddr, sizedwords, *timestamp);
	KGSL_CMD_VDBG("return %d\n", 0);
	return 0;
}
#ifdef DEBUG
/*
 * Snapshot the CP/RB register state and memory shadows into *rb_debug.
 * DEBUG builds only; the header provides empty inline stubs otherwise.
 */
void kgsl_ringbuffer_debug(struct kgsl_ringbuffer *rb,
			   struct kgsl_rb_debug *rb_debug)
{
	memset(rb_debug, 0, sizeof(struct kgsl_rb_debug));
	/* memory-shadowed pointers first, then the raw registers */
	rb_debug->mem_rptr = rb->memptrs->rptr;
	rb_debug->mem_wptr_poll = rb->memptrs->wptr_poll;
	kgsl_yamato_regread(rb->device, REG_CP_RB_BASE,
			    (unsigned int *)&rb_debug->cp_rb_base);
	kgsl_yamato_regread(rb->device, REG_CP_RB_CNTL,
			    (unsigned int *)&rb_debug->cp_rb_cntl);
	kgsl_yamato_regread(rb->device, REG_CP_RB_RPTR_ADDR,
			    (unsigned int *)&rb_debug->cp_rb_rptr_addr);
	kgsl_yamato_regread(rb->device, REG_CP_RB_RPTR,
			    (unsigned int *)&rb_debug->cp_rb_rptr);
	kgsl_yamato_regread(rb->device, REG_CP_RB_RPTR_WR,
			    (unsigned int *)&rb_debug->cp_rb_rptr_wr);
	kgsl_yamato_regread(rb->device, REG_CP_RB_WPTR,
			    (unsigned int *)&rb_debug->cp_rb_wptr);
	kgsl_yamato_regread(rb->device, REG_CP_RB_WPTR_DELAY,
			    (unsigned int *)&rb_debug->cp_rb_wptr_delay);
	kgsl_yamato_regread(rb->device, REG_CP_RB_WPTR_BASE,
			    (unsigned int *)&rb_debug->cp_rb_wptr_base);
	kgsl_yamato_regread(rb->device, REG_CP_IB1_BASE,
			    (unsigned int *)&rb_debug->cp_ib1_base);
	kgsl_yamato_regread(rb->device, REG_CP_IB1_BUFSZ,
			    (unsigned int *)&rb_debug->cp_ib1_bufsz);
	kgsl_yamato_regread(rb->device, REG_CP_IB2_BASE,
			    (unsigned int *)&rb_debug->cp_ib2_base);
	kgsl_yamato_regread(rb->device, REG_CP_IB2_BUFSZ,
			    (unsigned int *)&rb_debug->cp_ib2_bufsz);
	kgsl_yamato_regread(rb->device, REG_CP_ST_BASE,
			    (unsigned int *)&rb_debug->cp_st_base);
	kgsl_yamato_regread(rb->device, REG_CP_ST_BUFSZ,
			    (unsigned int *)&rb_debug->cp_st_bufsz);
	kgsl_yamato_regread(rb->device, REG_CP_CSQ_RB_STAT,
			    (unsigned int *)&rb_debug->cp_csq_rb_stat);
	kgsl_yamato_regread(rb->device, REG_CP_CSQ_IB1_STAT,
			    (unsigned int *)&rb_debug->cp_csq_ib1_stat);
	kgsl_yamato_regread(rb->device, REG_CP_CSQ_IB2_STAT,
			    (unsigned int *)&rb_debug->cp_csq_ib2_stat);
	kgsl_yamato_regread(rb->device, REG_SCRATCH_UMSK,
			    (unsigned int *)&rb_debug->scratch_umsk);
	kgsl_yamato_regread(rb->device, REG_SCRATCH_ADDR,
			    (unsigned int *)&rb_debug->scratch_addr);
	kgsl_yamato_regread(rb->device, REG_CP_ME_CNTL,
			    (unsigned int *)&rb_debug->cp_me_cntl);
	kgsl_yamato_regread(rb->device, REG_CP_ME_STATUS,
			    (unsigned int *)&rb_debug->cp_me_status);
	kgsl_yamato_regread(rb->device, REG_CP_DEBUG,
			    (unsigned int *)&rb_debug->cp_debug);
	kgsl_yamato_regread(rb->device, REG_CP_STAT,
			    (unsigned int *)&rb_debug->cp_stat);
	kgsl_yamato_regread(rb->device, REG_CP_INT_STATUS,
			    (unsigned int *)&rb_debug->cp_int_status);
	kgsl_yamato_regread(rb->device, REG_CP_INT_CNTL,
			    (unsigned int *)&rb_debug->cp_int_cntl);
	kgsl_yamato_regread(rb->device, REG_RBBM_STATUS,
			    (unsigned int *)&rb_debug->rbbm_status);
	kgsl_yamato_regread(rb->device, REG_RBBM_INT_STATUS,
			    (unsigned int *)&rb_debug->rbbm_int_status);
	GSL_RB_GET_SOP_TIMESTAMP(rb, (unsigned int *)&rb_debug->sop_timestamp);
	GSL_RB_GET_EOP_TIMESTAMP(rb, (unsigned int *)&rb_debug->eop_timestamp);
}
#endif /*DEBUG*/
#ifdef DEBUG
/*
 * Take a kgsl_ringbuffer_debug() snapshot and print it to the kernel log.
 * Called from kgsl_ringbuffer_stop(); DEBUG builds only.
 */
void kgsl_ringbuffer_dump(struct kgsl_ringbuffer *rb)
{
	struct kgsl_rb_debug rb_debug;

	kgsl_ringbuffer_debug(rb, &rb_debug);
	KGSL_CMD_DBG("rbbm_status %08x rbbm_int_status %08x"
		     " mem_rptr %08x mem_wptr_poll %08x\n",
		     rb_debug.rbbm_status,
		     rb_debug.rbbm_int_status,
		     rb_debug.mem_rptr, rb_debug.mem_wptr_poll);
	KGSL_CMD_DBG("rb_base %08x rb_cntl %08x rb_rptr_addr %08x rb_rptr %08x"
		     " rb_rptr_wr %08x\n",
		     rb_debug.cp_rb_base, rb_debug.cp_rb_cntl,
		     rb_debug.cp_rb_rptr_addr, rb_debug.cp_rb_rptr,
		     rb_debug.cp_rb_rptr_wr);
	KGSL_CMD_DBG("rb_wptr %08x rb_wptr_delay %08x rb_wptr_base %08x"
		     " ib1_base %08x ib1_bufsz %08x\n",
		     rb_debug.cp_rb_wptr, rb_debug.cp_rb_wptr_delay,
		     rb_debug.cp_rb_wptr_base, rb_debug.cp_ib1_base,
		     rb_debug.cp_ib1_bufsz);
	KGSL_CMD_DBG("ib2_base %08x ib2_bufsz %08x st_base %08x st_bufsz %08x"
		     " cp_me_cntl %08x cp_me_status %08x\n",
		     rb_debug.cp_ib2_base, rb_debug.cp_ib2_bufsz,
		     rb_debug.cp_st_base, rb_debug.cp_st_bufsz,
		     rb_debug.cp_me_cntl, rb_debug.cp_me_status);
	KGSL_CMD_DBG("cp_debug %08x cp_stat %08x cp_int_status %08x"
		     " cp_int_cntl %08x\n",
		     rb_debug.cp_debug, rb_debug.cp_stat,
		     rb_debug.cp_int_status, rb_debug.cp_int_cntl);
	KGSL_CMD_DBG("sop_timestamp: %d eop_timestamp: %d\n",
		     rb_debug.sop_timestamp, rb_debug.eop_timestamp);
}
#endif /* DEBUG */

View File

@ -1,254 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef __GSL_RINGBUFFER_H
#define __GSL_RINGBUFFER_H
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#include <linux/mutex.h>
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "yamato_reg.h"
/* feature selection: keep per-ring statistics, and shadow the CP read
 * pointer / timestamp in memory instead of polling registers */
#define GSL_STATS_RINGBUFFER
#define GSL_RB_USE_MEM_RPTR
#define GSL_RB_USE_MEM_TIMESTAMP
#define GSL_DEVICE_SHADOW_MEMSTORE_TO_USER

/* ringbuffer sizes log2quadword */
#define GSL_RB_SIZE_8 0
#define GSL_RB_SIZE_16 1
#define GSL_RB_SIZE_32 2
#define GSL_RB_SIZE_64 3
#define GSL_RB_SIZE_128 4
#define GSL_RB_SIZE_256 5
#define GSL_RB_SIZE_512 6
#define GSL_RB_SIZE_1K 7
#define GSL_RB_SIZE_2K 8
#define GSL_RB_SIZE_4K 9
#define GSL_RB_SIZE_8K 10
#define GSL_RB_SIZE_16K 11
#define GSL_RB_SIZE_32K 12
#define GSL_RB_SIZE_64K 13
#define GSL_RB_SIZE_128K 14
#define GSL_RB_SIZE_256K 15
#define GSL_RB_SIZE_512K 16
#define GSL_RB_SIZE_1M 17
#define GSL_RB_SIZE_2M 18
#define GSL_RB_SIZE_4M 19

/* Yamato ringbuffer config: ring size and RPTR-writeback block size,
 * consumed by kgsl_ringbuffer_init()/start() */
static const unsigned int kgsl_cfg_rb_sizelog2quadwords = GSL_RB_SIZE_32K;
static const unsigned int kgsl_cfg_rb_blksizequadwords = GSL_RB_SIZE_16;

/* CP timestamp register (scratch reg 0 doubles as the SOP timestamp) */
#define REG_CP_TIMESTAMP REG_SCRATCH_REG0

/* forward declarations; full definitions live in other kgsl headers */
struct kgsl_device;
struct kgsl_drawctxt;
struct kgsl_ringbuffer;
/* snapshot of CP/RB register state, filled by kgsl_ringbuffer_debug() */
struct kgsl_rb_debug {
	unsigned int pm4_ucode_rel;
	unsigned int pfp_ucode_rel;
	unsigned int mem_wptr_poll;	/* wptr shadow in memptrs */
	unsigned int mem_rptr;		/* rptr shadow in memptrs */
	unsigned int cp_rb_base;
	unsigned int cp_rb_cntl;
	unsigned int cp_rb_rptr_addr;
	unsigned int cp_rb_rptr;
	unsigned int cp_rb_rptr_wr;
	unsigned int cp_rb_wptr;
	unsigned int cp_rb_wptr_delay;
	unsigned int cp_rb_wptr_base;
	unsigned int cp_ib1_base;
	unsigned int cp_ib1_bufsz;
	unsigned int cp_ib2_base;
	unsigned int cp_ib2_bufsz;
	unsigned int cp_st_base;
	unsigned int cp_st_bufsz;
	unsigned int cp_csq_rb_stat;
	unsigned int cp_csq_ib1_stat;
	unsigned int cp_csq_ib2_stat;
	unsigned int scratch_umsk;
	unsigned int scratch_addr;
	unsigned int cp_me_cntl;
	unsigned int cp_me_status;
	unsigned int cp_debug;
	unsigned int cp_stat;
	unsigned int cp_int_status;
	unsigned int cp_int_cntl;
	unsigned int rbbm_status;
	unsigned int rbbm_int_status;
	unsigned int sop_timestamp;	/* start-of-pipeline */
	unsigned int eop_timestamp;	/* end-of-pipeline */
};

/* debug dump helpers are real functions only in DEBUG builds */
#ifdef DEBUG
void kgsl_ringbuffer_debug(struct kgsl_ringbuffer *rb,
			   struct kgsl_rb_debug *rb_debug);
void kgsl_ringbuffer_dump(struct kgsl_ringbuffer *rb);
#else
static inline void kgsl_ringbuffer_debug(struct kgsl_ringbuffer *rb,
					 struct kgsl_rb_debug *rb_debug)
{
}
static inline void kgsl_ringbuffer_dump(struct kgsl_ringbuffer *rb)
{
}
#endif
/* watchdog state: rptr_sample holds the last sampled read pointer */
struct kgsl_rbwatchdog {
	uint32_t flags;
	unsigned int rptr_sample;
};

#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8

/* memory-shadowed CP pointers, overlaid on the memptrs_desc allocation;
 * volatile: the GPU writes rptr / reads wptr_poll behind the CPU's back */
struct kgsl_rbmemptrs {
	volatile int rptr;
	volatile int wptr_poll;
} __attribute__ ((packed));

/* byte offsets used when programming REG_CP_RB_RPTR_ADDR / WPTR_BASE */
#define GSL_RB_MEMPTRS_RPTR_OFFSET \
	(offsetof(struct kgsl_rbmemptrs, rptr))
#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
	(offsetof(struct kgsl_rbmemptrs, wptr_poll))

/* cumulative submission counters (see GSL_RB_STATS) */
struct kgsl_rbstats {
	int64_t issues;
	int64_t words_total;
};

/* per-device command ringbuffer state */
struct kgsl_ringbuffer {
	struct kgsl_device *device;
	uint32_t flags;			/* KGSL_FLAGS_INITIALIZED/STARTED/... */
	struct kgsl_memdesc buffer_desc;	/* the ring itself */
	struct kgsl_memdesc memptrs_desc;	/* backing for *memptrs */
	struct kgsl_rbmemptrs *memptrs;
	/*ringbuffer size */
	unsigned int sizedwords;
	unsigned int blksizequadwords;
	unsigned int wptr; /* write pointer offset in dwords from baseaddr */
	unsigned int rptr; /* read pointer offset in dwords from baseaddr */
	uint32_t timestamp;		/* last issued timestamp */
	/* queue of memfrees pending timestamp elapse */
	struct list_head memqueue;
	struct kgsl_rbwatchdog watchdog;
#ifdef GSL_STATS_RINGBUFFER
	struct kgsl_rbstats stats;
#endif /* GSL_STATS_RINGBUFFER */
};
/* dword base address of the GFX decode space */
#define GSL_HAL_SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))

/* write one command dword into the ring and advance the cursor; the
 * barrier orders the write against earlier stores */
#define GSL_RB_WRITE(ring, data) \
	do { \
		mb(); \
		writel(data, ring); \
		ring++; \
	} while (0)

/* timestamp */
#ifdef GSL_DEVICE_SHADOW_MEMSTORE_TO_USER
#define GSL_RB_USE_MEM_TIMESTAMP
#endif /* GSL_DEVICE_SHADOW_MEMSTORE_TO_USER */
#ifdef GSL_RB_USE_MEM_TIMESTAMP
/* enable timestamp (...scratch0) memory shadowing */
#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
#define GSL_RB_INIT_TIMESTAMP(rb)
#else
#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x0
/* NOTE(review): this dead branch passes (rb)->device->id where
 * kgsl_yamato_regwrite takes a struct kgsl_device * elsewhere in this
 * driver — looks wrong; confirm before enabling this configuration */
#define GSL_RB_INIT_TIMESTAMP(rb) \
	kgsl_yamato_regwrite((rb)->device->id, REG_CP_TIMESTAMP, 0)
#endif /* GSL_RB_USE_MEM_TIMESTAMP */

/* mem rptr */
#ifdef GSL_RB_USE_MEM_RPTR
#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
#define GSL_RB_GET_READPTR(rb, data) \
	do { \
		*(data) = (rb)->memptrs->rptr; \
	} while (0)
#else
#define GSL_RB_CNTL_NO_UPDATE 0x1 /* disable */
/* NOTE(review): same device->id vs device * concern as above — dead with
 * the current config */
#define GSL_RB_GET_READPTR(rb, data) \
	do { \
		kgsl_yamato_regread((rb)->device->id, REG_CP_RB_RPTR, (data)); \
	} while (0)
#endif /* GSL_RB_USE_MEM_RPTR */

/* wptr polling */
#ifdef GSL_RB_USE_WPTR_POLLING
#define GSL_RB_CNTL_POLL_EN 0x1 /* enable */
#define GSL_RB_UPDATE_WPTR_POLLING(rb) \
	do { (rb)->memptrs->wptr_poll = (rb)->wptr; } while (0)
#else
#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
#define GSL_RB_UPDATE_WPTR_POLLING(rb)
#endif /* GSL_RB_USE_WPTR_POLLING */

/* stats: GSL_RB_STATS(x) compiles x only when stats are enabled */
#ifdef GSL_STATS_RINGBUFFER
#define GSL_RB_STATS(x) x
#else
#define GSL_RB_STATS(x)
#endif /* GSL_STATS_RINGBUFFER */
struct kgsl_pmem_entry;

/* submit an indirect buffer; *timestamp receives the assigned timestamp */
int kgsl_ringbuffer_issueibcmds(struct kgsl_device *, int drawctxt_index,
				uint32_t ibaddr, int sizedwords,
				uint32_t *timestamp,
				unsigned int flags);
/* allocate, map and start the device ringbuffer */
int kgsl_ringbuffer_init(struct kgsl_device *device);
/* stop the CP and free all ringbuffer resources */
int kgsl_ringbuffer_close(struct kgsl_ringbuffer *rb);
/* submit raw command dwords; returns the assigned timestamp */
uint32_t kgsl_ringbuffer_issuecmds(struct kgsl_device *device,
				   int pmodeoff,
				   unsigned int *cmdaddr,
				   int sizedwords);
int kgsl_ringbuffer_gettimestampshadow(struct kgsl_device *device,
				       unsigned int *sopaddr,
				       unsigned int *eopaddr);
void kgsl_ringbuffer_watchdog(void);
/* CP interrupt handler, invoked from the device ISR */
void kgsl_cp_intrcallback(struct kgsl_device *device);
#endif /* __GSL_RINGBUFFER_H */

View File

@ -1,300 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include "kgsl_sharedmem.h"
#include "kgsl_device.h"
#include "kgsl.h"
#include "kgsl_log.h"
/* extract the log2 alignment (shift count) encoded in allocation flags */
static inline unsigned int
kgsl_memarena_get_order(uint32_t flags)
{
	return (flags & KGSL_MEMFLAGS_ALIGN_MASK) >> KGSL_MEMFLAGS_ALIGN_SHIFT;
}
/* round address up to the next multiple of (1 << shift) */
static inline unsigned int
kgsl_memarena_align(unsigned int address, unsigned int shift)
{
	unsigned int mask = (1U << shift) - 1;

	return (address + mask) & ~mask;
}
/*
 * Set up the shared-memory aperture: reserve the physical region, ioremap
 * it, and hand the range to a gen_pool allocator (page-granular).
 *
 * Returns 0 on success or a negative errno; on failure everything acquired
 * so far is released in reverse order.
 */
int
kgsl_sharedmem_init(struct kgsl_sharedmem *shmem)
{
	int result = -EINVAL;

	if (!request_mem_region(shmem->physbase, shmem->size, DRIVER_NAME)) {
		KGSL_MEM_ERR("request_mem_region failed\n");
		goto error;
	}
	shmem->baseptr = ioremap(shmem->physbase, shmem->size);
	KGSL_MEM_INFO("ioremap(shm) = %p\n", shmem->baseptr);
	if (shmem->baseptr == NULL) {
		KGSL_MEM_ERR("ioremap failed for address %08x size %d\n",
			     shmem->physbase, shmem->size);
		result = -ENODEV;
		goto error_release_mem;
	}
	shmem->pool = gen_pool_create(KGSL_PAGESIZE_SHIFT, -1);
	if (shmem->pool == NULL) {
		KGSL_MEM_ERR("gen_pool_create failed\n");
		result = -ENOMEM;
		goto error_iounmap;
	}
	if (gen_pool_add(shmem->pool, shmem->physbase, shmem->size, -1)) {
		/* bugfix: this used to log "gen_pool_create failed" */
		KGSL_MEM_ERR("gen_pool_add failed\n");
		result = -ENOMEM;
		goto error_pool_destroy;
	}
	KGSL_MEM_INFO("physbase 0x%08x size 0x%08x baseptr 0x%p\n",
		      shmem->physbase, shmem->size, shmem->baseptr);
	return 0;
error_pool_destroy:
	gen_pool_destroy(shmem->pool);
	/* bugfix: clear the pointer so a later close() can't double-destroy */
	shmem->pool = NULL;
error_iounmap:
	iounmap(shmem->baseptr);
	shmem->baseptr = NULL;
error_release_mem:
	release_mem_region(shmem->physbase, shmem->size);
error:
	return result;
}
/*
 * Tear down the shared-memory aperture in reverse order of
 * kgsl_sharedmem_init().  Safe on a partially-initialized shmem: each
 * resource is released only if present.  Always returns 0.
 */
int
kgsl_sharedmem_close(struct kgsl_sharedmem *shmem)
{
	if (shmem->pool != NULL) {
		gen_pool_destroy(shmem->pool);
		shmem->pool = NULL;
	}
	if (shmem->baseptr != NULL) {
		KGSL_MEM_INFO("iounmap(shm) = %p\n", shmem->baseptr);
		iounmap(shmem->baseptr);
		shmem->baseptr = NULL;
		release_mem_region(shmem->physbase, shmem->size);
	}
	return 0;
}
/*
 * Translate a device physical address inside the shared-memory aperture
 * to its kernel virtual (host) address.  Returns NULL when the address is
 * outside [physbase, physbase + size) or the aperture is not mapped.
 */
static void *kgsl_memarena_gethostptr(struct kgsl_sharedmem *shmem,
				      uint32_t physaddr)
{
	void *hostptr;

	KGSL_MEM_VDBG("enter (memarena=%p, physaddr=0x%08x)\n",
		      shmem, physaddr);
	BUG_ON(shmem == NULL);
	/* check address range */
	if (physaddr < shmem->physbase ||
	    physaddr >= shmem->physbase + shmem->size)
		return NULL;
	if (shmem->baseptr == NULL) {
		KGSL_MEM_VDBG("return: %p\n", NULL);
		return NULL;
	}
	hostptr = shmem->baseptr + (physaddr - shmem->physbase);
	KGSL_MEM_VDBG("return: %p\n", hostptr);
	return hostptr;
}
/*
 * Allocate 'size' bytes (rounded up to a page) from the shared-memory
 * pool, honoring the alignment encoded in 'flags'.
 *
 * The gen_pool only guarantees page alignment, so for stricter alignment
 * the code over-allocates by (alignment - page), aligns the base upward,
 * and gives the leading/trailing slack back to the pool.  On success
 * memdesc->{physaddr,hostptr,size} are filled in; on failure the memdesc
 * is zeroed and -ENOMEM returned.
 *
 * NOTE(review): gpuaddr/pagetable are not set here — presumably mapped by
 * the caller; confirm against the MMU code.
 */
int
kgsl_sharedmem_alloc(uint32_t flags, int size,
		     struct kgsl_memdesc *memdesc)
{
	struct kgsl_sharedmem *shmem;
	int result = -ENOMEM;
	unsigned int blksize;
	unsigned int baseaddr;
	unsigned int alignshift;
	unsigned int alignedbaseaddr;

	KGSL_MEM_VDBG("enter (flags=0x%08x, size=%d, memdesc=%p)\n",
		      flags, size, memdesc);
	shmem = &kgsl_driver.shmem;
	BUG_ON(memdesc == NULL);
	BUG_ON(size <= 0);
	alignshift = kgsl_memarena_get_order(flags);
	size = ALIGN(size, KGSL_PAGESIZE);
	blksize = size;
	/* over-allocate so the base can be aligned up past page granularity */
	if (alignshift > KGSL_PAGESIZE_SHIFT)
		blksize += (1 << alignshift) - KGSL_PAGESIZE;
	baseaddr = gen_pool_alloc(shmem->pool, blksize);
	if (baseaddr == 0) {
		KGSL_MEM_ERR("gen_pool_alloc failed\n");
		result = -ENOMEM;
		goto done;
	}
	result = 0;
	if (alignshift > KGSL_PAGESIZE_SHIFT) {
		alignedbaseaddr = ALIGN(baseaddr, (1 << alignshift));
		KGSL_MEM_VDBG("ba %x al %x as %d m->as %d bs %x s %x\n",
			      baseaddr, alignedbaseaddr, alignshift,
			      KGSL_PAGESIZE_SHIFT, blksize, size);
		/* return the slack before the aligned base to the pool */
		if (alignedbaseaddr > baseaddr) {
			KGSL_MEM_VDBG("physaddr %x free before %x size %x\n",
				      alignedbaseaddr,
				      baseaddr, alignedbaseaddr - baseaddr);
			gen_pool_free(shmem->pool, baseaddr,
				      alignedbaseaddr - baseaddr);
			blksize -= alignedbaseaddr - baseaddr;
		}
		/* return the slack after the requested size to the pool */
		if (blksize > size) {
			KGSL_MEM_VDBG("physaddr %x free after %x size %x\n",
				      alignedbaseaddr,
				      alignedbaseaddr + size,
				      blksize - size);
			gen_pool_free(shmem->pool,
				      alignedbaseaddr + size,
				      blksize - size);
		}
	} else {
		alignedbaseaddr = baseaddr;
	}
	memdesc->physaddr = alignedbaseaddr;
	memdesc->hostptr = kgsl_memarena_gethostptr(shmem, memdesc->physaddr);
	memdesc->size = size;
	KGSL_MEM_VDBG("ashift %d m->ashift %d blksize %d base %x abase %x\n",
		      alignshift, KGSL_PAGESIZE_SHIFT, blksize, baseaddr,
		      alignedbaseaddr);
done:
	if (result)
		memset(memdesc, 0, sizeof(*memdesc));
	KGSL_MEM_VDBG("return: %d\n", result);
	return result;
}
/*
 * Return a kgsl_sharedmem_alloc() block to the pool and zero the memdesc.
 * The BUG_ONs enforce that the block lies entirely inside the aperture.
 */
void
kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
{
	struct kgsl_sharedmem *shmem = &kgsl_driver.shmem;

	KGSL_MEM_VDBG("enter (shmem=%p, memdesc=%p, physaddr=%08x, size=%d)\n",
		      shmem, memdesc, memdesc->physaddr, memdesc->size);
	BUG_ON(memdesc == NULL);
	BUG_ON(memdesc->size <= 0);
	BUG_ON(shmem->physbase > memdesc->physaddr);
	BUG_ON((shmem->physbase + shmem->size)
	       < (memdesc->physaddr + memdesc->size));
	gen_pool_free(shmem->pool, memdesc->physaddr, memdesc->size);
	/* scrub the descriptor so stale pointers cannot be reused */
	memset(memdesc, 0, sizeof(*memdesc));
	KGSL_MEM_VDBG("return\n");
}
/*
 * Copy sizebytes from offsetbytes within a shared-memory allocation into
 * dst.  Returns 0, -EINVAL on a NULL pointer, or -ERANGE when the span
 * falls outside the allocation.
 */
int
kgsl_sharedmem_read(const struct kgsl_memdesc *memdesc, void *dst,
		    unsigned int offsetbytes, unsigned int sizebytes)
{
	if (memdesc == NULL || memdesc->hostptr == NULL || dst == NULL) {
		KGSL_MEM_ERR("bad ptr memdesc %p hostptr %p dst %p\n",
			     memdesc,
			     (memdesc ? memdesc->hostptr : NULL),
			     dst);
		return -EINVAL;
	}
	/* overflow-safe range check: "offsetbytes + sizebytes" can wrap
	 * around unsigned int and defeat the bounds test */
	if (offsetbytes > memdesc->size ||
	    sizebytes > memdesc->size - offsetbytes) {
		KGSL_MEM_ERR("bad range: offset %d size %d memdesc %d\n",
			     offsetbytes, sizebytes, memdesc->size);
		return -ERANGE;
	}
	memcpy(dst, memdesc->hostptr + offsetbytes, sizebytes);
	return 0;
}
/*
 * Copy sizebytes from value into a shared-memory allocation at
 * offsetbytes.  Returns 0, -EINVAL on a NULL pointer, or -ERANGE when the
 * span falls outside the allocation.
 */
int
kgsl_sharedmem_write(const struct kgsl_memdesc *memdesc,
		     unsigned int offsetbytes,
		     void *value, unsigned int sizebytes)
{
	if (memdesc == NULL || memdesc->hostptr == NULL) {
		KGSL_MEM_ERR("bad ptr memdesc %p hostptr %p\n", memdesc,
			     (memdesc ? memdesc->hostptr : NULL));
		return -EINVAL;
	}
	/* overflow-safe range check: "offsetbytes + sizebytes" can wrap
	 * around unsigned int and defeat the bounds test */
	if (offsetbytes > memdesc->size ||
	    sizebytes > memdesc->size - offsetbytes) {
		KGSL_MEM_ERR("bad range: offset %d size %d memdesc %d\n",
			     offsetbytes, sizebytes, memdesc->size);
		return -ERANGE;
	}
	memcpy(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}
/*
 * memset sizebytes of a shared-memory allocation at offsetbytes to value.
 * Returns 0, -EINVAL on a NULL pointer, or -ERANGE when the span falls
 * outside the allocation.
 */
int
kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
		   unsigned int value, unsigned int sizebytes)
{
	if (memdesc == NULL || memdesc->hostptr == NULL) {
		KGSL_MEM_ERR("bad ptr memdesc %p hostptr %p\n", memdesc,
			     (memdesc ? memdesc->hostptr : NULL));
		return -EINVAL;
	}
	/* overflow-safe range check: "offsetbytes + sizebytes" can wrap
	 * around unsigned int and defeat the bounds test */
	if (offsetbytes > memdesc->size ||
	    sizebytes > memdesc->size - offsetbytes) {
		KGSL_MEM_ERR("bad range: offset %d size %d memdesc %d\n",
			     offsetbytes, sizebytes, memdesc->size);
		return -ERANGE;
	}
	memset(memdesc->hostptr + offsetbytes, value, sizebytes);
	return 0;
}

View File

@ -1,111 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef __GSL_SHAREDMEM_H
#define __GSL_SHAREDMEM_H
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#define KGSL_PAGESIZE 0x1000
#define KGSL_PAGESIZE_SHIFT 12
#define KGSL_PAGEMASK (~(KGSL_PAGESIZE - 1))
struct kgsl_pagetable;
struct platform_device;
struct gen_pool;
/*
 * memory allocation flags
 *
 * Layout (per the MASK/SHIFT defines at the bottom of this group):
 *   bits 12-15: aperture selector (KGSL_MEMFLAGS_APERTURE_MASK)
 *   bits 16-23: alignment request (KGSL_MEMFLAGS_ALIGN_MASK)
 * The alignment field holds log2 of the requested alignment
 * (0x06 = 64 B ... 0x0C = 4 KiB ... 0x10 = 64 KiB).
 */
#define KGSL_MEMFLAGS_ANY 0x00000000 /*dont care*/
#define KGSL_MEMFLAGS_APERTUREANY 0x00000000
#define KGSL_MEMFLAGS_EMEM 0x00000000
#define KGSL_MEMFLAGS_CONPHYS 0x00001000
#define KGSL_MEMFLAGS_ALIGNANY 0x00000000
/* NOTE: ALIGN32 is 0, i.e. the same as ALIGNANY -- <32 B is never requested. */
#define KGSL_MEMFLAGS_ALIGN32 0x00000000
#define KGSL_MEMFLAGS_ALIGN64 0x00060000
#define KGSL_MEMFLAGS_ALIGN128 0x00070000
#define KGSL_MEMFLAGS_ALIGN256 0x00080000
#define KGSL_MEMFLAGS_ALIGN512 0x00090000
#define KGSL_MEMFLAGS_ALIGN1K 0x000A0000
#define KGSL_MEMFLAGS_ALIGN2K 0x000B0000
#define KGSL_MEMFLAGS_ALIGN4K 0x000C0000
#define KGSL_MEMFLAGS_ALIGN8K 0x000D0000
#define KGSL_MEMFLAGS_ALIGN16K 0x000E0000
#define KGSL_MEMFLAGS_ALIGN32K 0x000F0000
#define KGSL_MEMFLAGS_ALIGN64K 0x00100000
#define KGSL_MEMFLAGS_ALIGNPAGE KGSL_MEMFLAGS_ALIGN4K
/* fail the alloc if the flags cannot be honored */
#define KGSL_MEMFLAGS_STRICTREQUEST 0x80000000
#define KGSL_MEMFLAGS_APERTURE_MASK 0x0000F000
#define KGSL_MEMFLAGS_ALIGN_MASK 0x00FF0000
#define KGSL_MEMFLAGS_APERTURE_SHIFT 12
#define KGSL_MEMFLAGS_ALIGN_SHIFT 16
/* shared memory allocation */
/*
 * Descriptor for one shared-memory buffer, tracking both the CPU and
 * GPU views of the same allocation.
 */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable; /* GPU pagetable this buffer is mapped in */
void *hostptr;        /* kernel (CPU) virtual address */
unsigned int gpuaddr; /* GPU virtual address */
unsigned int physaddr; /* physical address */
unsigned int size;    /* buffer size in bytes */
unsigned int priv;    /* private flags -- semantics defined in the .c file */
};
/*
 * The shared-memory region itself: a physically contiguous area carved
 * up by a gen_pool allocator (see kgsl_sharedmem_init/close below).
 */
struct kgsl_sharedmem {
void *baseptr;         /* kernel virtual base of the region */
unsigned int physbase; /* physical base address of the region */
unsigned int size;     /* total region size in bytes */
struct gen_pool *pool; /* allocator handing out sub-ranges of the region */
};
/*
 * Shared-memory API.  The int-returning calls appear to use the kernel
 * convention of 0 on success / negative errno on failure (the .c file
 * is seen returning -ERANGE from kgsl_sharedmem_set) -- confirm against
 * kgsl_sharedmem.c for each entry point.
 */

/* Allocate 'size' bytes per the KGSL_MEMFLAGS_* in 'flags'; fills *memdesc. */
int kgsl_sharedmem_alloc(uint32_t flags, int size,
struct kgsl_memdesc *memdesc);
/*TODO: add protection flags */
/* Map an existing physical range into the given pagetable; fills *memdesc. */
int kgsl_sharedmem_import(struct kgsl_pagetable *,
uint32_t phys_addr,
uint32_t size,
struct kgsl_memdesc *memdesc);
/* Release a buffer obtained from alloc/import. */
void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);
/* Copy 'sizebytes' out of the buffer, starting at 'offsetbytes', into dst. */
int kgsl_sharedmem_read(const struct kgsl_memdesc *memdesc, void *dst,
unsigned int offsetbytes, unsigned int sizebytes);
/* Copy 'sizebytes' from 'value' into the buffer at 'offsetbytes'. */
int kgsl_sharedmem_write(const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes, void *value,
unsigned int sizebytes);
/* memset() a range of the buffer; returns -ERANGE if the range is bad. */
int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
unsigned int offsetbytes, unsigned int value,
unsigned int sizebytes);
/* Set up / tear down the backing region and its gen_pool. */
int kgsl_sharedmem_init(struct kgsl_sharedmem *shmem);
int kgsl_sharedmem_close(struct kgsl_sharedmem *shmem);
#endif /* __GSL_SHAREDMEM_H */

File diff suppressed because it is too large Load Diff

View File

@ -1,400 +0,0 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
*/
#ifndef _YAMATO_REG_H
#define _YAMATO_REG_H
/*
 * Hardware event codes for the GPU's VGT block.  The numeric values are
 * fixed by the hardware interface -- do not renumber.  Gaps in the
 * sequence (10-12, 16-19, 25-26) are presumably reserved codes; confirm
 * against the register specification.
 */
enum VGT_EVENT_TYPE {
VS_DEALLOC = 0,
PS_DEALLOC = 1,
VS_DONE_TS = 2,
PS_DONE_TS = 3,
CACHE_FLUSH_TS = 4,
CONTEXT_DONE = 5,
CACHE_FLUSH = 6,
VIZQUERY_START = 7,
VIZQUERY_END = 8,
SC_WAIT_WC = 9,
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
PERFCOUNTER_START = 23,
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
};
/*
 * Color buffer format codes (bit layout encoded in the name, e.g.
 * COLORX_5_6_5 = 16-bit RGB565).  Values are hardware format codes --
 * do not renumber.
 */
enum COLORFORMATX {
COLORX_4_4_4_4 = 0,
COLORX_1_5_5_5 = 1,
COLORX_5_6_5 = 2,
COLORX_8 = 3,
COLORX_8_8 = 4,
COLORX_8_8_8_8 = 5,
COLORX_S8_8_8_8 = 6,
COLORX_16_FLOAT = 7,
COLORX_16_16_FLOAT = 8,
COLORX_16_16_16_16_FLOAT = 9,
COLORX_32_FLOAT = 10,
COLORX_32_32_FLOAT = 11,
COLORX_32_32_32_32_FLOAT = 12,
COLORX_2_3_3 = 13,
COLORX_8_8_8 = 14,
};
/*
 * Texture/surface format codes (component bit layout encoded in the
 * name; FMT_DXT* are compressed formats).  Values are hardware format
 * codes -- do not renumber.  Value 21 is skipped, presumably a reserved
 * code; confirm against the register specification.
 */
enum SURFACEFORMAT {
FMT_1_REVERSE = 0,
FMT_1 = 1,
FMT_8 = 2,
FMT_1_5_5_5 = 3,
FMT_5_6_5 = 4,
FMT_6_5_5 = 5,
FMT_8_8_8_8 = 6,
FMT_2_10_10_10 = 7,
FMT_8_A = 8,
FMT_8_B = 9,
FMT_8_8 = 10,
FMT_Cr_Y1_Cb_Y0 = 11,
FMT_Y1_Cr_Y0_Cb = 12,
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
FMT_10_11_11 = 16,
FMT_11_11_10 = 17,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
FMT_24_8 = 22,
FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
FMT_16_EXPAND = 27,
FMT_16_16_EXPAND = 28,
FMT_16_16_16_16_EXPAND = 29,
FMT_16_FLOAT = 30,
FMT_16_16_FLOAT = 31,
FMT_16_16_16_16_FLOAT = 32,
FMT_32 = 33,
FMT_32_32 = 34,
FMT_32_32_32_32 = 35,
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
FMT_32_AS_8 = 39,
FMT_32_AS_8_8 = 40,
FMT_16_MPEG = 41,
FMT_16_16_MPEG = 42,
FMT_8_INTERLACED = 43,
FMT_32_AS_8_INTERLACED = 44,
FMT_32_AS_8_8_INTERLACED = 45,
FMT_16_INTERLACED = 46,
FMT_16_MPEG_INTERLACED = 47,
FMT_16_16_MPEG_INTERLACED = 48,
FMT_DXN = 49,
FMT_8_8_8_8_AS_16_16_16_16 = 50,
FMT_DXT1_AS_16_16_16_16 = 51,
FMT_DXT2_3_AS_16_16_16_16 = 52,
FMT_DXT4_5_AS_16_16_16_16 = 53,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
FMT_10_11_11_AS_16_16_16_16 = 55,
FMT_11_11_10_AS_16_16_16_16 = 56,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
FMT_DXT3A_AS_1_1_1_1 = 61
};
/*
 * Decoded view of the RB_EDRAM_INFO register.  Field widths sum to
 * 32 bits (4 + 2 + 8 + 18).  The union overlays the raw 32-bit register
 * value with the decoded bitfields.
 *
 * NOTE(review): bitfield allocation order is implementation-defined in
 * C; this layout is only correct for the compiler/ABI the driver was
 * built with.
 */
#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
#define RB_EDRAM_INFO_UNUSED0_SIZE 8
#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
struct rb_edram_info_t {
unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
};
union reg_rb_edram_info {
unsigned int val:32; /* raw register value */
struct rb_edram_info_t f; /* decoded fields */
};
/*
 * Decoded view of the CP_RB_CNTL (command-processor ring-buffer control)
 * register.  Field widths sum to 32 bits.  The union overlays the raw
 * register value with the decoded bitfields.
 *
 * NOTE(review): bitfield allocation order is implementation-defined in
 * C; this layout is only correct for the compiler/ABI the driver was
 * built with.
 */
#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
#define CP_RB_CNTL_UNUSED0_SIZE 2
#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
#define CP_RB_CNTL_UNUSED1_SIZE 2
#define CP_RB_CNTL_BUF_SWAP_SIZE 2
#define CP_RB_CNTL_UNUSED2_SIZE 2
#define CP_RB_CNTL_RB_POLL_EN_SIZE 1
#define CP_RB_CNTL_UNUSED3_SIZE 6
#define CP_RB_CNTL_RB_NO_UPDATE_SIZE 1
#define CP_RB_CNTL_UNUSED4_SIZE 3
#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
struct cp_rb_cntl_t {
unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
};
union reg_cp_rb_cntl {
unsigned int val:32; /* raw register value */
struct cp_rb_cntl_t f; /* decoded fields */
};
/*
 * Per-register bit masks (REG__FIELD_MASK) and field shifts
 * (REG__FIELD__SHIFT) for the Yamato GPU registers, grouped by register.
 */

/* RB_COLOR_INFO / RB_COPY_DEST_INFO */
#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
#define RB_COPY_DEST_INFO__COPY_DEST_FORMAT__SHIFT 0x00000004
/* SQ_INT_CNTL: shader watchdog interrupt enables */
#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
/* MH_INTERRUPT_MASK: memory-hub error interrupt enables */
#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
/* RBBM_INT_CNTL: RBBM interrupt enables */
#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
/* RBBM_STATUS: per-block busy/pending status bits */
#define RBBM_STATUS__CMDFIFO_AVAIL_MASK 0x0000001fL
#define RBBM_STATUS__TC_BUSY_MASK 0x00000020L
#define RBBM_STATUS__HIRQ_PENDING_MASK 0x00000100L
#define RBBM_STATUS__CPRQ_PENDING_MASK 0x00000200L
#define RBBM_STATUS__CFRQ_PENDING_MASK 0x00000400L
#define RBBM_STATUS__PFRQ_PENDING_MASK 0x00000800L
#define RBBM_STATUS__VGT_BUSY_NO_DMA_MASK 0x00001000L
#define RBBM_STATUS__RBBM_WU_BUSY_MASK 0x00004000L
#define RBBM_STATUS__CP_NRT_BUSY_MASK 0x00010000L
#define RBBM_STATUS__MH_BUSY_MASK 0x00040000L
#define RBBM_STATUS__MH_COHERENCY_BUSY_MASK 0x00080000L
#define RBBM_STATUS__SX_BUSY_MASK 0x00200000L
#define RBBM_STATUS__TPC_BUSY_MASK 0x00400000L
#define RBBM_STATUS__SC_CNTX_BUSY_MASK 0x01000000L
#define RBBM_STATUS__PA_BUSY_MASK 0x02000000L
#define RBBM_STATUS__VGT_BUSY_MASK 0x04000000L
#define RBBM_STATUS__SQ_CNTX17_BUSY_MASK 0x08000000L
#define RBBM_STATUS__SQ_CNTX0_BUSY_MASK 0x10000000L
#define RBBM_STATUS__RB_CNTX_BUSY_MASK 0x40000000L
#define RBBM_STATUS__GUI_ACTIVE_MASK 0x80000000L
/* CP_INT_CNTL: command-processor interrupt enables / error bits */
#define CP_INT_CNTL__SW_INT_MASK 0x00080000L
#define CP_INT_CNTL__T0_PACKET_IN_IB_MASK 0x00800000L
#define CP_INT_CNTL__OPCODE_ERROR_MASK 0x01000000L
#define CP_INT_CNTL__PROTECTED_MODE_ERROR_MASK 0x02000000L
#define CP_INT_CNTL__RESERVED_BIT_ERROR_MASK 0x04000000L
#define CP_INT_CNTL__IB_ERROR_MASK 0x08000000L
#define CP_INT_CNTL__IB2_INT_MASK 0x20000000L
#define CP_INT_CNTL__IB1_INT_MASK 0x40000000L
#define CP_INT_CNTL__RB_INT_MASK 0x80000000L
/* MASTER_INT_SIGNAL: which sub-block raised the interrupt */
#define MASTER_INT_SIGNAL__MH_INT_STAT 0x00000020L
#define MASTER_INT_SIGNAL__SQ_INT_STAT 0x04000000L
#define MASTER_INT_SIGNAL__CP_INT_STAT 0x40000000L
#define MASTER_INT_SIGNAL__RBBM_INT_STAT 0x80000000L
/* RB_EDRAM_INFO field masks (see union reg_rb_edram_info above) */
#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
/* MH_ARBITER_CONFIG field shifts */
#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
/* MH_MMU_CONFIG: per-client MMU behavior field shifts (2 bits each) */
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
/* CP_RB_CNTL field shifts (see union reg_cp_rb_cntl above) */
#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
/* RB_COLOR_INFO / RB_EDRAM_INFO field shifts */
#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
/*
 * Yamato GPU register offsets, grouped by hardware block (CP = command
 * processor, MH = memory hub, PA = primitive assembly, RBBM = master
 * control, RB = render backend, SQ = shader sequencer, VGT = vertex
 * grouper).  Offsets are as used with the driver's register accessors;
 * confirm the addressing unit (dword vs byte) against the .c files.
 *
 * NOTE: duplicate (identical-valued) redefinitions of
 * REG_PA_SC_SCREEN_SCISSOR_BR and REG_VGT_VERTEX_REUSE_BLOCK_CNTL that
 * appeared lower in this list have been removed; each register is now
 * defined exactly once.
 */

/* Command processor (CP) */
#define REG_CP_CSQ_IB1_STAT 0x01FE
#define REG_CP_CSQ_IB2_STAT 0x01FF
#define REG_CP_CSQ_RB_STAT 0x01FD
#define REG_CP_DEBUG 0x01FC
#define REG_CP_IB1_BASE 0x0458
#define REG_CP_IB1_BUFSZ 0x0459
#define REG_CP_IB2_BASE 0x045A
#define REG_CP_IB2_BUFSZ 0x045B
#define REG_CP_INT_ACK 0x01F4
#define REG_CP_INT_CNTL 0x01F2
#define REG_CP_INT_STATUS 0x01F3
#define REG_CP_ME_CNTL 0x01F6
#define REG_CP_ME_RAM_DATA 0x01FA
#define REG_CP_ME_RAM_WADDR 0x01F8
#define REG_CP_ME_STATUS 0x01F7
#define REG_CP_PFP_UCODE_ADDR 0x00C0
#define REG_CP_PFP_UCODE_DATA 0x00C1
#define REG_CP_QUEUE_THRESHOLDS 0x01D5
#define REG_CP_RB_BASE 0x01C0
#define REG_CP_RB_CNTL 0x01C1
#define REG_CP_RB_RPTR 0x01C4
#define REG_CP_RB_RPTR_ADDR 0x01C3
#define REG_CP_RB_RPTR_WR 0x01C7
#define REG_CP_RB_WPTR 0x01C5
#define REG_CP_RB_WPTR_BASE 0x01C8
#define REG_CP_RB_WPTR_DELAY 0x01C6
#define REG_CP_STAT 0x047F
#define REG_CP_STATE_DEBUG_DATA 0x01ED
#define REG_CP_STATE_DEBUG_INDEX 0x01EC
#define REG_CP_ST_BASE 0x044D
#define REG_CP_ST_BUFSZ 0x044E
/* Interrupt routing */
#define REG_MASTER_INT_SIGNAL 0x03B7
/* Memory hub (MH): arbiter, interrupts, MMU */
#define REG_MH_ARBITER_CONFIG 0x0A40
#define REG_MH_INTERRUPT_CLEAR 0x0A44
#define REG_MH_INTERRUPT_MASK 0x0A42
#define REG_MH_INTERRUPT_STATUS 0x0A43
#define REG_MH_MMU_CONFIG 0x0040
#define REG_MH_MMU_INVALIDATE 0x0045
#define REG_MH_MMU_MPU_BASE 0x0046
#define REG_MH_MMU_MPU_END 0x0047
#define REG_MH_MMU_PAGE_FAULT 0x0043
#define REG_MH_MMU_PT_BASE 0x0042
#define REG_MH_MMU_TRAN_ERROR 0x0044
#define REG_MH_MMU_VA_RANGE 0x0041
/* Primitive assembly (PA): clipper, scan converter, setup */
#define REG_PA_CL_VPORT_XSCALE 0x210F
#define REG_PA_CL_VPORT_ZOFFSET 0x2114
#define REG_PA_CL_VPORT_ZSCALE 0x2113
#define REG_PA_CL_VTE_CNTL 0x2206
#define REG_PA_SC_AA_MASK 0x2312
#define REG_PA_SC_LINE_CNTL 0x2300
#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
#define REG_PA_SC_SCREEN_SCISSOR_TL 0x200E
#define REG_PA_SC_VIZ_QUERY 0x2293
#define REG_PA_SC_VIZ_QUERY_STATUS 0x0C44
#define REG_PA_SC_WINDOW_OFFSET 0x2080
#define REG_PA_SC_WINDOW_SCISSOR_BR 0x2082
#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
#define REG_PA_SU_FACE_DATA 0x0C86
#define REG_PA_SU_POINT_SIZE 0x2280
#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
#define REG_PA_SU_SC_MODE_CNTL 0x2205
/* RBBM: master control/status */
#define REG_RBBM_CNTL 0x003B
#define REG_RBBM_INT_ACK 0x03B6
#define REG_RBBM_INT_CNTL 0x03B4
#define REG_RBBM_INT_STATUS 0x03B5
#define REG_RBBM_PATCH_RELEASE 0x0001
#define REG_RBBM_PERIPHID1 0x03F9
#define REG_RBBM_PERIPHID2 0x03FA
#define REG_RBBM_DEBUG 0x039B
#define REG_RBBM_PM_OVERRIDE1 0x039C
#define REG_RBBM_PM_OVERRIDE2 0x039D
#define REG_RBBM_READ_ERROR 0x03B3
#define REG_RBBM_SOFT_RESET 0x003C
#define REG_RBBM_STATUS 0x05D0
/* Render backend (RB) */
#define REG_RB_COLORCONTROL 0x2202
#define REG_RB_COLOR_DEST_MASK 0x2326
#define REG_RB_COLOR_MASK 0x2104
#define REG_RB_COPY_CONTROL 0x2318
#define REG_RB_DEPTHCONTROL 0x2200
#define REG_RB_EDRAM_INFO 0x0F02
#define REG_RB_MODECONTROL 0x2208
#define REG_RB_SURFACE_INFO 0x2000
/* CP scratch registers */
#define REG_SCRATCH_ADDR 0x01DD
#define REG_SCRATCH_REG0 0x0578
#define REG_SCRATCH_REG2 0x057A
#define REG_SCRATCH_UMSK 0x01DC
/* Shader sequencer (SQ) */
#define REG_SQ_CF_BOOLEANS 0x4900
#define REG_SQ_CF_LOOP 0x4908
#define REG_SQ_GPR_MANAGEMENT 0x0D00
#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
#define REG_SQ_INT_ACK 0x0D36
#define REG_SQ_INT_CNTL 0x0D34
#define REG_SQ_INT_STATUS 0x0D35
#define REG_SQ_PROGRAM_CNTL 0x2180
#define REG_SQ_PS_PROGRAM 0x21F6
#define REG_SQ_VS_PROGRAM 0x21F7
#define REG_SQ_WRAPPING_0 0x2183
#define REG_SQ_WRAPPING_1 0x2184
/* Vertex grouper (VGT) */
#define REG_VGT_ENHANCE 0x2294
#define REG_VGT_INDX_OFFSET 0x2102
#define REG_VGT_MAX_VTX_INDX 0x2100
#define REG_VGT_MIN_VTX_INDX 0x2101
/* Misc blocks */
#define REG_TP0_CHICKEN 0x0E1E
#define REG_TC_CNTL_STATUS 0x0E00
#define REG_PA_SC_AA_CONFIG 0x2301
#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
#define REG_SQ_INTERPOLATOR_CNTL 0x2182
#define REG_RB_DEPTH_INFO 0x2002
#define REG_COHER_DEST_BASE_0 0x2006
#define REG_RB_FOG_COLOR 0x2109
#define REG_RB_STENCILREFMASK_BF 0x210C
#define REG_PA_SC_LINE_STIPPLE 0x2283
#define REG_SQ_PS_CONST 0x2308
#define REG_RB_DEPTH_CLEAR 0x231D
#define REG_RB_SAMPLE_COUNT_CTL 0x2324
#define REG_SQ_CONSTANT_0 0x4000
#define REG_SQ_FETCH_0 0x4800
#define REG_MH_AXI_ERROR 0xA45
#define REG_COHER_BASE_PM4 0xA2A
#define REG_COHER_STATUS_PM4 0xA2B
#define REG_COHER_SIZE_PM4 0xA29
#endif /* _YAMATO_REG_H */