htcleo:

- fixed graphics glitches
- added some memory optimizations from htc-msm-2.6.32
	- some memory problems still present
Danijel Posilovic 2010-08-30 18:44:32 +02:00
parent dd777b0b10
commit 7a022ba4a8
8 changed files with 282 additions and 219 deletions

arch/arm/include/asm/mmu.h

@@ -6,6 +6,7 @@
typedef struct {
#ifdef CONFIG_CPU_HAS_ASID
unsigned int id;
spinlock_t id_lock;
#endif
unsigned int kvm_seq;
} mm_context_t;
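The new id_lock serializes writes to context.id: during an SMP ASID rollover the same mm can be updated from several CPUs, including from IPI context, so the update must be IRQ-safe. A minimal sketch of the locking pattern, not part of this commit and with a hypothetical helper name (the real user is set_mm_context() in arch/arm/mm/context.c below):

	/* Sketch only: guarded ASID update under mm->context.id_lock. */
	static void example_set_asid(mm_context_t *ctx, unsigned int asid)
	{
		unsigned long flags;

		spin_lock_irqsave(&ctx->id_lock, flags);	/* IRQ-safe: may run from an IPI */
		ctx->id = asid;
		spin_unlock_irqrestore(&ctx->id_lock, flags);
	}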

arch/arm/mm/cache-l2x0.c

@@ -2,6 +2,7 @@
* arch/arm/mm/cache-l2x0.c - L210/L220 cache controller support
*
* Copyright (C) 2007 ARM Limited
* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -26,6 +27,7 @@
#define CACHE_LINE_SIZE 32
static void __iomem *l2x0_base;
static uint32_t aux_ctrl_save;
static DEFINE_SPINLOCK(l2x0_lock);
static inline void sync_writel(unsigned long val, unsigned long reg,
@@ -46,6 +48,11 @@ static inline void cache_sync(void)
sync_writel(0, L2X0_CACHE_SYNC, 1);
}
void l2x0_cache_sync(void)
{
cache_sync();
}
static inline void l2x0_inv_all(void)
{
/* invalidate all ways */
@@ -53,6 +60,13 @@ static inline void l2x0_inv_all(void)
cache_sync();
}
static inline void l2x0_flush_all(void)
{
/* clean and invalidate all ways */
sync_writel(0xff, L2X0_CLEAN_INV_WAY, 0xff);
cache_sync();
}
static void l2x0_inv_range(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -73,6 +87,25 @@ static void l2x0_inv_range(unsigned long start, unsigned long end)
cache_sync();
}
static void l2x0_inv_range_atomic(unsigned long start, unsigned long end)
{
unsigned long addr;
if (start & (CACHE_LINE_SIZE - 1)) {
start &= ~(CACHE_LINE_SIZE - 1);
writel(start, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
start += CACHE_LINE_SIZE;
}
if (end & (CACHE_LINE_SIZE - 1)) {
end &= ~(CACHE_LINE_SIZE - 1);
writel(end, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
}
for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
writel(addr, l2x0_base + L2X0_INV_LINE_PA);
}
static void l2x0_clean_range(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -83,6 +116,15 @@ static void l2x0_clean_range(unsigned long start, unsigned long end)
cache_sync();
}
static void l2x0_clean_range_atomic(unsigned long start, unsigned long end)
{
unsigned long addr;
start &= ~(CACHE_LINE_SIZE - 1);
for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
writel(addr, l2x0_base + L2X0_CLEAN_LINE_PA);
}
static void l2x0_flush_range(unsigned long start, unsigned long end)
{
unsigned long addr;
@@ -93,28 +135,82 @@ static void l2x0_flush_range(unsigned long start, unsigned long end)
cache_sync();
}
void l2x0_flush_range_atomic(unsigned long start, unsigned long end)
{
unsigned long addr;
start &= ~(CACHE_LINE_SIZE - 1);
for (addr = start; addr < end; addr += CACHE_LINE_SIZE)
writel(addr, l2x0_base + L2X0_CLEAN_INV_LINE_PA);
}
void __init l2x0_init(void __iomem *base, __u32 aux_val, __u32 aux_mask)
{
__u32 aux;
__u32 bits;
l2x0_base = base;
/* disable L2X0 */
writel(0, l2x0_base + L2X0_CTRL);
bits = readl(l2x0_base + L2X0_CTRL);
bits &= ~0x01; /* clear bit 0 */
writel(bits, l2x0_base + L2X0_CTRL);
aux = readl(l2x0_base + L2X0_AUX_CTRL);
aux &= aux_mask;
aux |= aux_val;
writel(aux, l2x0_base + L2X0_AUX_CTRL);
bits = readl(l2x0_base + L2X0_AUX_CTRL);
bits &= aux_mask;
bits |= aux_val;
writel(bits, l2x0_base + L2X0_AUX_CTRL);
l2x0_inv_all();
/* enable L2X0 */
writel(1, l2x0_base + L2X0_CTRL);
bits = readl(l2x0_base + L2X0_CTRL);
bits |= 0x01; /* set bit 0 */
writel(bits, l2x0_base + L2X0_CTRL);
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
bits = readl(l2x0_base + L2X0_CACHE_ID);
bits >>= 6; /* part no, bit 6 to 9 */
bits &= 0x0f; /* 4 bits */
if (bits == 2) { /* L220 */
outer_cache.inv_range = l2x0_inv_range;
outer_cache.clean_range = l2x0_clean_range;
outer_cache.flush_range = l2x0_flush_range;
printk(KERN_INFO "L220 cache controller enabled\n");
} else { /* L210 */
outer_cache.inv_range = l2x0_inv_range_atomic;
outer_cache.clean_range = l2x0_clean_range_atomic;
outer_cache.flush_range = l2x0_flush_range_atomic;
printk(KERN_INFO "L210 cache controller enabled\n");
}
printk(KERN_INFO "L2X0 cache controller enabled\n");
}
void l2x0_suspend(void)
{
/* Save aux control register value */
aux_ctrl_save = readl(l2x0_base + L2X0_AUX_CTRL);
/* Flush all cache */
l2x0_flush_all();
/* Disable the cache */
writel(0, l2x0_base + L2X0_CTRL);
/* Memory barrier */
dmb();
}
void l2x0_resume(int collapsed)
{
if (collapsed) {
/* Disable the cache */
writel(0, l2x0_base + L2X0_CTRL);
/* Restore aux control register value */
writel(aux_ctrl_save, l2x0_base + L2X0_AUX_CTRL);
/* Invalidate the cache */
l2x0_inv_all();
}
/* Enable the cache */
writel(1, l2x0_base + L2X0_CTRL);
}
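Two things change in this file: l2x0_init() now decodes the part number from L2X0_CACHE_ID (bits 9:6; part 2 is an L220) and wires the locked range operations for the L220 but the new *_atomic variants, which write line-by-line without taking the lock or issuing an explicit cache_sync(), for the L210; and l2x0_suspend()/l2x0_resume() are added to bracket a power-collapse path where the controller may lose state. A hedged sketch of a caller, with a hypothetical function name that is not part of this patch:

	/* Sketch only: intended call order around power collapse. */
	static void example_power_collapse(void)
	{
		l2x0_suspend();		/* save AUX_CTRL, flush all ways, disable L2 */
		/* ...enter the low-power state; L2 contents may be lost... */
		l2x0_resume(1);		/* collapsed=1: restore AUX_CTRL, invalidate, re-enable */
	}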

arch/arm/mm/context.c

@@ -10,12 +10,17 @@
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/percpu.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
static DEFINE_SPINLOCK(cpu_asid_lock);
unsigned int cpu_last_asid = ASID_FIRST_VERSION;
#ifdef CONFIG_SMP
DEFINE_PER_CPU(struct mm_struct *, current_mm);
#endif
/*
* We fork()ed a process, and we need a new context for the child
@@ -26,13 +31,109 @@ unsigned int cpu_last_asid = ASID_FIRST_VERSION;
void __init_new_context(struct task_struct *tsk, struct mm_struct *mm)
{
mm->context.id = 0;
spin_lock_init(&mm->context.id_lock);
}
static void flush_context(void)
{
/* set the reserved ASID before flushing the TLB */
asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (0));
isb();
local_flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
__flush_icache_all();
dsb();
}
}
#ifdef CONFIG_SMP
static void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
unsigned long flags;
/*
* Locking needed for multi-threaded applications where the
* same mm->context.id could be set from different CPUs during
* the broadcast. This function is also called via IPI so the
* mm->context.id_lock has to be IRQ-safe.
*/
spin_lock_irqsave(&mm->context.id_lock, flags);
if (likely((mm->context.id ^ cpu_last_asid) >> ASID_BITS)) {
/*
* Old version of ASID found. Set the new one and
* reset mm_cpumask(mm).
*/
mm->context.id = asid;
cpumask_clear(mm_cpumask(mm));
}
spin_unlock_irqrestore(&mm->context.id_lock, flags);
/*
* Set the mm_cpumask(mm) bit for the current CPU.
*/
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
}
/*
* Reset the ASID on the current CPU. This function call is broadcast
* from the CPU handling the ASID rollover and holding cpu_asid_lock.
*/
static void reset_context(void *info)
{
unsigned int asid;
unsigned int cpu = smp_processor_id();
struct mm_struct *mm = per_cpu(current_mm, cpu);
/*
* Check if a current_mm was set on this CPU as it might still
* be in the early booting stages and using the reserved ASID.
*/
if (!mm)
return;
smp_rmb();
asid = cpu_last_asid + cpu + 1;
flush_context();
set_mm_context(mm, asid);
/* set the new ASID */
asm("mcr p15, 0, %0, c13, c0, 1\n" : : "r" (mm->context.id));
isb();
}
#else
static inline void set_mm_context(struct mm_struct *mm, unsigned int asid)
{
mm->context.id = asid;
cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
}
#endif
void __new_context(struct mm_struct *mm)
{
unsigned int asid;
spin_lock(&cpu_asid_lock);
#ifdef CONFIG_SMP
/*
* Check the ASID again, in case the change was broadcast from
* another CPU before we acquired the lock.
*/
if (unlikely(((mm->context.id ^ cpu_last_asid) >> ASID_BITS) == 0)) {
cpumask_set_cpu(smp_processor_id(), mm_cpumask(mm));
spin_unlock(&cpu_asid_lock);
return;
}
#endif
/*
* At this point, it is guaranteed that the current mm (with
* an old ASID) isn't active on any other CPU since the ASIDs
* are changed simultaneously via IPI.
*/
asid = ++cpu_last_asid;
if (asid == 0)
asid = cpu_last_asid = ASID_FIRST_VERSION;
@@ -42,20 +143,15 @@ void __new_context(struct mm_struct *mm)
* to start a new version and flush the TLB.
*/
if (unlikely((asid & ~ASID_MASK) == 0)) {
asid = ++cpu_last_asid;
/* set the reserved ASID before flushing the TLB */
asm("mcr p15, 0, %0, c13, c0, 1 @ set reserved context ID\n"
:
: "r" (0));
isb();
flush_tlb_all();
if (icache_is_vivt_asid_tagged()) {
__flush_icache_all();
dsb();
}
asid = cpu_last_asid + smp_processor_id() + 1;
flush_context();
#ifdef CONFIG_SMP
smp_wmb();
smp_call_function(reset_context, NULL, 1);
#endif
cpu_last_asid += NR_CPUS;
}
spin_unlock(&cpu_asid_lock);
cpumask_copy(mm_cpumask(mm), cpumask_of(smp_processor_id()));
mm->context.id = asid;
set_mm_context(mm, asid);
spin_unlock(&cpu_asid_lock);
}
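The rewritten rollover works in ASID generations: the low ASID_BITS bits of context.id hold the hardware ASID and the high bits hold a generation counter, so (id ^ cpu_last_asid) >> ASID_BITS is non-zero exactly when an mm carries a stale generation. On rollover the initiating CPU takes cpu_last_asid + smp_processor_id() + 1, broadcasts reset_context() so every other CPU takes the slot for its own index, and cpu_last_asid advances by NR_CPUS so the per-CPU ASIDs cannot collide. A standalone illustrative restatement of the staleness test (EXAMPLE_ASID_BITS = 8 is an assumption, the typical ARMv7 value):

	#define EXAMPLE_ASID_BITS 8

	static int example_asid_is_stale(unsigned int id, unsigned int last_asid)
	{
		/* Non-zero when the generation (high) bits differ. */
		return (id ^ last_asid) >> EXAMPLE_ASID_BITS;
	}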

arch/arm/mm/dma-mapping.c

@@ -564,7 +564,16 @@ void dma_cache_maint(const void *start, size_t size, int direction)
}
inner_op(start, start + size);
#ifdef CONFIG_OUTER_CACHE
/*
* A page table walk would be required if the address isn't linearly
* mapped. Simply BUG_ON for now.
*/
BUG_ON(!virt_addr_valid(start) || !virt_addr_valid(start + size - 1));
outer_op(__pa(start), __pa(start) + size);
#endif
}
EXPORT_SYMBOL(dma_cache_maint);
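The BUG_ON makes a real restriction explicit: __pa() is only meaningful for addresses in the kernel's linear mapping, so passing a vmalloc or ioremap address here would hand the outer cache a bogus physical range. An illustrative sketch, assuming the usual slab/vmalloc/dma-mapping headers:

	/* Sketch only: which addresses satisfy the new check. */
	static void example_addr_validity(void)
	{
		void *lin = kmalloc(PAGE_SIZE, GFP_KERNEL);	/* linear map: virt_addr_valid() holds */
		void *vm = vmalloc(PAGE_SIZE);			/* vmalloc space: would trip the BUG_ON */

		dma_cache_maint(lin, PAGE_SIZE, DMA_TO_DEVICE);	/* fine */
		/* dma_cache_maint(vm, PAGE_SIZE, DMA_TO_DEVICE);   would BUG() */
		kfree(lin);
		vfree(vm);
	}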

arch/arm/mm/init.c

@@ -251,6 +251,24 @@ static void __init bootmem_init_node(int node, struct meminfo *mi,
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
#if defined(CONFIG_FLATMEM) && !defined(CONFIG_HOLES_IN_ZONE)
/*
* The VM code assumes that hole end addresses are aligned if
* CONFIG_HOLES_IN_ZONE is not enabled. This results in
* panics since we free unused memmap entries on ARM.
* This check shouldn't be necessary for the last bank's end
* address, since the VM code accounts for the total zone size.
*/
if ((i < (mi->nr_banks - 1)) &&
(bank_pfn_end(bank) & (MAX_ORDER_NR_PAGES - 1))) {
pr_err("Memory bank[%d] not aligned to 0x%x bytes.\n"
"\tMake bank end address align with MAX_ORDER\n"
"\tor enable option CONFIG_HOLES_IN_ZONE.\n",
i, __pfn_to_phys(MAX_ORDER_NR_PAGES));
BUG();
}
#endif
if (!bank->highmem)
map_memory_bank(bank);
}
@@ -528,9 +546,8 @@ static void __init free_unused_memmap_node(int node, struct meminfo *mi)
unsigned int i;
/*
* [FIXME] This relies on each bank being in address order. This
* may not be the case, especially if the user has provided the
* information on the command line.
* This relies on each bank being in address order. The banks
* are sorted previously in bootmem_init().
*/
for_each_nodebank(i, mi, node) {
struct membank *bank = &mi->bank[i];
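Concretely: with 4 KiB pages and the default MAX_ORDER of 11, MAX_ORDER_NR_PAGES is 1 << 10 = 1024 pages, so every bank except the last must end on a 4 MiB physical boundary or the new check fires. A one-line restatement of the enforced condition (the 1024 is specific to that configuration):

	/* Illustrative: bank end PFN must be MAX_ORDER-aligned. */
	#define EXAMPLE_BANK_END_OK(end_pfn)	(((end_pfn) & (1024 - 1)) == 0)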

arch/arm/mm/proc-v7.S

@@ -185,7 +185,7 @@ cpu_v7_name:
* - cache type register is implemented
*/
__v7_setup:
#ifdef CONFIG_SMP
#if defined(CONFIG_SMP) && !defined(CONFIG_ARCH_MSM_SCORPIONMP)
mrc p15, 0, r0, c1, c0, 1
tst r0, #(1 << 6) @ SMP/nAMP mode enabled?
orreq r0, r0, #(1 << 6) | (1 << 0) @ Enable SMP/nAMP mode and
@@ -235,6 +235,21 @@ __v7_setup:
mcr p15, 0, r10, c2, c0, 2 @ TTB control register
orr r4, r4, #TTB_FLAGS
mcr p15, 0, r4, c2, c0, 1 @ load TTB1
#ifdef CONFIG_ARCH_MSM_SCORPION
#ifdef CONFIG_ARCH_QSD8X50
mov r0, #0x77
#else
mov r0, #0x33
#endif
mcr p15, 3, r0, c15, c0, 3 @ set L2CR1
mrc p15, 0, r0, c1, c0, 1 @ read ACTLR
#ifdef CONFIG_CPU_CACHE_ERR_REPORT
orr r0, r0, #0x37 @ turn on L1/L2 error reporting
#else
bic r0, r0, #0x37
#endif
mcr p15, 0, r0, c1, c0, 1 @ write ACTLR
#endif
/*
* Memory region attributes with SCTLR.TRE=1
*
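The Scorpion block programs L2CR1 (0x77 on QSD8x50, 0x33 otherwise) and then read-modify-writes ACTLR, where 0x37 (binary 110111) covers the L1/L2 error-reporting enable bits. An equivalent C sketch of the ACTLR update; actlr_read()/actlr_write() are hypothetical wrappers for the mrc/mcr pair shown in the assembly:

	/* Sketch only: C equivalent of the ACTLR read-modify-write above. */
	static inline void example_scorpion_actlr_setup(void)
	{
		u32 actlr = actlr_read();	/* mrc p15, 0, rX, c1, c0, 1 */
	#ifdef CONFIG_CPU_CACHE_ERR_REPORT
		actlr |= 0x37;			/* turn on L1/L2 error reporting */
	#else
		actlr &= ~0x37;
	#endif
		actlr_write(actlr);		/* mcr p15, 0, rX, c1, c0, 1 */
	}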

drivers/misc/pmem.c

@@ -1,7 +1,6 @@
/* drivers/android/pmem.c
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@@ -32,8 +31,7 @@
#define PMEM_MAX_ORDER 128
#define PMEM_MIN_ALLOC PAGE_SIZE
#define PMEM_DEBUG 0
//#define PMEM_LOG
#define PMEM_DEBUG 1
/* indicates that a reference to this file has been taken via get_pmem_file,
* the file should not be released until put_pmem_file is called */
@@ -252,8 +250,7 @@ static int pmem_free(int id, int index)
*/
do {
buddy = PMEM_BUDDY_INDEX(id, curr);
if (buddy < pmem[id].num_entries &&
PMEM_IS_FREE(id, buddy) &&
if (PMEM_IS_FREE(id, buddy) &&
PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) {
PMEM_ORDER(id, buddy)++;
PMEM_ORDER(id, curr)++;
@@ -263,15 +260,6 @@ static int pmem_free(int id, int index)
}
} while (curr < pmem[id].num_entries);
#ifdef PMEM_LOG
int i;
for(i=0;i<pmem[id].num_entries;i++)
if(pmem[id].bitmap[i].order>0 || i>=pmem[id].num_entries ) {
printk("free==>index=%d , order=%d , allocated=%d\n",
i,pmem[id].bitmap[i].order,
pmem[id].bitmap[i].allocated);
}
#endif
return 0;
}
@@ -432,7 +420,7 @@ static int pmem_allocate(int id, unsigned long len)
* return an error
*/
if (best_fit < 0) {
printk("pmem: no space left to allocate! %s, pid=%d\n", pmem[id].dev.name, current->pid);
printk("pmem: no space left to allocate!\n");
return -1;
}
@@ -623,25 +611,6 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma)
up_write(&pmem[id].bitmap_sem);
data->index = index;
}
#ifdef PMEM_LOG
int i;
int allc_cnt = 0;
int order_cnt = 0;
for(i=0;i<pmem[id].num_entries;i++)
if(pmem[id].bitmap[i].order>0 || i>=pmem[id].num_entries) {
order_cnt++;
if (pmem[id].bitmap[i].allocated > 0) {
printk("mmap==>index=%d , order=%d,"
"allocated=%d, vbase=0x%8X \n",
i,pmem[id].bitmap[i].order,
pmem[id].bitmap[i].allocated,
vma->vm_start);
allc_cnt++;
}
}
printk("allocated/total = %d/%d\n", allc_cnt,order_cnt);
#endif
/* either no space was available or an error occurred */
if (!has_allocation(file)) {
ret = -EINVAL;
@@ -766,7 +735,7 @@ int get_pmem_addr(struct file *file, unsigned long *start,
return 0;
}
int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
unsigned long *len, struct file **filp)
{
struct file *file;
@@ -819,9 +788,6 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
struct pmem_region_node *region_node;
struct list_head *elt;
void *flush_start, *flush_end;
#ifdef CONFIG_OUTER_CACHE
unsigned long phy_start, phy_end;
#endif
if (!is_pmem_file(file) || !has_allocation(file)) {
return;
@@ -837,14 +803,6 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
/* if this isn't a submmapped file, flush the whole thing */
if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) {
dmac_flush_range(vaddr, vaddr + pmem_len(id, data));
#ifdef CONFIG_OUTER_CACHE
phy_start = (unsigned long)vaddr -
(unsigned long)pmem[id].vbase + pmem[id].base;
phy_end = phy_start + pmem_len(id, data);
outer_flush_range(phy_start, phy_end);
#endif
goto end;
}
/* otherwise, flush the region of the file we are drawing */
@@ -856,15 +814,6 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len)
flush_start = vaddr + region_node->region.offset;
flush_end = flush_start + region_node->region.len;
dmac_flush_range(flush_start, flush_end);
#ifdef CONFIG_OUTER_CACHE
phy_start = (unsigned long)flush_start -
(unsigned long)pmem[id].vbase + pmem[id].base;
phy_end = phy_start + region_node->region.len;
outer_flush_range(phy_start, phy_end);
#endif
break;
}
}
@@ -1199,45 +1148,16 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
DLOG("connect\n");
return pmem_connect(arg, file);
break;
case PMEM_CLEAN_INV_CACHES:
case PMEM_CLEAN_CACHES:
case PMEM_INV_CACHES:
case PMEM_CACHE_FLUSH:
{
struct pmem_addr pmem_addr;
unsigned long vaddr;
unsigned long paddr;
unsigned long length;
unsigned long offset;
id = get_id(file);
if (!pmem[id].cached)
return 0;
if (!has_allocation(file))
return -EINVAL;
if (copy_from_user(&pmem_addr, (void __user *)arg,
sizeof(struct pmem_addr)))
struct pmem_region region;
DLOG("flush\n");
if (copy_from_user(&region, (void __user *)arg,
sizeof(struct pmem_region)))
return -EFAULT;
data = (struct pmem_data *)file->private_data;
offset = pmem_addr.offset;
length = pmem_addr.length;
if (offset + length > pmem_len(id, data))
return -EINVAL;
vaddr = pmem_addr.vaddr;
paddr = pmem_start_addr(id, data) + offset;
if (cmd == PMEM_CLEAN_INV_CACHES)
clean_and_invalidate_caches(vaddr,
length, paddr);
else if (cmd == PMEM_CLEAN_CACHES)
clean_caches(vaddr, length, paddr);
else if (cmd == PMEM_INV_CACHES)
invalidate_caches(vaddr, length, paddr);
flush_pmem_file(file, region.offset, region.len);
break;
}
default:
if (pmem[id].ioctl)
return pmem[id].ioctl(file, cmd, arg);
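This hunk reverts the ioctl interface from the three CAF cache ioctls, which took a struct pmem_addr with an explicit vaddr, back to the single Google-style PMEM_CACHE_FLUSH that takes a struct pmem_region relative to the caller's allocation. A hedged userspace sketch, assuming an already-mmapped /dev/pmem fd and the usual <sys/ioctl.h> / <linux/android_pmem.h> headers:

	/* Sketch only: flush the caller's whole pmem allocation. */
	static int example_pmem_flush(int fd, unsigned long alloc_len)
	{
		struct pmem_region region = { .offset = 0, .len = alloc_len };

		return ioctl(fd, PMEM_CACHE_FLUSH, &region);
	}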

include/linux/android_pmem.h

@@ -16,34 +16,6 @@
#ifndef _ANDROID_PMEM_H_
#define _ANDROID_PMEM_H_
#include <linux/fs.h>
#define PMEM_KERNEL_TEST_MAGIC 0xc0
#define PMEM_KERNEL_TEST_NOMINAL_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 1)
#define PMEM_KERNEL_TEST_ADVERSARIAL_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 2)
#define PMEM_KERNEL_TEST_HUGE_ALLOCATION_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 3)
#define PMEM_KERNEL_TEST_FREE_UNALLOCATED_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 4)
#define PMEM_KERNEL_TEST_LARGE_REGION_NUMBER_TEST_IOCTL \
_IO(PMEM_KERNEL_TEST_MAGIC, 5)
#define PMEM_IOCTL_MAGIC 'p'
#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int)
#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int)
@@ -61,63 +33,7 @@
* struct (with offset set to 0).
*/
#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int)
/* Revokes gpu registers and resets the gpu. Pass a pointer to the
* start of the mapped gpu regs (the vaddr returned by mmap) as the argument.
*/
#define HW3D_REVOKE_GPU _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
#define HW3D_GRANT_GPU _IOW(PMEM_IOCTL_MAGIC, 9, unsigned int)
#define HW3D_WAIT_FOR_INTERRUPT _IOW(PMEM_IOCTL_MAGIC, 10, unsigned int)
#define PMEM_CLEAN_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 11, unsigned int)
#define PMEM_CLEAN_CACHES _IOW(PMEM_IOCTL_MAGIC, 12, unsigned int)
#define PMEM_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 13, unsigned int)
struct pmem_region {
unsigned long offset;
unsigned long len;
};
struct pmem_addr {
unsigned long vaddr;
unsigned long offset;
unsigned long length;
};
#ifdef __KERNEL__
void put_pmem_fd(int fd);
void flush_pmem_fd(int fd, unsigned long start, unsigned long len);
enum pmem_allocator_type {
/* Zero is a default in platform PMEM structures in the board files,
* when the "allocator_type" structure element is not explicitly
* defined
*/
PMEM_ALLOCATORTYPE_BITMAP = 0, /* forced to be zero here */
PMEM_ALLOCATORTYPE_ALLORNOTHING,
PMEM_ALLOCATORTYPE_BUDDYBESTFIT,
PMEM_ALLOCATORTYPE_MAX,
};
#define PMEM_MEMTYPE_MASK 0x7
#define PMEM_INVALID_MEMTYPE 0x0
#define PMEM_MEMTYPE_EBI1 0x1
#define PMEM_MEMTYPE_RESERVED_INVALID1 0x2
#define PMEM_MEMTYPE_RESERVED_INVALID2 0x3
#define PMEM_MEMTYPE_RESERVED_INVALID3 0x4
#define PMEM_MEMTYPE_RESERVED_INVALID4 0x5
#define PMEM_MEMTYPE_RESERVED_INVALID5 0x6
#define PMEM_MEMTYPE_RESERVED_INVALID6 0x7
#define PMEM_ALIGNMENT_MASK 0x18
#define PMEM_ALIGNMENT_RESERVED_INVALID1 0x0
#define PMEM_ALIGNMENT_4K 0x8 /* the default */
#define PMEM_ALIGNMENT_1M 0x10
#define PMEM_ALIGNMENT_RESERVED_INVALID2 0x18
/* kernel api names for board specific data structures */
#define PMEM_KERNEL_EBI1_DATA_NAME "pmem_kernel_ebi1"
#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int)
struct android_pmem_platform_data
{
@@ -126,31 +42,24 @@ struct android_pmem_platform_data
unsigned long start;
/* size of memory region */
unsigned long size;
enum pmem_allocator_type no_allocator;
/* treated as a 'hidden' variable in the board files. Can be
* set, but default is the system init value of 0 which becomes a
* quantum of 4K pages.
*/
unsigned int quantum;
/* set to indicate the region should not be managed with an allocator */
unsigned no_allocator;
/* set to indicate maps of this region should be cached, if a mix of
* cached and uncached is desired, set this and open the device with
* O_SYNC to get an uncached region */
unsigned cached;
/* The MSM7k has bits to enable a write buffer in the bus controller*/
unsigned buffered;
/* This PMEM is on memory that may be powered off */
unsigned unstable;
};
/* flags in the following function defined as above. */
int32_t pmem_kalloc(const size_t size, const uint32_t flags);
int32_t pmem_kfree(const int32_t physaddr);
struct pmem_region {
unsigned long offset;
unsigned long len;
};
#ifdef CONFIG_ANDROID_PMEM
int is_pmem_file(struct file *file);
int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart,
int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart,
unsigned long *end, struct file **filp);
int get_pmem_user_addr(struct file *file, unsigned long *start,
unsigned long *end);
@@ -179,6 +88,6 @@ static inline int pmem_setup(struct android_pmem_platform_data *pdata,
static inline int pmem_remap(struct pmem_region *region, struct file *file,
unsigned operation) { return -ENOSYS; }
#endif
#endif /* __KERNEL__ */
#endif //_ANDROID_PMEM_H_
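With get_pmem_file() reverted to taking a plain int fd, an in-kernel client pins a pmem buffer across a DMA operation roughly as in this hedged sketch (error handling trimmed; put_pmem_file() is the matching release helper in pmem.c, not shown in the excerpt above):

	/* Sketch only: pin a pmem buffer around a DMA transfer. */
	static void example_dma_to_pmem(int fd)
	{
		unsigned long start, vstart, len;
		struct file *filp;

		if (get_pmem_file(fd, &start, &vstart, &len, &filp) == 0) {
			/* start: physical base, vstart: kernel vaddr, len: bytes */
			/* ...program the DMA engine for [start, start + len)... */
			put_pmem_file(filp);
		}
	}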