Ion carveout and content protect heap buffers do not have a struct page associated with them, so sg_phys() will not work reliably on these buffers. Set the dma_address field on physically contiguous buffers; when mapping a scatterlist to the gpummu, use sg_dma_address() first, and fall back to sg_phys() if it returns 0.

msm: kgsl: Use kzalloc to allocate scatterlists of 1 page or less

The majority of the scatterlist allocations used in KGSL are under 1 page (1 page of struct scatterlist is approximately 1024 entries, equalling 4MB of allocated buffer). In these cases using vmalloc for the sglist is undesirable and slow. Add functions to check the size of the allocation and favor kzalloc for 1-page allocations, with vmalloc for larger lists.
168 lines
4.5 KiB
C
Executable File
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
 * Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_SHAREDMEM_H
#define __KGSL_SHAREDMEM_H

#include <linux/slab.h>
#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>
#include <linux/vmalloc.h>
#include <linux/kmemleak.h>

/*
 * Convert a page to a physical address
 */
#define phys_to_page(phys)	(pfn_to_page(__phys_to_pfn(phys)))

struct kgsl_device;
struct kgsl_process_private;

#define KGSL_CACHE_OP_INV	0x01
#define KGSL_CACHE_OP_FLUSH	0x02
#define KGSL_CACHE_OP_CLEAN	0x03

/** Set if the memdesc describes cached memory */
#define KGSL_MEMFLAGS_CACHED	0x00000001

extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;

int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
			   struct kgsl_pagetable *pagetable, size_t size);

int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
				struct kgsl_pagetable *pagetable,
				size_t size, int flags);

int kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size);

int kgsl_sharedmem_ebimem_user(struct kgsl_memdesc *memdesc,
			       struct kgsl_pagetable *pagetable,
			       size_t size, int flags);

int kgsl_sharedmem_ebimem(struct kgsl_memdesc *memdesc,
			  struct kgsl_pagetable *pagetable,
			  size_t size);

void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc);

int kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
			 uint32_t *dst,
			 unsigned int offsetbytes);

int kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
			  unsigned int offsetbytes,
			  uint32_t src);

int kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc,
		       unsigned int offsetbytes, unsigned int value,
		       unsigned int sizebytes);

void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op);

void kgsl_process_init_sysfs(struct kgsl_process_private *private);
void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);

int kgsl_sharedmem_init_sysfs(void);
void kgsl_sharedmem_uninit_sysfs(void);

static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
{
	/*
	 * Try sg_dma_address first to support ion carveout
	 * regions which do not work with sg_phys().
	 */
	unsigned int pa = sg_dma_address(sg);
	if (pa == 0)
		pa = sg_phys(sg);
	return pa;
}
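
/*
 * Illustrative sketch (not part of the original header): a
 * gpummu-style mapping loop consuming kgsl_get_sg_pa(), so carveout
 * entries (dma_address set, no struct page) and page-backed entries
 * (sg_phys() valid) take the same path. example_map_range() is a
 * hypothetical stand-in for the driver's pagetable-entry writer.
 */
static inline int example_map_range(struct kgsl_pagetable *pt,
				    unsigned int pa, unsigned int len)
{
	(void) pt;
	(void) pa;
	(void) len;
	/* real code would write GPU PTEs covering [pa, pa + len) */
	return 0;
}

static inline int example_map_sg(struct kgsl_pagetable *pt,
				 struct scatterlist *sglist, int sglen)
{
	struct scatterlist *s;
	int i, ret;

	for_each_sg(sglist, s, sglen, i) {
		ret = example_map_range(pt, kgsl_get_sg_pa(s), s->length);
		if (ret)
			return ret;
	}
	return 0;
}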

int
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
		       const struct kgsl_memdesc *memdesc);

/*
 * For relatively small sglists, it is preferable to use kzalloc
 * rather than going down the vmalloc rat hole. If the size of
 * the sglist is smaller than PAGE_SIZE, use kzalloc; otherwise
 * fall back to vmalloc.
 */

static inline void *kgsl_sg_alloc(unsigned int sglen)
{
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
	else
		return vmalloc(sglen * sizeof(struct scatterlist));
}

static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
{
	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
		kfree(ptr);
	else
		vfree(ptr);
}
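
/*
 * Illustrative sketch (hypothetical helper, not in the original
 * header): building a page-backed scatterlist with kgsl_sg_alloc().
 * Callers must release it with kgsl_sg_free() and the same sglen so
 * the kfree()/vfree() choice matches the allocation path.
 */
static inline struct scatterlist *example_build_sg(struct page **pages,
						   unsigned int npages)
{
	struct scatterlist *sg = kgsl_sg_alloc(npages);
	unsigned int i;

	if (sg == NULL)
		return NULL;

	sg_init_table(sg, npages);
	for (i = 0; i < npages; i++)
		sg_set_page(&sg[i], pages[i], PAGE_SIZE, 0);

	return sg;
}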

static inline int
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
		unsigned int physaddr, unsigned int size)
{
	memdesc->sg = kgsl_sg_alloc(1);
	if (memdesc->sg == NULL)
		return -ENOMEM;

	kmemleak_not_leak(memdesc->sg);

	memdesc->sglen = 1;
	sg_init_table(memdesc->sg, 1);
	memdesc->sg[0].length = size;
	memdesc->sg[0].offset = 0;
	memdesc->sg[0].dma_address = physaddr;
	return 0;
}
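
/*
 * Illustrative sketch (hypothetical helper, not in the original
 * header): importing an already-allocated physically contiguous
 * region, e.g. an ion carveout buffer known only by physical address.
 * memdesc_sg_phys() records the address in dma_address, which is what
 * kgsl_get_sg_pa() reads first, so the region maps correctly even
 * though no struct page backs it.
 */
static inline int example_import_carveout(struct kgsl_memdesc *memdesc,
					  unsigned int physaddr,
					  unsigned int size)
{
	return memdesc_sg_phys(memdesc, physaddr, size);
}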

static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
	      struct kgsl_pagetable *pagetable, size_t size)
{
#ifdef CONFIG_MSM_KGSL_MMU
	return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
#else
	return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
#endif
}

static inline int
kgsl_allocate_user(struct kgsl_memdesc *memdesc,
		   struct kgsl_pagetable *pagetable,
		   size_t size, unsigned int flags)
{
#ifdef CONFIG_MSM_KGSL_MMU
	return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
#else
	return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size, flags);
#endif
}

static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
	int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
#ifndef CONFIG_MSM_KGSL_MMU
	if (!ret)
		memdesc->gpuaddr = memdesc->physaddr;
#endif
	return ret;
}
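
/*
 * Note on the wrappers above: the backing store is chosen at compile
 * time. With CONFIG_MSM_KGSL_MMU set, kgsl_allocate() and
 * kgsl_allocate_user() return vmalloc-backed memory mapped through the
 * GPU pagetable; without it they fall back to contiguous EBI memory,
 * and, as in kgsl_allocate_contiguous(), the GPU address is simply the
 * physical address.
 */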

#endif /* __KGSL_SHAREDMEM_H */