msm: kgsl: set the dma_address field of scatterlists

Ion carveout and content protect heap buffers do not have a struct page
associated with them, so sg_phys() will not work reliably on them. Set the
dma_address field on physically contiguous buffers; when mapping a
scatterlist to the gpummu, use sg_dma_address() first, and if it returns 0,
fall back to sg_phys().

msm: kgsl: Use kzalloc to allocate scatterlists of 1 page or less

The majority of the scatterlist allocations used in KGSL are under 1 page
(one page of struct scatterlist is approximately 1024 entries, equalling
4 MB of allocated buffer). In these cases using vmalloc for the sglist is
undesirable and slow, so add functions that check the size of the
allocation and favor kzalloc for allocations of 1 page or less, falling
back to vmalloc for larger lists.
This commit is contained in:
parent a7bb935abb
commit 5c1047c767
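Before the hunks, a standalone sketch of the lookup order the first message describes. This is plain C modelling the idea, not driver code: the struct, field names, and addresses below are illustrative stand-ins for sg_dma_address()/sg_phys() from <linux/scatterlist.h>.

/*
 * Standalone model of the fallback described above. A simplified
 * scatterlist entry carries dma_address, which is filled in for
 * carveout buffers that have no page backing; page-backed entries
 * leave it zero and resolve through the page-derived address.
 */
#include <stdio.h>

struct fake_sg {
	unsigned long page_phys;   /* what sg_phys() would compute; 0 if no page */
	unsigned long dma_address; /* set explicitly for carveout buffers */
	unsigned int length;
};

static unsigned long get_pa(const struct fake_sg *s)
{
	/* Prefer dma_address; treat 0 as "not set" and fall back. */
	return s->dma_address ? s->dma_address : s->page_phys;
}

int main(void)
{
	struct fake_sg carveout = { .page_phys = 0, .dma_address = 0x88000000ul, .length = 4096 };
	struct fake_sg normal   = { .page_phys = 0x10000000ul, .dma_address = 0, .length = 4096 };

	printf("carveout pa=0x%lx\n", get_pa(&carveout)); /* 0x88000000 */
	printf("normal   pa=0x%lx\n", get_pa(&normal));   /* 0x10000000 */
	return 0;
}

The point of the ordering is that a zero dma_address means "not set", so buffers with a normal struct page keep working unchanged.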
@@ -1349,7 +1349,8 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
 	int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
 	unsigned long paddr = (unsigned long) addr;
 
-	memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
+	memdesc->sg = kgsl_sg_alloc(sglen);
+
 	if (memdesc->sg == NULL)
 		return -ENOMEM;
 
@@ -1389,7 +1390,7 @@ static int memdesc_sg_virt(struct kgsl_memdesc *memdesc,
 
 err:
 	spin_unlock(&current->mm->page_table_lock);
-	vfree(memdesc->sg);
+	kgsl_sg_free(memdesc->sg, sglen);
 	memdesc->sg = NULL;
 
 	return -EINVAL;
@@ -685,7 +685,7 @@ kgsl_gpummu_map(void *mmu_specific_pt,
 		flushtlb = 1;
 
 	for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
-		unsigned int paddr = sg_phys(s);
+		unsigned int paddr = kgsl_get_sg_pa(s);
 		unsigned int j;
 
 		/* Each sg entry might be multiple pages long */
@@ -154,7 +154,7 @@ static struct mem_entry_stats mem_stats[] = {
 #endif
 	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, user),
 #ifdef CONFIG_ION
-	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_USER, ion),
+	MEM_ENTRY_STAT(KGSL_MEM_ENTRY_ION, ion),
 #endif
 };
 
@@ -286,7 +286,7 @@ static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
 	int i;
 
 	for_each_sg(sg, s, sglen, i) {
-		unsigned int paddr = sg_phys(s);
+		unsigned int paddr = kgsl_get_sg_pa(s);
 		_outer_cache_range_op(op, paddr, s->length);
 	}
 }
@@ -465,7 +465,8 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
 	memdesc->priv = KGSL_MEMFLAGS_CACHED;
 	memdesc->ops = &kgsl_vmalloc_ops;
 
-	memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
+	memdesc->sg = kgsl_sg_alloc(sglen);
+
 	if (memdesc->sg == NULL) {
 		ret = -ENOMEM;
 		goto done;
@@ -487,7 +488,7 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
 		flush_dcache_page(page);
 		sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
 	}
-	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
+	outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
 			KGSL_CACHE_OP_FLUSH);
 
 	ret = kgsl_mmu_map(pagetable, memdesc, protflags);
@@ -593,7 +594,7 @@ void kgsl_sharedmem_free(struct kgsl_memdesc *memdesc)
 	if (memdesc->ops && memdesc->ops->free)
 		memdesc->ops->free(memdesc);
 
-	vfree(memdesc->sg);
+	kgsl_sg_free(memdesc->sg, memdesc->sglen);
 
 	memset(memdesc, 0, sizeof(*memdesc));
 }
@@ -75,25 +75,58 @@ void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
 int kgsl_sharedmem_init_sysfs(void);
 void kgsl_sharedmem_uninit_sysfs(void);
 
+static inline unsigned int kgsl_get_sg_pa(struct scatterlist *sg)
+{
+	/*
+	 * Try sg_dma_address first to support ion carveout
+	 * regions which do not work with sg_phys().
+	 */
+	unsigned int pa = sg_dma_address(sg);
+	if (pa == 0)
+		pa = sg_phys(sg);
+	return pa;
+}
+
 int
 kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
 		const struct kgsl_memdesc *memdesc);
 
+/*
+ * For relatively small sglists, it is preferable to use kzalloc
+ * rather than going down the vmalloc rat hole. If the size of
+ * the sglist is < PAGE_SIZE use kzalloc otherwise fallback to
+ * vmalloc
+ */
+
+static inline void *kgsl_sg_alloc(unsigned int sglen)
+{
+	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
+		return kzalloc(sglen * sizeof(struct scatterlist), GFP_KERNEL);
+	else
+		return vmalloc(sglen * sizeof(struct scatterlist));
+}
+
+static inline void kgsl_sg_free(void *ptr, unsigned int sglen)
+{
+	if ((sglen * sizeof(struct scatterlist)) < PAGE_SIZE)
+		kfree(ptr);
+	else
+		vfree(ptr);
+}
+
 static inline int
 memdesc_sg_phys(struct kgsl_memdesc *memdesc,
 		unsigned int physaddr, unsigned int size)
 {
 	struct page *page = phys_to_page(physaddr);
 
-	memdesc->sg = vmalloc(sizeof(struct scatterlist) * 1);
+	memdesc->sg = kgsl_sg_alloc(1);
 	if (memdesc->sg == NULL)
 		return -ENOMEM;
 
 	kmemleak_not_leak(memdesc->sg);
 
 	memdesc->sglen = 1;
 	sg_init_table(memdesc->sg, 1);
 	sg_set_page(&memdesc->sg[0], page, size, 0);
 	memdesc->sg[0].length = size;
 	memdesc->sg[0].offset = 0;
+	memdesc->sg[0].dma_address = physaddr;
 	return 0;
 }
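As a usage note for kgsl_sg_alloc()/kgsl_sg_free() above, here is a standalone model of the size cutoff. sizeof(struct scatterlist) and PAGE_SIZE depend on the kernel configuration, so the 20-byte and 4096-byte values below are assumptions for a typical 32-bit ARM build, not figures from the patch; the kernel helpers use the real constants.

/*
 * Standalone model of the kzalloc/vmalloc decision in kgsl_sg_alloc().
 * MODEL_SG_SIZE is an assumed sizeof(struct scatterlist); the exact
 * cutoff shifts with the target config.
 */
#include <stdio.h>

#define MODEL_PAGE_SIZE 4096u
#define MODEL_SG_SIZE     20u	/* assumed sizeof(struct scatterlist) */

static const char *alloc_path(unsigned int sglen)
{
	/* Same test as kgsl_sg_alloc(): small lists avoid vmalloc. */
	return (sglen * MODEL_SG_SIZE) < MODEL_PAGE_SIZE ? "kzalloc" : "vmalloc";
}

int main(void)
{
	unsigned int lens[] = { 1, 64, 204, 205, 1024 };
	unsigned int i;

	for (i = 0; i < sizeof(lens) / sizeof(lens[0]); i++)
		printf("sglen=%4u -> %s\n", lens[i], alloc_path(lens[i]));
	return 0;
}

Whatever the exact struct size, the key property is that kgsl_sg_free() applies the same size test to pick kfree() or vfree(), so allocation and free always take matching paths; that is why the free helper needs the list length as an argument.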