msm: kgsl: remove readl/writel use for dma memory
For dma_alloc_coherent() you don't need writel/readl because it's just a plain old void *. Linux tries very hard to make a distinction between io memory (void __iomem *) and memory (void *) so that drivers are portable to architectures that don't have a way to access registers via pointer dereferences. You can see http://lwn.net/Articles/102232/ and the Linus rant http://lwn.net/Articles/102240/ here for more details behind the motivation. msm: kgsl: Allocate physical pages instead of using vmalloc Replace vmalloc allocation with physical page allocation. For most allocations we do not need a kernel virtual address. vmalloc uses up the kernel virtual address space. Replacing vmalloc with physical page allocation, and mapping that allocation to kernel space only when it is required, prevents the kgsl driver from using unnecessary vmalloc virtual space.
This commit is contained in:
parent
8c39724a75
commit
361e591fe7
@ -962,7 +962,7 @@ static int adreno_suspend_context(struct kgsl_device *device)
|
||||
return status;
|
||||
}
|
||||
|
||||
const struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
|
||||
struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
|
||||
unsigned int pt_base,
|
||||
unsigned int gpuaddr,
|
||||
unsigned int size)
|
||||
@ -1042,7 +1042,7 @@ const struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
|
||||
uint8_t *adreno_convertaddr(struct kgsl_device *device, unsigned int pt_base,
|
||||
unsigned int gpuaddr, unsigned int size)
|
||||
{
|
||||
const struct kgsl_memdesc *memdesc;
|
||||
struct kgsl_memdesc *memdesc;
|
||||
|
||||
memdesc = adreno_find_region(device, pt_base, gpuaddr, size);
|
||||
|
||||
|
@ -1,4 +1,4 @@
|
||||
/* Copyright (c) 2008-2011, Code Aurora Forum. All rights reserved.
|
||||
/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
|
||||
*
|
||||
* This program is free software; you can redistribute it and/or modify
|
||||
* it under the terms of the GNU General Public License version 2 and
|
||||
@ -83,7 +83,7 @@ void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
|
||||
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
|
||||
unsigned int value);
|
||||
|
||||
const struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
|
||||
struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
|
||||
unsigned int pt_base,
|
||||
unsigned int gpuaddr,
|
||||
unsigned int size);
|
||||
|
4
drivers/gpu/msm/adreno_ringbuffer.h
Normal file → Executable file
4
drivers/gpu/msm/adreno_ringbuffer.h
Normal file → Executable file
@ -64,7 +64,7 @@ struct adreno_ringbuffer {
|
||||
|
||||
#define GSL_RB_WRITE(ring, gpuaddr, data) \
|
||||
do { \
|
||||
writel_relaxed(data, ring); \
|
||||
*ring = data; \
|
||||
wmb(); \
|
||||
kgsl_cffdump_setmem(gpuaddr, data, 4); \
|
||||
ring++; \
|
||||
@ -93,7 +93,7 @@ struct adreno_ringbuffer {
|
||||
#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
|
||||
#define GSL_RB_GET_READPTR(rb, data) \
|
||||
do { \
|
||||
*(data) = readl_relaxed(&(rb)->memptrs->rptr); \
|
||||
*(data) = rb->memptrs->rptr; \
|
||||
} while (0)
|
||||
#else
|
||||
#define GSL_RB_CNTL_NO_UPDATE 0x1 /* disable */
|
||||
|
@ -1208,9 +1208,9 @@ kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
|
||||
|
||||
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
|
||||
|
||||
result = remap_vmalloc_range(vma, (void *) entry->memdesc.hostptr, 0);
|
||||
result = kgsl_sharedmem_map_vma(vma, &entry->memdesc);
|
||||
if (result) {
|
||||
KGSL_CORE_ERR("remap_vmalloc_range failed: %d\n", result);
|
||||
KGSL_CORE_ERR("kgsl_sharedmem_map_vma failed: %d\n", result);
|
||||
goto error_free_vmalloc;
|
||||
}
|
||||
|
||||
|
@ -21,6 +21,7 @@
|
||||
#include <linux/mutex.h>
|
||||
#include <linux/cdev.h>
|
||||
#include <linux/regulator/consumer.h>
|
||||
#include <linux/mm.h>
|
||||
|
||||
#define KGSL_NAME "kgsl"
|
||||
|
||||
@ -105,7 +106,15 @@ struct kgsl_driver {
|
||||
extern struct kgsl_driver kgsl_driver;
|
||||
|
||||
struct kgsl_pagetable;
|
||||
struct kgsl_memdesc_ops;
|
||||
struct kgsl_memdesc;
|
||||
|
||||
struct kgsl_memdesc_ops {
|
||||
int (*vmflags)(struct kgsl_memdesc *);
|
||||
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
|
||||
struct vm_fault *);
|
||||
void (*free)(struct kgsl_memdesc *memdesc);
|
||||
int (*map_kernel_mem)(struct kgsl_memdesc *);
|
||||
};
|
||||
|
||||
/* shared memory allocation */
|
||||
struct kgsl_memdesc {
|
||||
@ -184,12 +193,14 @@ static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
static inline uint8_t *kgsl_gpuaddr_to_vaddr(const struct kgsl_memdesc *memdesc,
|
||||
static inline uint8_t *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
|
||||
unsigned int gpuaddr)
|
||||
{
|
||||
if (memdesc->hostptr == NULL || memdesc->gpuaddr == 0 ||
|
||||
(gpuaddr < memdesc->gpuaddr ||
|
||||
gpuaddr >= memdesc->gpuaddr + memdesc->size))
|
||||
if (memdesc->gpuaddr == 0 ||
|
||||
gpuaddr < memdesc->gpuaddr ||
|
||||
gpuaddr >= (memdesc->gpuaddr + memdesc->size) ||
|
||||
(NULL == memdesc->hostptr && memdesc->ops->map_kernel_mem &&
|
||||
memdesc->ops->map_kernel_mem(memdesc)))
|
||||
return NULL;
|
||||
|
||||
return memdesc->hostptr + (gpuaddr - memdesc->gpuaddr);
|
||||
|
7
drivers/gpu/msm/kgsl_drm.c
Normal file → Executable file
7
drivers/gpu/msm/kgsl_drm.c
Normal file → Executable file
@ -1068,17 +1068,18 @@ int kgsl_gem_kmem_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
|
||||
struct drm_gem_object *obj = vma->vm_private_data;
|
||||
struct drm_device *dev = obj->dev;
|
||||
struct drm_kgsl_gem_object *priv;
|
||||
unsigned long offset, pg;
|
||||
unsigned long offset;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
mutex_lock(&dev->struct_mutex);
|
||||
|
||||
priv = obj->driver_private;
|
||||
|
||||
offset = (unsigned long) vmf->virtual_address - vma->vm_start;
|
||||
pg = (unsigned long) priv->memdesc.hostptr + offset;
|
||||
i = offset >> PAGE_SHIFT;
|
||||
page = sg_page(&(priv->memdesc.sg[i]));
|
||||
|
||||
page = vmalloc_to_page((void *) pg);
|
||||
if (!page) {
|
||||
mutex_unlock(&dev->struct_mutex);
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
6
drivers/gpu/msm/kgsl_gpummu.c
Normal file → Executable file
6
drivers/gpu/msm/kgsl_gpummu.c
Normal file → Executable file
@ -385,14 +385,16 @@ kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
|
||||
{
|
||||
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
|
||||
|
||||
writel_relaxed(val, &baseptr[pte]);
|
||||
BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
|
||||
baseptr[pte] = val;
|
||||
}
|
||||
|
||||
static inline uint32_t
|
||||
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
|
||||
{
|
||||
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
|
||||
return readl_relaxed(&baseptr[pte]) & GSL_PT_PAGE_ADDR_MASK;
|
||||
BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
|
||||
return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
|
||||
}
|
||||
|
||||
static unsigned int kgsl_gpummu_pt_get_flags(struct kgsl_pagetable *pt,
|
||||
|
@ -301,13 +301,14 @@ static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
|
||||
struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
{
|
||||
unsigned long offset, pg;
|
||||
unsigned long offset;
|
||||
struct page *page;
|
||||
int i;
|
||||
|
||||
offset = (unsigned long) vmf->virtual_address - vma->vm_start;
|
||||
pg = (unsigned long) memdesc->hostptr + offset;
|
||||
|
||||
page = vmalloc_to_page((void *) pg);
|
||||
i = offset >> PAGE_SHIFT;
|
||||
page = sg_page(&memdesc->sg[i]);
|
||||
if (page == NULL)
|
||||
return VM_FAULT_SIGBUS;
|
||||
|
||||
@ -324,8 +325,14 @@ static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
|
||||
|
||||
static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
|
||||
{
|
||||
int i = 0;
|
||||
struct scatterlist *sg;
|
||||
kgsl_driver.stats.vmalloc -= memdesc->size;
|
||||
vfree(memdesc->hostptr);
|
||||
if (memdesc->hostptr)
|
||||
vunmap(memdesc->hostptr);
|
||||
if (memdesc->sg)
|
||||
for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
|
||||
__free_page(sg_page(sg));
|
||||
}
|
||||
|
||||
static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
|
||||
@ -333,6 +340,39 @@ static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
|
||||
return VM_RESERVED | VM_IO | VM_PFNMAP | VM_DONTEXPAND;
|
||||
}
|
||||
|
||||
/*
|
||||
* kgsl_vmalloc_map_kernel - Map the memory in memdesc to kernel address space
|
||||
*
|
||||
* @memdesc - The memory descriptor which contains information about the memory
|
||||
*
|
||||
* Return: 0 on success else error code
|
||||
*/
|
||||
static int kgsl_vmalloc_map_kernel(struct kgsl_memdesc *memdesc)
|
||||
{
|
||||
if (!memdesc->hostptr) {
|
||||
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
|
||||
struct page **pages = NULL;
|
||||
struct scatterlist *sg;
|
||||
int i;
|
||||
/* create a list of pages to call vmap */
|
||||
pages = vmalloc(memdesc->sglen * sizeof(struct page *));
|
||||
if (!pages) {
|
||||
KGSL_CORE_ERR("vmalloc(%d) failed\n",
|
||||
memdesc->sglen * sizeof(struct page *));
|
||||
return -ENOMEM;
|
||||
}
|
||||
for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
|
||||
pages[i] = sg_page(sg);
|
||||
memdesc->hostptr = vmap(pages, memdesc->sglen,
|
||||
VM_IOREMAP, page_prot);
|
||||
vfree(pages);
|
||||
}
|
||||
if (!memdesc->hostptr)
|
||||
return -ENOMEM;
|
||||
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int kgsl_contiguous_vmfault(struct kgsl_memdesc *memdesc,
|
||||
struct vm_area_struct *vma,
|
||||
struct vm_fault *vmf)
|
||||
@ -376,6 +416,7 @@ struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
|
||||
.free = kgsl_vmalloc_free,
|
||||
.vmflags = kgsl_vmalloc_vmflags,
|
||||
.vmfault = kgsl_vmalloc_vmfault,
|
||||
.map_kernel_mem = kgsl_vmalloc_map_kernel,
|
||||
};
|
||||
EXPORT_SYMBOL(kgsl_vmalloc_ops);
|
||||
|
||||
@ -413,7 +454,7 @@ EXPORT_SYMBOL(kgsl_cache_range_op);
|
||||
static int
|
||||
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
|
||||
struct kgsl_pagetable *pagetable,
|
||||
void *ptr, size_t size, unsigned int protflags)
|
||||
size_t size, unsigned int protflags)
|
||||
{
|
||||
int order, ret = 0;
|
||||
int sglen = PAGE_ALIGN(size) / PAGE_SIZE;
|
||||
@ -423,7 +464,6 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
|
||||
memdesc->pagetable = pagetable;
|
||||
memdesc->priv = KGSL_MEMFLAGS_CACHED;
|
||||
memdesc->ops = &kgsl_vmalloc_ops;
|
||||
memdesc->hostptr = (void *) ptr;
|
||||
|
||||
memdesc->sg = vmalloc(sglen * sizeof(struct scatterlist));
|
||||
if (memdesc->sg == NULL) {
|
||||
@ -436,19 +476,20 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
|
||||
memdesc->sglen = sglen;
|
||||
sg_init_table(memdesc->sg, sglen);
|
||||
|
||||
for (i = 0; i < memdesc->sglen; i++, ptr += PAGE_SIZE) {
|
||||
struct page *page = vmalloc_to_page(ptr);
|
||||
for (i = 0; i < memdesc->sglen; i++) {
|
||||
struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO |
|
||||
__GFP_HIGHMEM);
|
||||
if (!page) {
|
||||
ret = -EINVAL;
|
||||
ret = -ENOMEM;
|
||||
memdesc->sglen = i;
|
||||
goto done;
|
||||
}
|
||||
flush_dcache_page(page);
|
||||
sg_set_page(&memdesc->sg[i], page, PAGE_SIZE, 0);
|
||||
}
|
||||
outer_cache_range_op_sg(memdesc->sg, memdesc->sglen,
|
||||
KGSL_CACHE_OP_FLUSH);
|
||||
|
||||
kgsl_cache_range_op(memdesc, KGSL_CACHE_OP_INV);
|
||||
|
||||
ret = kgsl_mmu_map(pagetable, memdesc, protflags);
|
||||
|
||||
if (ret)
|
||||
@ -473,20 +514,18 @@ int
|
||||
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
|
||||
struct kgsl_pagetable *pagetable, size_t size)
|
||||
{
|
||||
void *ptr;
|
||||
|
||||
int ret = 0;
|
||||
BUG_ON(size == 0);
|
||||
|
||||
size = ALIGN(size, PAGE_SIZE * 2);
|
||||
ptr = vmalloc(size);
|
||||
|
||||
if (ptr == NULL) {
|
||||
KGSL_CORE_ERR("vmalloc(%d) failed\n", size);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
|
||||
ret = _kgsl_sharedmem_vmalloc(memdesc, pagetable, size,
|
||||
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
|
||||
if (!ret)
|
||||
ret = kgsl_vmalloc_map_kernel(memdesc);
|
||||
if (ret)
|
||||
kgsl_sharedmem_free(memdesc);
|
||||
return ret;
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);
|
||||
|
||||
@ -495,25 +534,15 @@ kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
|
||||
struct kgsl_pagetable *pagetable,
|
||||
size_t size, int flags)
|
||||
{
|
||||
void *ptr;
|
||||
unsigned int protflags;
|
||||
|
||||
BUG_ON(size == 0);
|
||||
ptr = vmalloc_user(size);
|
||||
|
||||
if (ptr == NULL) {
|
||||
KGSL_CORE_ERR("vmalloc_user(%d) failed: allocated=%d\n",
|
||||
size, kgsl_driver.stats.vmalloc);
|
||||
return -ENOMEM;
|
||||
}
|
||||
|
||||
kmemleak_not_leak(ptr);
|
||||
|
||||
protflags = GSL_PT_PAGE_RV;
|
||||
if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
|
||||
protflags |= GSL_PT_PAGE_WV;
|
||||
|
||||
return _kgsl_sharedmem_vmalloc(memdesc, pagetable, ptr, size,
|
||||
return _kgsl_sharedmem_vmalloc(memdesc, pagetable, size,
|
||||
protflags);
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);
|
||||
@ -646,13 +675,17 @@ kgsl_sharedmem_readl(const struct kgsl_memdesc *memdesc,
|
||||
uint32_t *dst,
|
||||
unsigned int offsetbytes)
|
||||
{
|
||||
uint32_t *src;
|
||||
BUG_ON(memdesc == NULL || memdesc->hostptr == NULL || dst == NULL);
|
||||
WARN_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
|
||||
WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
|
||||
if (offsetbytes % sizeof(uint32_t) != 0)
|
||||
return -EINVAL;
|
||||
|
||||
if (offsetbytes + sizeof(unsigned int) > memdesc->size)
|
||||
WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
|
||||
if (offsetbytes + sizeof(uint32_t) > memdesc->size)
|
||||
return -ERANGE;
|
||||
|
||||
*dst = readl_relaxed(memdesc->hostptr + offsetbytes);
|
||||
src = (uint32_t *)(memdesc->hostptr + offsetbytes);
|
||||
*dst = *src;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_sharedmem_readl);
|
||||
@ -662,12 +695,19 @@ kgsl_sharedmem_writel(const struct kgsl_memdesc *memdesc,
|
||||
unsigned int offsetbytes,
|
||||
uint32_t src)
|
||||
{
|
||||
uint32_t *dst;
|
||||
BUG_ON(memdesc == NULL || memdesc->hostptr == NULL);
|
||||
BUG_ON(offsetbytes + sizeof(unsigned int) > memdesc->size);
|
||||
WARN_ON(offsetbytes % sizeof(uint32_t) != 0);
|
||||
if (offsetbytes % sizeof(uint32_t) != 0)
|
||||
return -EINVAL;
|
||||
|
||||
WARN_ON(offsetbytes + sizeof(uint32_t) > memdesc->size);
|
||||
if (offsetbytes + sizeof(uint32_t) > memdesc->size)
|
||||
return -ERANGE;
|
||||
kgsl_cffdump_setmem(memdesc->gpuaddr + offsetbytes,
|
||||
src, sizeof(uint));
|
||||
writel_relaxed(src, memdesc->hostptr + offsetbytes);
|
||||
src, sizeof(uint32_t));
|
||||
dst = (uint32_t *)(memdesc->hostptr + offsetbytes);
|
||||
*dst = src;
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_sharedmem_writel);
|
||||
@ -685,3 +725,33 @@ kgsl_sharedmem_set(const struct kgsl_memdesc *memdesc, unsigned int offsetbytes,
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_sharedmem_set);
|
||||
|
||||
/*
|
||||
* kgsl_sharedmem_map_vma - Map a user vma to physical memory
|
||||
*
|
||||
* @vma - The user vma to map
|
||||
* @memdesc - The memory descriptor which contains information about the
|
||||
* physical memory
|
||||
*
|
||||
* Return: 0 on success else error code
|
||||
*/
|
||||
int
|
||||
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
|
||||
const struct kgsl_memdesc *memdesc)
|
||||
{
|
||||
unsigned long addr = vma->vm_start;
|
||||
unsigned long size = vma->vm_end - vma->vm_start;
|
||||
int ret, i = 0;
|
||||
|
||||
if (!memdesc->sg || (size != memdesc->size) ||
|
||||
(memdesc->sglen != (size / PAGE_SIZE)))
|
||||
return -EINVAL;
|
||||
|
||||
for (; addr < vma->vm_end; addr += PAGE_SIZE, i++) {
|
||||
ret = vm_insert_page(vma, addr, sg_page(&memdesc->sg[i]));
|
||||
if (ret)
|
||||
return ret;
|
||||
}
|
||||
return 0;
|
||||
}
|
||||
EXPORT_SYMBOL(kgsl_sharedmem_map_vma);
|
||||
|
@ -34,13 +34,6 @@ struct kgsl_process_private;
|
||||
/** Set if the memdesc describes cached memory */
|
||||
#define KGSL_MEMFLAGS_CACHED 0x00000001
|
||||
|
||||
struct kgsl_memdesc_ops {
|
||||
int (*vmflags)(struct kgsl_memdesc *);
|
||||
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
|
||||
struct vm_fault *);
|
||||
void (*free)(struct kgsl_memdesc *memdesc);
|
||||
};
|
||||
|
||||
extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
|
||||
|
||||
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
|
||||
@ -82,6 +75,10 @@ void kgsl_process_uninit_sysfs(struct kgsl_process_private *private);
|
||||
int kgsl_sharedmem_init_sysfs(void);
|
||||
void kgsl_sharedmem_uninit_sysfs(void);
|
||||
|
||||
int
|
||||
kgsl_sharedmem_map_vma(struct vm_area_struct *vma,
|
||||
const struct kgsl_memdesc *memdesc);
|
||||
|
||||
static inline int
|
||||
memdesc_sg_phys(struct kgsl_memdesc *memdesc,
|
||||
unsigned int physaddr, unsigned int size)
|
||||
|
Loading…
x
Reference in New Issue
Block a user