msm: kgsl: Change name of vmalloc allocator

Change the vmalloc allocation name to something more appropriate, since
we do not allocate memory using vmalloc for the userspace driver: we
allocate physical pages directly and map them into the user address
space. The name is changed from vmalloc to page_alloc. Add sysfs files
to track memory usage via both vmalloc and page_alloc.
securecrt 2012-07-26 13:52:28 +08:00
parent e2ff78936f
commit 4822aef009
5 changed files with 122 additions and 300 deletions
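For context, the allocation pattern behind the rename can be sketched as follows. This is an illustrative example only, not the driver's code: the helper names (example_page_alloc, example_map_kernel) and the exact GFP flags are assumptions. Physical pages are allocated one by one and tracked in a scatterlist; userspace sees them through the driver's vm fault handler, and a kernel mapping is created with vmap() only when it is actually needed. This is why the new page_alloc/page_alloc_max statistics count the backing pages, while vmalloc/vmalloc_max now only count on-demand kernel mappings.

/*
 * Illustrative sketch only (hypothetical helpers, not part of this commit):
 * allocate physical pages into a scatterlist, and map them into the kernel
 * with vmap() only on demand.
 */
#include <linux/mm.h>
#include <linux/scatterlist.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>

static int example_page_alloc(struct scatterlist **sg_out, int npages)
{
	struct scatterlist *sg;
	int i;

	sg = kcalloc(npages, sizeof(*sg), GFP_KERNEL);
	if (sg == NULL)
		return -ENOMEM;
	sg_init_table(sg, npages);

	/* No vmalloc here: each backing page is allocated individually. */
	for (i = 0; i < npages; i++) {
		struct page *page = alloc_page(GFP_KERNEL | __GFP_ZERO);

		if (page == NULL)
			goto err;
		sg_set_page(&sg[i], page, PAGE_SIZE, 0);
	}

	/* Userspace mappings are built from these pages in the vm fault
	 * handler (kgsl_page_alloc_vmfault below); allocations like this
	 * are accounted against the new page_alloc statistic. */
	*sg_out = sg;
	return 0;

err:
	while (--i >= 0)
		__free_page(sg_page(&sg[i]));
	kfree(sg);
	return -ENOMEM;
}

/* A kernel mapping is created only on demand, as in
 * kgsl_page_alloc_map_kernel(); only this path counts toward vmalloc. */
static void *example_map_kernel(struct scatterlist *sg, int npages)
{
	struct page **pages;
	void *hostptr;
	int i;

	pages = vmalloc(npages * sizeof(struct page *));
	if (pages == NULL)
		return NULL;
	for (i = 0; i < npages; i++)
		pages[i] = sg_page(&sg[i]);
	hostptr = vmap(pages, npages, VM_IOREMAP,
		       pgprot_writecombine(PAGE_KERNEL));
	vfree(pages);
	return hostptr;	/* released with vunmap() when no longer needed */
}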


@ -1242,7 +1242,7 @@ kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
goto error;
}
result = kgsl_sharedmem_vmalloc_user(&entry->memdesc,
result = kgsl_sharedmem_page_alloc_user(&entry->memdesc,
private->pagetable, len,
param->flags);
if (result != 0)
@ -1253,7 +1253,7 @@ kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
result = kgsl_sharedmem_map_vma(vma, &entry->memdesc);
if (result) {
KGSL_CORE_ERR("kgsl_sharedmem_map_vma failed: %d\n", result);
goto error_free_vmalloc;
goto error_free_alloc;
}
param->gpuaddr = entry->memdesc.gpuaddr;
@ -1268,7 +1268,7 @@ kgsl_ioctl_sharedmem_from_vmalloc(struct kgsl_device_private *dev_priv,
kgsl_check_idle(dev_priv->device);
return 0;
error_free_vmalloc:
error_free_alloc:
kgsl_sharedmem_free(&entry->memdesc);
error_free_entry:


@ -95,6 +95,8 @@ struct kgsl_driver {
struct {
unsigned int vmalloc;
unsigned int vmalloc_max;
unsigned int page_alloc;
unsigned int page_alloc_max;
unsigned int coherent;
unsigned int coherent_max;
unsigned int mapped;


@ -1,4 +1,4 @@
/* Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
/* Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -17,7 +17,6 @@
#include "drmP.h"
#include "drm.h"
#include <linux/android_pmem.h>
#include <linux/notifier.h>
#include "kgsl.h"
#include "kgsl_device.h"
@ -39,6 +38,9 @@
#define ENTRY_EMPTY -1
#define ENTRY_NEEDS_CLEANUP -2
#define DRM_KGSL_NOT_INITED -1
#define DRM_KGSL_INITED 1
#define DRM_KGSL_NUM_FENCE_ENTRIES (DRM_KGSL_HANDLE_WAIT_ENTRIES << 2)
#define DRM_KGSL_HANDLE_WAIT_ENTRIES 5
@ -127,6 +129,8 @@ struct drm_kgsl_gem_object {
struct list_head wait_list;
};
static int kgsl_drm_inited = DRM_KGSL_NOT_INITED;
/* This is a global list of all the memory currently mapped in the MMU */
static struct list_head kgsl_mem_list;
@ -152,22 +156,6 @@ static void kgsl_gem_mem_flush(struct kgsl_memdesc *memdesc, int type, int op)
kgsl_cache_range_op(memdesc, cacheop);
}
/* Flush all the memory mapped in the MMU */
void kgsl_gpu_mem_flush(int op)
{
struct drm_kgsl_gem_object *entry;
list_for_each_entry(entry, &kgsl_mem_list, list) {
kgsl_gem_mem_flush(&entry->memdesc, entry->type, op);
}
/* Takes care of WT/WC case.
* More useful when we go barrierless
*/
dmb();
}
/* TODO:
* Add vsync wait */
@ -186,41 +174,6 @@ struct kgsl_drm_device_priv {
struct kgsl_device_private *devpriv[KGSL_DEVICE_MAX];
};
static int kgsl_ts_notifier_cb(struct notifier_block *blk,
unsigned long code, void *_param);
static struct notifier_block kgsl_ts_nb[KGSL_DEVICE_MAX];
static int kgsl_drm_firstopen(struct drm_device *dev)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
struct kgsl_device *device = kgsl_get_device(i);
if (device == NULL)
continue;
kgsl_ts_nb[i].notifier_call = kgsl_ts_notifier_cb;
kgsl_register_ts_notifier(device, &kgsl_ts_nb[i]);
}
return 0;
}
void kgsl_drm_lastclose(struct drm_device *dev)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
struct kgsl_device *device = kgsl_get_device(i);
if (device == NULL)
continue;
kgsl_unregister_ts_notifier(device, &kgsl_ts_nb[i]);
}
}
void kgsl_drm_preclose(struct drm_device *dev, struct drm_file *file_priv)
{
}
@ -268,80 +221,71 @@ kgsl_gem_alloc_memory(struct drm_gem_object *obj)
{
struct drm_kgsl_gem_object *priv = obj->driver_private;
int index;
int result = 0;
/* Return if the memory is already allocated */
if (kgsl_gem_memory_allocated(obj) || TYPE_IS_FD(priv->type))
return 0;
if (priv->pagetable == NULL) {
priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
if (priv->pagetable == NULL) {
DRM_ERROR("Unable to get the GPU MMU pagetable\n");
return -EINVAL;
}
}
if (TYPE_IS_PMEM(priv->type)) {
int type;
if (priv->type == DRM_KGSL_GEM_TYPE_EBI ||
priv->type & DRM_KGSL_GEM_PMEM_EBI)
type = PMEM_MEMTYPE_EBI1;
else
type = PMEM_MEMTYPE_SMI;
priv->memdesc.physaddr =
pmem_kalloc(obj->size * priv->bufcount,
type | PMEM_ALIGNMENT_4K);
if (IS_ERR((void *) priv->memdesc.physaddr)) {
DRM_ERROR("Unable to allocate PMEM memory\n");
return -ENOMEM;
priv->type & DRM_KGSL_GEM_PMEM_EBI) {
type = PMEM_MEMTYPE_EBI1;
result = kgsl_sharedmem_ebimem_user(
&priv->memdesc,
priv->pagetable,
obj->size * priv->bufcount,
0);
if (result) {
DRM_ERROR(
"Unable to allocate PMEM memory\n");
return result;
}
}
priv->memdesc.size = obj->size * priv->bufcount;
else
return -EINVAL;
} else if (TYPE_IS_MEM(priv->type)) {
priv->memdesc.hostptr =
vmalloc_user(obj->size * priv->bufcount);
if (priv->memdesc.hostptr == NULL) {
DRM_ERROR("Unable to allocate vmalloc memory\n");
return -ENOMEM;
if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
priv->type & DRM_KGSL_GEM_CACHE_MASK)
list_add(&priv->list, &kgsl_mem_list);
result = kgsl_sharedmem_page_alloc_user(&priv->memdesc,
priv->pagetable,
obj->size * priv->bufcount, 0);
if (result != 0) {
DRM_ERROR(
"Unable to allocate Vmalloc user memory\n");
return result;
}
priv->memdesc.size = obj->size * priv->bufcount;
priv->memdesc.ops = &kgsl_vmalloc_ops;
} else
return -EINVAL;
for (index = 0; index < priv->bufcount; index++)
for (index = 0; index < priv->bufcount; index++) {
priv->bufs[index].offset = index * obj->size;
priv->bufs[index].gpuaddr =
priv->memdesc.gpuaddr +
priv->bufs[index].offset;
}
priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
return 0;
}
#ifdef CONFIG_MSM_KGSL_MMU
static void
kgsl_gem_unmap(struct drm_gem_object *obj)
{
struct drm_kgsl_gem_object *priv = obj->driver_private;
if (!priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
return;
kgsl_mmu_unmap(priv->pagetable, &priv->memdesc);
kgsl_mmu_putpagetable(priv->pagetable);
priv->pagetable = NULL;
if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
(priv->type & DRM_KGSL_GEM_CACHE_MASK))
list_del(&priv->list);
priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
}
#else
static void
kgsl_gem_unmap(struct drm_gem_object *obj)
{
}
#endif
static void
kgsl_gem_free_memory(struct drm_gem_object *obj)
{
@ -353,12 +297,17 @@ kgsl_gem_free_memory(struct drm_gem_object *obj)
kgsl_gem_mem_flush(&priv->memdesc, priv->type,
DRM_KGSL_GEM_CACHE_OP_FROM_DEV);
kgsl_gem_unmap(obj);
if (TYPE_IS_PMEM(priv->type))
pmem_kfree(priv->memdesc.physaddr);
kgsl_sharedmem_free(&priv->memdesc);
kgsl_mmu_putpagetable(priv->pagetable);
priv->pagetable = NULL;
if ((priv->type == DRM_KGSL_GEM_TYPE_KMEM) ||
(priv->type & DRM_KGSL_GEM_CACHE_MASK))
list_del(&priv->list);
priv->flags &= ~DRM_KGSL_GEM_FLAG_MAPPED;
}
int
@ -454,7 +403,7 @@ kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start,
filp = fget(drm_fd);
if (unlikely(filp == NULL)) {
DRM_ERROR("Unable to ghet the DRM file descriptor\n");
DRM_ERROR("Unable to get the DRM file descriptor\n");
return -EINVAL;
}
file_priv = filp->private_data;
@ -527,7 +476,7 @@ kgsl_gem_init_obj(struct drm_device *dev,
ret = drm_gem_handle_create(file_priv, obj, handle);
drm_gem_object_handle_unreference(obj);
drm_gem_object_unreference(obj);
INIT_LIST_HEAD(&priv->wait_list);
for (i = 0; i < DRM_KGSL_HANDLE_WAIT_ENTRIES; i++) {
@ -702,128 +651,14 @@ int
kgsl_gem_unbind_gpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_kgsl_gem_bind_gpu *args = data;
struct drm_gem_object *obj;
struct drm_kgsl_gem_object *priv;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Invalid GEM handle %x\n", args->handle);
return -EBADF;
}
mutex_lock(&dev->struct_mutex);
priv = obj->driver_private;
if (--priv->bound == 0)
kgsl_gem_unmap(obj);
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return 0;
}
#ifdef CONFIG_MSM_KGSL_MMU
static int
kgsl_gem_map(struct drm_gem_object *obj)
{
struct drm_kgsl_gem_object *priv = obj->driver_private;
int index;
int ret = -EINVAL;
if (priv->flags & DRM_KGSL_GEM_FLAG_MAPPED)
return 0;
/* Get the global page table */
if (priv->pagetable == NULL) {
priv->pagetable = kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
if (priv->pagetable == NULL) {
DRM_ERROR("Unable to get the GPU MMU pagetable\n");
return -EINVAL;
}
}
priv->memdesc.pagetable = priv->pagetable;
ret = kgsl_mmu_map(priv->pagetable, &priv->memdesc,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
if (!ret) {
for (index = 0; index < priv->bufcount; index++) {
priv->bufs[index].gpuaddr =
priv->memdesc.gpuaddr +
priv->bufs[index].offset;
}
}
/* Add cached memory to the list to be cached */
if (priv->type == DRM_KGSL_GEM_TYPE_KMEM ||
priv->type & DRM_KGSL_GEM_CACHE_MASK)
list_add(&priv->list, &kgsl_mem_list);
priv->flags |= DRM_KGSL_GEM_FLAG_MAPPED;
return ret;
}
#else
static int
kgsl_gem_map(struct drm_gem_object *obj)
{
struct drm_kgsl_gem_object *priv = obj->driver_private;
int index;
if (TYPE_IS_PMEM(priv->type)) {
for (index = 0; index < priv->bufcount; index++)
priv->bufs[index].gpuaddr =
priv->memdesc.physaddr + priv->bufs[index].offset;
return 0;
}
return -EINVAL;
}
#endif
int
kgsl_gem_bind_gpu_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
{
struct drm_kgsl_gem_bind_gpu *args = data;
struct drm_gem_object *obj;
struct drm_kgsl_gem_object *priv;
int ret = 0;
obj = drm_gem_object_lookup(dev, file_priv, args->handle);
if (obj == NULL) {
DRM_ERROR("Invalid GEM handle %x\n", args->handle);
return -EBADF;
}
mutex_lock(&dev->struct_mutex);
priv = obj->driver_private;
if (priv->bound++ == 0) {
if (!kgsl_gem_memory_allocated(obj)) {
DRM_ERROR("Memory not allocated for this object\n");
ret = -ENOMEM;
goto out;
}
ret = kgsl_gem_map(obj);
/* This is legacy behavior - use GET_BUFFERINFO instead */
args->gpuptr = priv->bufs[0].gpuaddr;
}
out:
drm_gem_object_unreference(obj);
mutex_unlock(&dev->struct_mutex);
return ret;
return 0;
}
/* Allocate the memory and prepare it for CPU mapping */
@ -1371,27 +1206,6 @@ wakeup_fence_entries(struct drm_kgsl_gem_object_fence *fence)
fence->fence_id = ENTRY_NEEDS_CLEANUP; /* Mark it as needing cleanup */
}
static int kgsl_ts_notifier_cb(struct notifier_block *blk,
unsigned long code, void *_param)
{
struct drm_kgsl_gem_object_fence *fence;
struct kgsl_device *device = kgsl_get_device(code);
int i;
/* loop through the fences to see what things can be processed */
for (i = 0; i < DRM_KGSL_NUM_FENCE_ENTRIES; i++) {
fence = &gem_buf_fence[i];
if (!fence->ts_valid || fence->ts_device != code)
continue;
if (kgsl_check_timestamp(device, fence->timestamp))
wakeup_fence_entries(fence);
}
return 0;
}
int
kgsl_gem_lock_handle_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@ -1584,7 +1398,7 @@ kgsl_gem_unlock_on_ts_ioctl(struct drm_device *dev, void *data,
}
device = kgsl_get_device(ts_device);
ts_done = kgsl_check_timestamp(device, args->timestamp);
ts_done = kgsl_check_timestamp(device, NULL, args->timestamp);
mutex_lock(&dev->struct_mutex);
@ -1635,11 +1449,9 @@ struct drm_ioctl_desc kgsl_drm_ioctls[] = {
};
static struct drm_driver driver = {
.driver_features = DRIVER_USE_PLATFORM_DEVICE | DRIVER_GEM,
.driver_features = DRIVER_GEM,
.load = kgsl_drm_load,
.unload = kgsl_drm_unload,
.firstopen = kgsl_drm_firstopen,
.lastclose = kgsl_drm_lastclose,
.preclose = kgsl_drm_preclose,
.suspend = kgsl_drm_suspend,
.resume = kgsl_drm_resume,
@ -1670,8 +1482,13 @@ int kgsl_drm_init(struct platform_device *dev)
{
int i;
/* Only initialize once */
if (kgsl_drm_inited == DRM_KGSL_INITED)
return 0;
kgsl_drm_inited = DRM_KGSL_INITED;
driver.num_ioctls = DRM_ARRAY_SIZE(kgsl_drm_ioctls);
driver.platform_device = dev;
INIT_LIST_HEAD(&kgsl_mem_list);
@ -1681,10 +1498,11 @@ int kgsl_drm_init(struct platform_device *dev)
gem_buf_fence[i].fence_id = ENTRY_EMPTY;
}
return drm_init(&driver);
return drm_platform_init(&driver, dev);
}
void kgsl_drm_exit(void)
{
drm_exit(&driver);
kgsl_drm_inited = DRM_KGSL_NOT_INITED;
drm_platform_exit(&driver, driver.kdriver.platform_device);
}


@ -1,5 +1,4 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
@ -205,6 +204,10 @@ static int kgsl_drv_memstat_show(struct device *dev,
val = kgsl_driver.stats.vmalloc;
else if (!strncmp(attr->attr.name, "vmalloc_max", 11))
val = kgsl_driver.stats.vmalloc_max;
else if (!strncmp(attr->attr.name, "page_alloc", 10))
val = kgsl_driver.stats.page_alloc;
else if (!strncmp(attr->attr.name, "page_alloc_max", 14))
val = kgsl_driver.stats.page_alloc_max;
else if (!strncmp(attr->attr.name, "coherent", 8))
val = kgsl_driver.stats.coherent;
else if (!strncmp(attr->attr.name, "coherent_max", 12))
@ -234,6 +237,8 @@ static int kgsl_drv_histogram_show(struct device *dev,
DEVICE_ATTR(vmalloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(vmalloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(page_alloc_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(coherent_max, 0444, kgsl_drv_memstat_show, NULL);
DEVICE_ATTR(mapped, 0444, kgsl_drv_memstat_show, NULL);
@ -243,6 +248,8 @@ DEVICE_ATTR(histogram, 0444, kgsl_drv_histogram_show, NULL);
static struct device_attribute *drv_attr_list[] = {
&dev_attr_vmalloc,
&dev_attr_vmalloc_max,
&dev_attr_page_alloc,
&dev_attr_page_alloc_max,
&dev_attr_coherent,
&dev_attr_coherent_max,
&dev_attr_mapped,
@ -297,7 +304,7 @@ static void outer_cache_range_op_sg(struct scatterlist *sg, int sglen, int op)
}
#endif
static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
static int kgsl_page_alloc_vmfault(struct kgsl_memdesc *memdesc,
struct vm_area_struct *vma,
struct vm_fault *vmf)
{
@ -318,18 +325,20 @@ static int kgsl_vmalloc_vmfault(struct kgsl_memdesc *memdesc,
return 0;
}
static int kgsl_vmalloc_vmflags(struct kgsl_memdesc *memdesc)
static int kgsl_page_alloc_vmflags(struct kgsl_memdesc *memdesc)
{
return VM_RESERVED | VM_DONTEXPAND;
}
static void kgsl_vmalloc_free(struct kgsl_memdesc *memdesc)
static void kgsl_page_alloc_free(struct kgsl_memdesc *memdesc)
{
int i = 0;
struct scatterlist *sg;
kgsl_driver.stats.vmalloc -= memdesc->size;
if (memdesc->hostptr)
kgsl_driver.stats.page_alloc -= memdesc->size;
if (memdesc->hostptr) {
vunmap(memdesc->hostptr);
kgsl_driver.stats.vmalloc -= memdesc->size;
}
if (memdesc->sg)
for_each_sg(memdesc->sg, sg, memdesc->sglen, i)
__free_page(sg_page(sg));
@ -341,13 +350,14 @@ static int kgsl_contiguous_vmflags(struct kgsl_memdesc *memdesc)
}
/*
* kgsl_vmalloc_map_kernel - Map the memory in memdesc to kernel address space
* kgsl_page_alloc_map_kernel - Map the memory in memdesc to kernel address
* space
*
* @memdesc - The memory descriptor which contains information about the memory
*
* Return: 0 on success else error code
*/
static int kgsl_vmalloc_map_kernel(struct kgsl_memdesc *memdesc)
static int kgsl_page_alloc_map_kernel(struct kgsl_memdesc *memdesc)
{
if (!memdesc->hostptr) {
pgprot_t page_prot = pgprot_writecombine(PAGE_KERNEL);
@ -365,6 +375,8 @@ static int kgsl_vmalloc_map_kernel(struct kgsl_memdesc *memdesc)
pages[i] = sg_page(sg);
memdesc->hostptr = vmap(pages, memdesc->sglen,
VM_IOREMAP, page_prot);
KGSL_STATS_ADD(memdesc->size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
vfree(pages);
}
if (!memdesc->hostptr)
@ -412,13 +424,13 @@ static void kgsl_coherent_free(struct kgsl_memdesc *memdesc)
}
/* Global - also used by kgsl_drm.c */
struct kgsl_memdesc_ops kgsl_vmalloc_ops = {
.free = kgsl_vmalloc_free,
.vmflags = kgsl_vmalloc_vmflags,
.vmfault = kgsl_vmalloc_vmfault,
.map_kernel_mem = kgsl_vmalloc_map_kernel,
struct kgsl_memdesc_ops kgsl_page_alloc_ops = {
.free = kgsl_page_alloc_free,
.vmflags = kgsl_page_alloc_vmflags,
.vmfault = kgsl_page_alloc_vmfault,
.map_kernel_mem = kgsl_page_alloc_map_kernel,
};
EXPORT_SYMBOL(kgsl_vmalloc_ops);
EXPORT_SYMBOL(kgsl_page_alloc_ops);
static struct kgsl_memdesc_ops kgsl_ebimem_ops = {
.free = kgsl_ebimem_free,
@ -452,7 +464,7 @@ void kgsl_cache_range_op(struct kgsl_memdesc *memdesc, int op)
EXPORT_SYMBOL(kgsl_cache_range_op);
static int
_kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
_kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, unsigned int protflags)
{
@ -463,11 +475,13 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
memdesc->size = size;
memdesc->pagetable = pagetable;
memdesc->priv = KGSL_MEMFLAGS_CACHED;
memdesc->ops = &kgsl_vmalloc_ops;
memdesc->ops = &kgsl_page_alloc_ops;
memdesc->sg = kgsl_sg_alloc(sglen);
if (memdesc->sg == NULL) {
KGSL_CORE_ERR("vmalloc(%d) failed\n",
sglen * sizeof(struct scatterlist));
ret = -ENOMEM;
goto done;
}
@ -496,8 +510,8 @@ _kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
if (ret)
goto done;
KGSL_STATS_ADD(size, kgsl_driver.stats.vmalloc,
kgsl_driver.stats.vmalloc_max);
KGSL_STATS_ADD(size, kgsl_driver.stats.page_alloc,
kgsl_driver.stats.page_alloc_max);
order = get_order(size);
@ -512,7 +526,7 @@ done:
}
int
kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
int ret = 0;
@ -520,18 +534,18 @@ kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
size = ALIGN(size, PAGE_SIZE * 2);
ret = _kgsl_sharedmem_vmalloc(memdesc, pagetable, size,
ret = _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
GSL_PT_PAGE_RV | GSL_PT_PAGE_WV);
if (!ret)
ret = kgsl_vmalloc_map_kernel(memdesc);
ret = kgsl_page_alloc_map_kernel(memdesc);
if (ret)
kgsl_sharedmem_free(memdesc);
return ret;
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc);
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc);
int
kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, int flags)
{
@ -543,10 +557,10 @@ kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
if (!(flags & KGSL_MEMFLAGS_GPUREADONLY))
protflags |= GSL_PT_PAGE_WV;
return _kgsl_sharedmem_vmalloc(memdesc, pagetable, size,
return _kgsl_sharedmem_page_alloc(memdesc, pagetable, size,
protflags);
}
EXPORT_SYMBOL(kgsl_sharedmem_vmalloc_user);
EXPORT_SYMBOL(kgsl_sharedmem_page_alloc_user);
int
kgsl_sharedmem_alloc_coherent(struct kgsl_memdesc *memdesc, size_t size)


@ -36,12 +36,12 @@ struct kgsl_process_private;
/** Set if the memdesc is mapped into all pagetables */
#define KGSL_MEMFLAGS_GLOBAL 0x00000002
extern struct kgsl_memdesc_ops kgsl_vmalloc_ops;
extern struct kgsl_memdesc_ops kgsl_page_alloc_ops;
int kgsl_sharedmem_vmalloc(struct kgsl_memdesc *memdesc,
int kgsl_sharedmem_page_alloc(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size);
int kgsl_sharedmem_vmalloc_user(struct kgsl_memdesc *memdesc,
int kgsl_sharedmem_page_alloc_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, int flags);
@ -136,11 +136,7 @@ static inline int
kgsl_allocate(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable, size_t size)
{
#ifdef CONFIG_MSM_KGSL_MMU
return kgsl_sharedmem_vmalloc(memdesc, pagetable, size);
#else
return kgsl_sharedmem_ebimem(memdesc, pagetable, size);
#endif
return kgsl_sharedmem_page_alloc(memdesc, pagetable, size);
}
static inline int
@ -148,21 +144,13 @@ kgsl_allocate_user(struct kgsl_memdesc *memdesc,
struct kgsl_pagetable *pagetable,
size_t size, unsigned int flags)
{
#ifdef CONFIG_MSM_KGSL_MMU
return kgsl_sharedmem_vmalloc_user(memdesc, pagetable, size, flags);
#else
return kgsl_sharedmem_ebimem_user(memdesc, pagetable, size, flags);
#endif
return kgsl_sharedmem_page_alloc_user(memdesc, pagetable, size, flags);
}
static inline int
kgsl_allocate_contiguous(struct kgsl_memdesc *memdesc, size_t size)
{
int ret = kgsl_sharedmem_alloc_coherent(memdesc, size);
#ifndef CONFIG_MSM_KGSL_MMU
if (!ret)
memdesc->gpuaddr = memdesc->physaddr;
#endif
return ret;
}