#ifdef CONFIG_MSM_KGSL
/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/slab.h>
#include <linux/module.h>
#include <linux/bitmap.h>
#include <linux/genalloc.h>

/* General purpose special memory pool descriptor. */
struct gen_pool {
        rwlock_t lock;                  /* protects chunks list */
        struct list_head chunks;        /* list of chunks in this pool */
        unsigned order;                 /* minimum allocation order */
};

/* General purpose special memory pool chunk descriptor. */
struct gen_pool_chunk {
        spinlock_t lock;                /* protects bits */
        struct list_head next_chunk;    /* next chunk in pool */
        phys_addr_t phys_addr;          /* physical starting address of memory chunk */
        unsigned long start;            /* start of memory chunk, in pool->order units */
        unsigned long size;             /* number of bits */
        unsigned long bits[0];          /* bitmap for allocating memory chunk */
};

/**
 * gen_pool_create() - create a new special memory pool
 * @order:      Log base 2 of number of bytes each bitmap bit represents.
 * @nid:        Node id of the node the pool structure should be allocated
 *              on, or -1.  This will also be used for other allocations.
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *__must_check gen_pool_create(unsigned order, int nid)
{
        struct gen_pool *pool;

        if (WARN_ON(order >= BITS_PER_LONG))
                return NULL;

        pool = kmalloc_node(sizeof *pool, GFP_KERNEL, nid);
        if (pool) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->order = order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add_virt - add a new chunk of special memory to the pool
 * @pool:       pool to add new memory chunk to
 * @virt:       virtual starting address of memory chunk to add to pool
 * @phys:       physical starting address of memory chunk to add to pool
 * @size:       size in bytes of the memory chunk to add to pool
 * @nid:        node id of the node the chunk structure and bitmap should be
 *              allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 *
 * Returns 0 on success or a negative errno on failure.
 */
int __must_check gen_pool_add_virt(struct gen_pool *pool, unsigned long virt,
                                   phys_addr_t phys, size_t size, int nid)
{
        struct gen_pool_chunk *chunk;
        size_t nbytes;

        if (WARN_ON(!virt || virt + size < virt ||
                    (virt & ((1UL << pool->order) - 1))))
                return -EINVAL;

        size = size >> pool->order;
        if (WARN_ON(!size))
                return -EINVAL;

        nbytes = sizeof *chunk + BITS_TO_LONGS(size) * sizeof *chunk->bits;
        chunk = kzalloc_node(nbytes, GFP_KERNEL, nid);
        if (!chunk)
                return -ENOMEM;

        spin_lock_init(&chunk->lock);
        chunk->phys_addr = phys;
        chunk->start = virt >> pool->order;
        chunk->size = size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add_virt);
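
/*
 * Illustrative sketch (not part of the original file): how a driver might
 * create a pool and hand it a chunk of device carveout memory using the
 * API above.  The carveout address, size and the function name are
 * hypothetical; one bitmap bit covers one page here.
 */
#if 0
static struct gen_pool *example_setup_pool(void)
{
        struct gen_pool *pool;
        void __iomem *virt;
        const phys_addr_t carveout_phys = 0x40000000;   /* hypothetical */
        const size_t carveout_size = SZ_1M;             /* hypothetical */

        /* One bitmap bit per 4 KiB page; allocate on any NUMA node. */
        pool = gen_pool_create(PAGE_SHIFT, -1);
        if (!pool)
                return NULL;

        virt = ioremap(carveout_phys, carveout_size);
        if (!virt) {
                gen_pool_destroy(pool);
                return NULL;
        }

        if (gen_pool_add_virt(pool, (unsigned long)virt, carveout_phys,
                              carveout_size, -1)) {
                iounmap(virt);
                gen_pool_destroy(pool);
                return NULL;
        }
        return pool;
}
#endif
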
/**
 * gen_pool_virt_to_phys - return the physical address of memory
 * @pool:       pool to allocate from
 * @addr:       starting address of memory
 *
 * Returns the physical address on success, or -1 on error.
 */
phys_addr_t gen_pool_virt_to_phys(struct gen_pool *pool, unsigned long addr)
{
        struct gen_pool_chunk *chunk;
        phys_addr_t paddr = -1;

        read_lock(&pool->lock);
        list_for_each_entry(chunk, &pool->chunks, next_chunk) {
                /* chunk->start and chunk->size are in pool->order units */
                unsigned long start = chunk->start << pool->order;
                unsigned long size = chunk->size << pool->order;

                if (addr >= start && addr < start + size) {
                        paddr = chunk->phys_addr + (addr - start);
                        break;
                }
        }
        read_unlock(&pool->lock);

        return paddr;
}
EXPORT_SYMBOL(gen_pool_virt_to_phys);

/**
 * gen_pool_destroy() - destroy a special memory pool
 * @pool:       Pool to destroy.
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct gen_pool_chunk *chunk;
        int bit;

        while (!list_empty(&pool->chunks)) {
                chunk = list_entry(pool->chunks.next, struct gen_pool_chunk,
                                   next_chunk);
                list_del(&chunk->next_chunk);

                bit = find_next_bit(chunk->bits, chunk->size, 0);
                BUG_ON(bit < chunk->size);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);

/**
 * gen_pool_alloc_aligned() - allocate special memory from the pool
 * @pool:       Pool to allocate from.
 * @size:       Number of bytes to allocate from the pool.
 * @alignment_order:    Order the allocated space should be
 *                      aligned to (e.g. 20 means allocated space
 *                      must be aligned to 1 MiB).
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long __must_check
gen_pool_alloc_aligned(struct gen_pool *pool, size_t size,
                       unsigned alignment_order)
{
        unsigned long addr, align_mask = 0, flags, start;
        struct gen_pool_chunk *chunk;

        if (size == 0)
                return 0;

        if (alignment_order > pool->order)
                align_mask = (1UL << (alignment_order - pool->order)) - 1;

        size = (size + (1UL << pool->order) - 1) >> pool->order;

        read_lock(&pool->lock);
        list_for_each_entry(chunk, &pool->chunks, next_chunk) {
                if (chunk->size < size)
                        continue;

                spin_lock_irqsave(&chunk->lock, flags);
                start = bitmap_find_next_zero_area_off(chunk->bits,
                                                       chunk->size, 0, size,
                                                       align_mask,
                                                       chunk->start);
                if (start >= chunk->size) {
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        continue;
                }

                bitmap_set(chunk->bits, start, size);
                spin_unlock_irqrestore(&chunk->lock, flags);
                addr = (chunk->start + start) << pool->order;
                goto done;
        }

        addr = 0;
done:
        read_unlock(&pool->lock);
        return addr;
}
EXPORT_SYMBOL(gen_pool_alloc_aligned);

/**
 * gen_pool_free() - free allocated special memory back to the pool
 * @pool:       Pool to free to.
 * @addr:       Starting address of memory to free back to pool.
 * @size:       Size in bytes of memory to free.
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct gen_pool_chunk *chunk;
        unsigned long flags;

        if (!size)
                return;

        addr = addr >> pool->order;
        size = (size + (1UL << pool->order) - 1) >> pool->order;

        BUG_ON(addr + size < addr);

        read_lock(&pool->lock);
        list_for_each_entry(chunk, &pool->chunks, next_chunk)
                if (addr >= chunk->start &&
                    addr + size <= chunk->start + chunk->size) {
                        spin_lock_irqsave(&chunk->lock, flags);
                        bitmap_clear(chunk->bits, addr - chunk->start, size);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        goto done;
                }
        /* Not reached: the range must lie inside one of the chunks. */
        BUG_ON(1);
done:
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);
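
/*
 * Illustrative sketch (not part of the original file): an aligned
 * allocation round trip against the API above.  "pool" is assumed to have
 * been populated with gen_pool_add_virt(); the function name and sizes are
 * hypothetical.
 */
#if 0
static void example_aligned_alloc(struct gen_pool *pool)
{
        unsigned long vaddr;
        phys_addr_t paddr;

        /* 64 KiB of space, aligned to 1 MiB (alignment order 20). */
        vaddr = gen_pool_alloc_aligned(pool, SZ_64K, 20);
        if (!vaddr)
                return;         /* pool exhausted or too fragmented */

        paddr = gen_pool_virt_to_phys(pool, vaddr);

        /* ... program paddr into the device, use vaddr from the CPU ... */

        gen_pool_free(pool, vaddr, SZ_64K);
}
#endif
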
#else

/*
 * Basic general purpose allocator for managing special purpose memory
 * not managed by the regular kmalloc/kfree interface.
 * Uses for this include on-device special memory, uncached memory
 * etc.
 *
 * Copyright 2005 (C) Jes Sorensen
 *
 * This source code is licensed under the GNU General Public License,
 * Version 2.  See the file COPYING for more details.
 */

#include <linux/module.h>
#include <linux/genalloc.h>

/**
 * gen_pool_create - create a new special memory pool
 * @min_alloc_order: log base 2 of number of bytes each bitmap bit represents
 * @nid: node id of the node the pool structure should be allocated on, or -1
 *
 * Create a new special memory pool that can be used to manage special purpose
 * memory not managed by the regular kmalloc/kfree interface.
 */
struct gen_pool *gen_pool_create(int min_alloc_order, int nid)
{
        struct gen_pool *pool;

        pool = kmalloc_node(sizeof(struct gen_pool), GFP_KERNEL, nid);
        if (pool != NULL) {
                rwlock_init(&pool->lock);
                INIT_LIST_HEAD(&pool->chunks);
                pool->min_alloc_order = min_alloc_order;
        }
        return pool;
}
EXPORT_SYMBOL(gen_pool_create);

/**
 * gen_pool_add - add a new chunk of special memory to the pool
 * @pool: pool to add new memory chunk to
 * @addr: starting address of memory chunk to add to pool
 * @size: size in bytes of the memory chunk to add to pool
 * @nid: node id of the node the chunk structure and bitmap should be
 *       allocated on, or -1
 *
 * Add a new chunk of special memory to the specified pool.
 */
int gen_pool_add(struct gen_pool *pool, unsigned long addr, size_t size,
                 int nid)
{
        struct gen_pool_chunk *chunk;
        int nbits = size >> pool->min_alloc_order;
        int nbytes = sizeof(struct gen_pool_chunk) +
                                (nbits + BITS_PER_BYTE - 1) / BITS_PER_BYTE;

        chunk = kmalloc_node(nbytes, GFP_KERNEL | __GFP_ZERO, nid);
        if (unlikely(chunk == NULL))
                return -1;

        spin_lock_init(&chunk->lock);
        chunk->start_addr = addr;
        chunk->end_addr = addr + size;

        write_lock(&pool->lock);
        list_add(&chunk->next_chunk, &pool->chunks);
        write_unlock(&pool->lock);

        return 0;
}
EXPORT_SYMBOL(gen_pool_add);

/**
 * gen_pool_destroy - destroy a special memory pool
 * @pool: pool to destroy
 *
 * Destroy the specified special memory pool.  Verifies that there are no
 * outstanding allocations.
 */
void gen_pool_destroy(struct gen_pool *pool)
{
        struct list_head *_chunk, *_next_chunk;
        struct gen_pool_chunk *chunk;
        int order = pool->min_alloc_order;
        int bit, end_bit;

        list_for_each_safe(_chunk, _next_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);
                list_del(&chunk->next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                bit = find_next_bit(chunk->bits, end_bit, 0);
                BUG_ON(bit < end_bit);

                kfree(chunk);
        }
        kfree(pool);
}
EXPORT_SYMBOL(gen_pool_destroy);
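
/*
 * Illustrative sketch (not part of the original file): setting up a pool
 * with the legacy API in this branch.  The SRAM address, size and the
 * function name are hypothetical.
 */
#if 0
static struct gen_pool *example_legacy_setup(void)
{
        struct gen_pool *pool;
        const unsigned long sram_virt = 0xfe000000UL;   /* hypothetical */
        const size_t sram_size = SZ_64K;                /* hypothetical */

        /* Smallest allocation unit: 32 bytes (min_alloc_order 5). */
        pool = gen_pool_create(5, -1);
        if (!pool)
                return NULL;

        if (gen_pool_add(pool, sram_virt, sram_size, -1)) {
                gen_pool_destroy(pool);
                return NULL;
        }
        return pool;
}
#endif
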
/**
 * gen_pool_alloc - allocate special memory from the pool
 * @pool: pool to allocate from
 * @size: number of bytes to allocate from the pool
 *
 * Allocate the requested number of bytes from the specified pool.
 * Uses a first-fit algorithm.
 */
unsigned long gen_pool_alloc(struct gen_pool *pool, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long addr, flags;
        int order = pool->min_alloc_order;
        int nbits, bit, start_bit, end_bit;

        if (size == 0)
                return 0;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                end_bit = (chunk->end_addr - chunk->start_addr) >> order;
                /* a run of nbits bits can start no later than end_bit - nbits */
                end_bit -= nbits - 1;

                spin_lock_irqsave(&chunk->lock, flags);
                bit = -1;
                while (bit + 1 < end_bit) {
                        bit = find_next_zero_bit(chunk->bits, end_bit,
                                                 bit + 1);
                        if (bit >= end_bit)
                                break;

                        start_bit = bit;
                        if (nbits > 1) {
                                bit = find_next_bit(chunk->bits, bit + nbits,
                                                    bit + 1);
                                if (bit - start_bit < nbits)
                                        continue;
                        }

                        addr = chunk->start_addr +
                                        ((unsigned long)start_bit << order);
                        while (nbits--)
                                __set_bit(start_bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        read_unlock(&pool->lock);
                        return addr;
                }
                spin_unlock_irqrestore(&chunk->lock, flags);
        }
        read_unlock(&pool->lock);
        return 0;
}
EXPORT_SYMBOL(gen_pool_alloc);

/**
 * gen_pool_free - free allocated special memory back to the pool
 * @pool: pool to free to
 * @addr: starting address of memory to free back to pool
 * @size: size in bytes of memory to free
 *
 * Free previously allocated special memory back to the specified pool.
 */
void gen_pool_free(struct gen_pool *pool, unsigned long addr, size_t size)
{
        struct list_head *_chunk;
        struct gen_pool_chunk *chunk;
        unsigned long flags;
        int order = pool->min_alloc_order;
        int bit, nbits;

        nbits = (size + (1UL << order) - 1) >> order;

        read_lock(&pool->lock);
        list_for_each(_chunk, &pool->chunks) {
                chunk = list_entry(_chunk, struct gen_pool_chunk, next_chunk);

                if (addr >= chunk->start_addr && addr < chunk->end_addr) {
                        BUG_ON(addr + size > chunk->end_addr);
                        spin_lock_irqsave(&chunk->lock, flags);
                        bit = (addr - chunk->start_addr) >> order;
                        while (nbits--)
                                __clear_bit(bit++, chunk->bits);
                        spin_unlock_irqrestore(&chunk->lock, flags);
                        break;
                }
        }
        BUG_ON(nbits > 0);
        read_unlock(&pool->lock);
}
EXPORT_SYMBOL(gen_pool_free);

#endif
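
/*
 * Illustrative sketch (not part of the original file): a full allocation
 * lifecycle against the legacy (#else) API above.  "pool" is assumed to
 * have been populated with gen_pool_add(); the function name is
 * hypothetical.
 */
#if 0
static void example_legacy_lifecycle(struct gen_pool *pool)
{
        unsigned long addr;

        /* Rounded up internally to a multiple of 1 << min_alloc_order. */
        addr = gen_pool_alloc(pool, 100);
        if (!addr)
                return;         /* no free run was large enough */

        /* ... use the memory at addr ... */

        /* This allocator tracks no sizes: free with the allocated size. */
        gen_pool_free(pool, addr, 100);
}
#endif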