Merge remote-tracking branch 'securecrt/ics_HWA' into ics_HWA

Conflicts:
	arch/arm/configs/htcleo_defconfig
	build.sh
arch/arm/configs/htcleo_defconfig
@@ -401,7 +401,7 @@ CONFIG_BOUNCE=y
 CONFIG_VIRT_TO_BUS=y
 CONFIG_HAVE_MLOCK=y
 CONFIG_HAVE_MLOCKED_PAGE_BIT=y
-CONFIG_KSM=y
+# CONFIG_KSM is not set
 CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
 CONFIG_ALIGNMENT_TRAP=y
 CONFIG_ALLOW_CPU_ALIGNMENT=y
@@ -604,11 +604,10 @@ CONFIG_NETFILTER_XT_CONNMARK=y
 #
 CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
 CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
 # CONFIG_NETFILTER_XT_TARGET_CT is not set
 # CONFIG_NETFILTER_XT_TARGET_DSCP is not set
 # CONFIG_NETFILTER_XT_TARGET_HL is not set
 CONFIG_NETFILTER_XT_TARGET_MARK=y
-# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
+CONFIG_NETFILTER_XT_TARGET_NFLOG=y
 CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
 # CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
 # CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
@@ -632,7 +631,7 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
 # CONFIG_NETFILTER_XT_MATCH_ESP is not set
 CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
 CONFIG_NETFILTER_XT_MATCH_HELPER=y
-CONFIG_NETFILTER_XT_MATCH_HL=y
+# CONFIG_NETFILTER_XT_MATCH_HL is not set
 CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
 CONFIG_NETFILTER_XT_MATCH_LENGTH=y
 CONFIG_NETFILTER_XT_MATCH_LIMIT=y
@@ -1698,6 +1697,10 @@ CONFIG_ZRAM_NUM_DEVICES=1
 CONFIG_ZRAM_DEFAULT_PERCENTAGE=18
 # CONFIG_ZRAM_DEBUG is not set
 CONFIG_ZRAM_DEFAULT_DISKSIZE=100000000
+# CONFIG_ZRAM_LZO is not set
+CONFIG_ZRAM_SNAPPY=y
+CONFIG_SNAPPY_COMPRESS=y
+CONFIG_SNAPPY_DECOMPRESS=y
 
 #
 # File systems
 
@@ -771,7 +771,7 @@ static struct android_pmem_platform_data android_pmem_adsp_pdata = {
 #else
 	.no_allocator	= 0,
 #endif
-	.cached		= 1,
+	.cached		= 0,
 };
 
 
@@ -784,7 +784,7 @@ static struct android_pmem_platform_data android_pmem_venc_pdata = {
 #else
 	.no_allocator	= 0,
 #endif
-	.cached		= 1,
+	.cached		= 0,
 };
 
 static struct platform_device android_pmem_mdp_device = {

build.sh (new executable file, 36 lines)
@@ -0,0 +1,36 @@
+#!/bin/sh
+
+export KERNELBASEDIR=$PWD/../ICS_Kernel_update-zip-files
+#export TOOLCHAIN=$HOME/CodeSourcery/Sourcery_G++_Lite/bin/arm-none-eabi-
+export TOOLCHAIN=$HOME/arm-2010q1/bin/arm-none-eabi-
+
+export KERNEL_FILE=HTCLEO-Kernel_2.6.32-ics_tytung_HWA
+
+rm arch/arm/boot/zImage
+make htcleo_defconfig
+make ARCH=arm CROSS_COMPILE=$TOOLCHAIN zImage -j8 && make ARCH=arm CROSS_COMPILE=$TOOLCHAIN modules -j8
+
+if [ -f arch/arm/boot/zImage ]; then
+
+mkdir -p $KERNELBASEDIR/
+rm -rf $KERNELBASEDIR/boot/*
+rm -rf $KERNELBASEDIR/system/lib/modules/*
+mkdir -p $KERNELBASEDIR/boot
+mkdir -p $KERNELBASEDIR/system/
+mkdir -p $KERNELBASEDIR/system/lib/
+mkdir -p $KERNELBASEDIR/system/lib/modules
+
+cp -a arch/arm/boot/zImage $KERNELBASEDIR/boot/zImage
+
+make ARCH=arm CROSS_COMPILE=$TOOLCHAIN INSTALL_MOD_PATH=$KERNELBASEDIR/system/lib/modules modules_install -j8
+
+cd $KERNELBASEDIR/system/lib/modules
+find -iname *.ko | xargs -i -t cp {} .
+rm -rf $KERNELBASEDIR/system/lib/modules/lib
+stat $KERNELBASEDIR/boot/zImage
+cd ../../../
+zip -r ${KERNEL_FILE}_`date +"%Y%m%d_%H_%M"`.zip boot system META-INF work
+else
+echo "Kernel STUCK in BUILD! no zImage exist"
+fi
+
@@ -1,4 +1,4 @@
-/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
+/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
  *
  * This program is free software; you can redistribute it and/or modify
  * it under the terms of the GNU General Public License version 2 and
@@ -22,7 +22,7 @@
 #include <linux/anon_inodes.h>
 #include <linux/miscdevice.h>
 #include <linux/genlock.h>
-#include <linux/interrupt.h> /* for in_interrupt() */
+#include <linux/interrupt.h>
 
 /* Lock states - can either be unlocked, held as an exclusive write lock or a
  * shared read lock
@@ -32,7 +32,18 @@
 #define _RDLOCK  GENLOCK_RDLOCK
 #define _WRLOCK GENLOCK_WRLOCK
 
+#define GENLOCK_LOG_ERR(fmt, args...) \
+pr_err("genlock: %s: " fmt, __func__, ##args)
+
+/* The genlock magic stored in the kernel private data is used to protect
+ * against the possibility of user space passing a valid fd to a
+ * non-genlock file for genlock_attach_lock()
+ */
+#define GENLOCK_MAGIC_OK  0xD2EAD10C
+#define GENLOCK_MAGIC_BAD 0xD2EADBAD
+
 struct genlock {
+	unsigned int magic;       /* Magic for attach verification */
 	struct list_head active;  /* List of handles holding lock */
 	spinlock_t lock;          /* Spinlock to protect the lock internals */
 	wait_queue_head_t queue;  /* Holding pen for processes pending lock */
@@ -49,12 +60,28 @@ struct genlock_handle {
 				     taken */
 };
 
+/*
+ * Create a spinlock to protect against a race condition when a lock gets
+ * released while another process tries to attach it
+ */
+
+static DEFINE_SPINLOCK(genlock_ref_lock);
+
 static void genlock_destroy(struct kref *kref)
 {
-       struct genlock *lock = container_of(kref, struct genlock,
-                       refcount);
+	struct genlock *lock = container_of(kref, struct genlock,
+			refcount);
 
-       kfree(lock);
+	/*
+	 * Clear the private data for the file descriptor in case the fd is
+	 * still active after the lock gets released
+	 */
+
+	if (lock->file)
+		lock->file->private_data = NULL;
+	lock->magic = GENLOCK_MAGIC_BAD;
+
+	kfree(lock);
 }
 
 /*
@@ -64,6 +91,15 @@ static void genlock_destroy(struct kref *kref)
 
 static int genlock_release(struct inode *inodep, struct file *file)
 {
+	struct genlock *lock = file->private_data;
+	/*
+	 * Clear the refrence back to this file structure to avoid
+	 * somehow reusing the lock after the file has been destroyed
+	 */
+
+	if (lock)
+		lock->file = NULL;
+
 	return 0;
 }
 
@@ -81,18 +117,29 @@ static const struct file_operations genlock_fops = {
 struct genlock *genlock_create_lock(struct genlock_handle *handle)
 {
 	struct genlock *lock;
+	void *ret;
 
-	if (handle->lock != NULL)
+	if (IS_ERR_OR_NULL(handle)) {
+		GENLOCK_LOG_ERR("Invalid handle\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (handle->lock != NULL) {
+		GENLOCK_LOG_ERR("Handle already has a lock attached\n");
 		return ERR_PTR(-EINVAL);
+	}
 
 	lock = kzalloc(sizeof(*lock), GFP_KERNEL);
-	if (lock == NULL)
+	if (lock == NULL) {
+		GENLOCK_LOG_ERR("Unable to allocate memory for a lock\n");
 		return ERR_PTR(-ENOMEM);
+	}
 
 	INIT_LIST_HEAD(&lock->active);
 	init_waitqueue_head(&lock->queue);
 	spin_lock_init(&lock->lock);
 
+	lock->magic = GENLOCK_MAGIC_OK;
 	lock->state = _UNLOCKED;
 
 	/*
@@ -100,8 +147,13 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle)
 	 * other processes
 	 */
 
-	lock->file = anon_inode_getfile("genlock", &genlock_fops,
-		lock, O_RDWR);
+	ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create lock inode\n");
+		kfree(lock);
+		return ret;
+	}
+	lock->file = ret;
 
 	/* Attach the new lock to the handle */
 	handle->lock = lock;
@@ -120,8 +172,10 @@ static int genlock_get_fd(struct genlock *lock)
 {
 	int ret;
 
-	if (!lock->file)
+	if (!lock->file) {
+		GENLOCK_LOG_ERR("No file attached to the lock\n");
 		return -EINVAL;
+	}
 
 	ret = get_unused_fd_flags(0);
 	if (ret < 0)
@@ -143,24 +197,51 @@ struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd)
 	struct file *file;
 	struct genlock *lock;
 
-	if (handle->lock != NULL)
+	if (IS_ERR_OR_NULL(handle)) {
+		GENLOCK_LOG_ERR("Invalid handle\n");
+		return ERR_PTR(-EINVAL);
+	}
+
+	if (handle->lock != NULL) {
+		GENLOCK_LOG_ERR("Handle already has a lock attached\n");
 		return ERR_PTR(-EINVAL);
+	}
 
 	file = fget(fd);
-	if (file == NULL)
+	if (file == NULL) {
+		GENLOCK_LOG_ERR("Bad file descriptor\n");
 		return ERR_PTR(-EBADF);
+	}
 
+	/*
+	 * take a spinlock to avoid a race condition if the lock is
+	 * released and then attached
+	 */
+
+	spin_lock(&genlock_ref_lock);
 	lock = file->private_data;
 
 	fput(file);
 
-	if (lock == NULL)
-		return ERR_PTR(-EINVAL);
+	if (lock == NULL) {
+		GENLOCK_LOG_ERR("File descriptor is invalid\n");
+		goto fail_invalid;
+	}
+
+	if (lock->magic != GENLOCK_MAGIC_OK) {
+		GENLOCK_LOG_ERR("Magic is invalid - 0x%X\n", lock->magic);
+		goto fail_invalid;
+	}
 
 	handle->lock = lock;
 	kref_get(&lock->refcount);
+	spin_unlock(&genlock_ref_lock);
 
 	return lock;
+
+fail_invalid:
+	spin_unlock(&genlock_ref_lock);
+	return ERR_PTR(-EINVAL);
 }
 EXPORT_SYMBOL(genlock_attach_lock);
 
@@ -199,13 +280,16 @@ static int _genlock_unlock(struct genlock *lock, struct genlock_handle *handle)
 
 	spin_lock_irqsave(&lock->lock, irqflags);
 
-	if (lock->state == _UNLOCKED)
+	if (lock->state == _UNLOCKED) {
+		GENLOCK_LOG_ERR("Trying to unlock an unlocked handle\n");
 		goto done;
+	}
 
 	/* Make sure this handle is an owner of the lock */
-	if (!handle_has_lock(lock, handle))
+	if (!handle_has_lock(lock, handle)) {
+		GENLOCK_LOG_ERR("handle does not have lock attached to it\n");
 		goto done;
 
+	}
 	/* If the handle holds no more references to the lock then
 	   release it (maybe) */
 
@@ -228,7 +312,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
 {
 	unsigned long irqflags;
 	int ret = 0;
-	unsigned int ticks = msecs_to_jiffies(timeout);
+	unsigned long ticks = msecs_to_jiffies(timeout);
 
 	spin_lock_irqsave(&lock->lock, irqflags);
 
@@ -247,12 +331,15 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
 	if (handle_has_lock(lock, handle)) {
 
 		/*
-		 * If the handle already holds the lock and the type matches,
-		 * then just increment the active pointer. This allows the
-		 * handle to do recursive locks
+		 * If the handle already holds the lock and the lock type is
+		 * a read lock then just increment the active pointer. This
+		 * allows the handle to do recursive read locks. Recursive
+		 * write locks are not allowed in order to support
+		 * synchronization within a process using a single gralloc
+		 * handle.
		 */
 
-		if (lock->state == op) {
+		if (lock->state == _RDLOCK && op == _RDLOCK) {
 			handle->active++;
 			goto done;
 		}
@@ -261,32 +348,46 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
 		 * If the handle holds a write lock then the owner can switch
 		 * to a read lock if they want. Do the transition atomically
 		 * then wake up any pending waiters in case they want a read
-		 * lock too.
+		 * lock too. In order to support synchronization within a
+		 * process the caller must explicity request to convert the
+		 * lock type with the GENLOCK_WRITE_TO_READ flag.
 		 */
 
-		if (op == _RDLOCK && handle->active == 1) {
-			lock->state = _RDLOCK;
-			wake_up(&lock->queue);
-			goto done;
+		if (flags & GENLOCK_WRITE_TO_READ) {
+			if (lock->state == _WRLOCK && op == _RDLOCK) {
+				lock->state = _RDLOCK;
+				wake_up(&lock->queue);
+				goto done;
+			} else {
+				GENLOCK_LOG_ERR("Invalid state to convert"
+					"write to read\n");
+				ret = -EINVAL;
+				goto done;
+			}
 		}
+	} else {
 
 		/*
-		 * Otherwise the user tried to turn a read into a write, and we
-		 * don't allow that.
+		 * Check to ensure the caller has not attempted to convert a
+		 * write to a read without holding the lock.
 		 */
 
-		ret = -EINVAL;
-		goto done;
-	}
+		if (flags & GENLOCK_WRITE_TO_READ) {
+			GENLOCK_LOG_ERR("Handle must have lock to convert"
+				"write to read\n");
+			ret = -EINVAL;
+			goto done;
+		}
 
-	/*
-	 * If we request a read and the lock is held by a read, then go
-	 * ahead and share the lock
-	 */
+		/*
+		 * If we request a read and the lock is held by a read, then go
+		 * ahead and share the lock
+		 */
 
-	if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK)
-		goto dolock;
+		if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK)
+			goto dolock;
+	}
 
 	/* Treat timeout 0 just like a NOBLOCK flag and return if the
 	   lock cannot be aquired without blocking */
 
@@ -295,15 +396,26 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
 		goto done;
 	}
 
-	/* Wait while the lock remains in an incompatible state */
+	/*
+	 * Wait while the lock remains in an incompatible state
+	 * state    op    wait
+	 * -------------------
+	 * unlocked n/a   no
+	 * read     read  no
+	 * read     write yes
+	 * write    n/a   yes
+	 */
 
-	while (lock->state != _UNLOCKED) {
-		unsigned int elapsed;
+	while ((lock->state == _RDLOCK && op == _WRLOCK) ||
+			lock->state == _WRLOCK) {
+		signed long elapsed;
 
 		spin_unlock_irqrestore(&lock->lock, irqflags);
 
 		elapsed = wait_event_interruptible_timeout(lock->queue,
-			lock->state == _UNLOCKED, ticks);
+			lock->state == _UNLOCKED ||
+			(lock->state == _RDLOCK && op == _RDLOCK),
+			ticks);
 
 		spin_lock_irqsave(&lock->lock, irqflags);
 
@@ -312,7 +424,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
 			goto done;
 		}
 
-		ticks = elapsed;
+		ticks = (unsigned long) elapsed;
 	}
 
 dolock:
@@ -320,7 +432,7 @@ dolock:
 
 	list_add_tail(&handle->entry, &lock->active);
 	lock->state = op;
-	handle->active = 1;
+	handle->active++;
 
 done:
 	spin_unlock_irqrestore(&lock->lock, irqflags);
@@ -329,7 +441,7 @@ done:
 }
 
 /**
- * genlock_lock - Acquire or release a lock
+ * genlock_lock - Acquire or release a lock (depreciated)
 * @handle - pointer to the genlock handle that is requesting the lock
  * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK)
  * @flags - flags to control the operation
@@ -341,11 +453,76 @@ done:
 int genlock_lock(struct genlock_handle *handle, int op, int flags,
 	uint32_t timeout)
 {
-	struct genlock *lock = handle->lock;
+	struct genlock *lock;
+	unsigned long irqflags;
 
 	int ret = 0;
 
-	if (lock == NULL)
+	if (IS_ERR_OR_NULL(handle)) {
+		GENLOCK_LOG_ERR("Invalid handle\n");
 		return -EINVAL;
+	}
+
+	lock = handle->lock;
+
+	if (lock == NULL) {
+		GENLOCK_LOG_ERR("Handle does not have a lock attached\n");
+		return -EINVAL;
+	}
 
 	switch (op) {
 	case GENLOCK_UNLOCK:
 		ret = _genlock_unlock(lock, handle);
 		break;
 	case GENLOCK_RDLOCK:
+		spin_lock_irqsave(&lock->lock, irqflags);
+		if (handle_has_lock(lock, handle)) {
+			/* request the WRITE_TO_READ flag for compatibility */
+			flags |= GENLOCK_WRITE_TO_READ;
+		}
+		spin_unlock_irqrestore(&lock->lock, irqflags);
+		/* fall through to take lock */
+	case GENLOCK_WRLOCK:
+		ret = _genlock_lock(lock, handle, op, flags, timeout);
+		break;
+	default:
+		GENLOCK_LOG_ERR("Invalid lock operation\n");
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL(genlock_lock);
+
+/**
+ * genlock_dreadlock - Acquire or release a lock
+ * @handle - pointer to the genlock handle that is requesting the lock
+ * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK)
+ * @flags - flags to control the operation
+ * @timeout - optional timeout to wait for the lock to come free
+ *
+ * Returns: 0 on success or error code on failure
+ */
+
+int genlock_dreadlock(struct genlock_handle *handle, int op, int flags,
+	uint32_t timeout)
+{
+	struct genlock *lock;
+
+	int ret = 0;
+
+	if (IS_ERR_OR_NULL(handle)) {
+		GENLOCK_LOG_ERR("Invalid handle\n");
+		return -EINVAL;
+	}
+
+	lock = handle->lock;
+
+	if (lock == NULL) {
+		GENLOCK_LOG_ERR("Handle does not have a lock attached\n");
+		return -EINVAL;
+	}
+
+	switch (op) {
+	case GENLOCK_UNLOCK:
@@ -356,13 +533,14 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags,
 		ret = _genlock_lock(lock, handle, op, flags, timeout);
 		break;
 	default:
+		GENLOCK_LOG_ERR("Invalid lock operation\n");
 		ret = -EINVAL;
 		break;
 	}
 
 	return ret;
 }
-EXPORT_SYMBOL(genlock_lock);
+EXPORT_SYMBOL(genlock_dreadlock);
 
 /**
  * genlock_wait - Wait for the lock to be released
@@ -372,13 +550,22 @@ EXPORT_SYMBOL(genlock_lock);
 int genlock_wait(struct genlock_handle *handle, uint32_t timeout)
 {
-	struct genlock *lock = handle->lock;
+	struct genlock *lock;
 	unsigned long irqflags;
 	int ret = 0;
-	unsigned int ticks = msecs_to_jiffies(timeout);
+	unsigned long ticks = msecs_to_jiffies(timeout);
 
-	if (lock == NULL)
+	if (IS_ERR_OR_NULL(handle)) {
+		GENLOCK_LOG_ERR("Invalid handle\n");
 		return -EINVAL;
+	}
+
+	lock = handle->lock;
+
+	if (lock == NULL) {
+		GENLOCK_LOG_ERR("Handle does not have a lock attached\n");
+		return -EINVAL;
+	}
 
 	spin_lock_irqsave(&lock->lock, irqflags);
 
@@ -393,7 +580,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout)
 	}
 
 	while (lock->state != _UNLOCKED) {
-		unsigned int elapsed;
+		signed long elapsed;
 
 		spin_unlock_irqrestore(&lock->lock, irqflags);
 
@@ -407,7 +594,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout)
 			break;
 		}
 
-		ticks = elapsed;
+		ticks = (unsigned long) elapsed;
 	}
 
 done:
@@ -415,12 +602,7 @@ done:
 	return ret;
 }
 
-/**
- * genlock_release_lock - Release a lock attached to a handle
- * @handle - Pointer to the handle holding the lock
- */
-
-void genlock_release_lock(struct genlock_handle *handle)
+static void genlock_release_lock(struct genlock_handle *handle)
 {
 	unsigned long flags;
 
@@ -437,11 +619,12 @@ void genlock_release_lock(struct genlock_handle *handle)
 	}
 	spin_unlock_irqrestore(&handle->lock->lock, flags);
 
+	spin_lock(&genlock_ref_lock);
 	kref_put(&handle->lock->refcount, genlock_destroy);
+	spin_unlock(&genlock_ref_lock);
 	handle->lock = NULL;
 	handle->active = 0;
 }
-EXPORT_SYMBOL(genlock_release_lock);
 
 /*
  * Release function called when all references to a handle are released
@@ -468,8 +651,10 @@ static const struct file_operations genlock_handle_fops = {
 static struct genlock_handle *_genlock_get_handle(void)
 {
 	struct genlock_handle *handle = kzalloc(sizeof(*handle), GFP_KERNEL);
-	if (handle == NULL)
+	if (handle == NULL) {
+		GENLOCK_LOG_ERR("Unable to allocate memory for the handle\n");
 		return ERR_PTR(-ENOMEM);
+	}
 
 	return handle;
 }
@@ -482,12 +667,19 @@ static struct genlock_handle *_genlock_get_handle(void)
 
 struct genlock_handle *genlock_get_handle(void)
 {
+	void *ret;
 	struct genlock_handle *handle = _genlock_get_handle();
 	if (IS_ERR(handle))
 		return handle;
 
-	handle->file = anon_inode_getfile("genlock-handle",
+	ret = anon_inode_getfile("genlock-handle",
 		&genlock_handle_fops, handle, O_RDWR);
+	if (IS_ERR_OR_NULL(ret)) {
+		GENLOCK_LOG_ERR("Unable to create handle inode\n");
+		kfree(handle);
+		return ret;
+	}
+	handle->file = ret;
 
 	return handle;
 }
@@ -531,6 +723,9 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
 	struct genlock *lock;
 	int ret;
 
+	if (IS_ERR_OR_NULL(handle))
+		return -EINVAL;
+
 	switch (cmd) {
 	case GENLOCK_IOC_NEW: {
 		lock = genlock_create_lock(handle);
@@ -540,8 +735,11 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
 		return 0;
 	}
 	case GENLOCK_IOC_EXPORT: {
-		if (handle->lock == NULL)
+		if (handle->lock == NULL) {
+			GENLOCK_LOG_ERR("Handle does not have a lock"
+					"attached\n");
 			return -EINVAL;
+		}
 
 		ret = genlock_get_fd(handle->lock);
 		if (ret < 0)
@@ -574,6 +772,14 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
 		return genlock_lock(handle, param.op, param.flags,
 			param.timeout);
 	}
+	case GENLOCK_IOC_DREADLOCK: {
+		if (copy_from_user(&param, (void __user *) arg,
+		sizeof(param)))
+			return -EFAULT;
+
+		return genlock_dreadlock(handle, param.op, param.flags,
+			param.timeout);
+	}
 	case GENLOCK_IOC_WAIT: {
 		if (copy_from_user(&param, (void __user *) arg,
 		sizeof(param)))
@@ -582,10 +788,16 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
 		return genlock_wait(handle, param.timeout);
 	}
 	case GENLOCK_IOC_RELEASE: {
-		genlock_release_lock(handle);
-		return 0;
+		/*
+		 * Return error - this ioctl has been deprecated.
+		 * Locks should only be released when the handle is
+		 * destroyed
+		 */
+		GENLOCK_LOG_ERR("Deprecated RELEASE ioctl called\n");
+		return -EINVAL;
 	}
 	default:
+		GENLOCK_LOG_ERR("Invalid ioctl\n");
 		return -EINVAL;
 	}
 }

@@ -1,7 +1,7 @@
 /* drivers/android/pmem.c
  *
  * Copyright (C) 2007 Google, Inc.
- * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
+ * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -1074,17 +1074,17 @@ static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end)
 
 static int
 bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc,
-		int total_bits, int spacing)
+		int total_bits, int spacing, int start_bit)
 {
 	int bit_start, last_bit, word_index;
 
 	if (num_bits_to_alloc <= 0)
 		return -1;
 
-	for (bit_start = 0; ;
-		bit_start = (last_bit +
+	for (bit_start = start_bit; ;
+		bit_start = ((last_bit +
 			(word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1)
-			& ~(spacing - 1)) {
+			& ~(spacing - 1)) + start_bit) {
 		int bit_end = bit_start + num_bits_to_alloc, total_words;
 
 		if (bit_end > total_bits)
@@ -1162,7 +1162,8 @@ static int reserve_quanta(const unsigned int quanta_needed,
 	ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap,
 		quanta_needed,
 		(pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum,
-		spacing);
+		spacing,
+		start_bit);
 
 #if PMEM_DEBUG
 	if (ret < 0)
@@ -1915,6 +1916,13 @@ int pmem_cache_maint(struct file *file, unsigned int cmd,
 	if (!file)
 		return -EBADF;
 
+	/*
+	 * check that the vaddr passed for flushing is valid
+	 * so that you don't crash the kernel
+	 */
+	if (!pmem_addr->vaddr)
+		return -EINVAL;
+
 	data = file->private_data;
 	id = get_id(file);
 

drivers/mmc/host/msm_sdcc.c (167 lines changed; mode Normal file → Executable file)
@@ -73,7 +73,7 @@ static int msmsdcc_auto_suspend(struct mmc_host *, int);
 #define BUSCLK_TIMEOUT (HZ)
 #define SQN_BUSCLK_TIMEOUT (5 * HZ)
 static unsigned int msmsdcc_fmin = 144000;
-static unsigned int msmsdcc_fmax = 50000000;
+static unsigned int msmsdcc_fmax = 64000000;
 static unsigned int msmsdcc_4bit = 1;
 static unsigned int msmsdcc_pwrsave = 1;
 static unsigned int msmsdcc_piopoll = 1;
@@ -308,42 +308,40 @@ msmsdcc_dma_exec_func(struct msm_dmov_cmd *cmd)
 }
 
 static void
-msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
-			  unsigned int result,
-			  struct msm_dmov_errdata *err)
+msmsdcc_dma_complete_tlet(unsigned long data)
 {
-	struct msmsdcc_dma_data	*dma_data =
-		container_of(cmd, struct msmsdcc_dma_data, hdr);
-	struct msmsdcc_host	*host = dma_data->host;
+	struct msmsdcc_host *host = (struct msmsdcc_host *)data;
 	unsigned long		flags;
 	struct mmc_request	*mrq;
+	struct msm_dmov_errdata err;
 
 	spin_lock_irqsave(&host->lock, flags);
 	host->dma.active = 0;
 
+	err = host->dma.err;
 	mrq = host->curr.mrq;
 	BUG_ON(!mrq);
 	WARN_ON(!mrq->data);
 
-	if (!(result & DMOV_RSLT_VALID)) {
+	if (!(host->dma.result & DMOV_RSLT_VALID)) {
 		pr_err("msmsdcc: Invalid DataMover result\n");
 		goto out;
 	}
 
-	if (result & DMOV_RSLT_DONE) {
+	if (host->dma.result & DMOV_RSLT_DONE) {
 		host->curr.data_xfered = host->curr.xfer_size;
 	} else {
 		/* Error or flush  */
-		if (result & DMOV_RSLT_ERROR)
+		if (host->dma.result & DMOV_RSLT_ERROR)
 			pr_err("%s: DMA error (0x%.8x)\n",
-			       mmc_hostname(host->mmc), result);
-		if (result & DMOV_RSLT_FLUSH)
+			       mmc_hostname(host->mmc), host->dma.result);
+		if (host->dma.result & DMOV_RSLT_FLUSH)
 			pr_err("%s: DMA channel flushed (0x%.8x)\n",
-			       mmc_hostname(host->mmc), result);
-		if (err)
+			       mmc_hostname(host->mmc), host->dma.result);
+
 			pr_err("Flush data: %.8x %.8x %.8x %.8x %.8x %.8x\n",
-			       err->flush[0], err->flush[1], err->flush[2],
-			       err->flush[3], err->flush[4], err->flush[5]);
+		       err.flush[0], err.flush[1], err.flush[2],
+		       err.flush[3], err.flush[4], err.flush[5]);
 		if (!mrq->data->error)
 			mrq->data->error = -EIO;
 	}
@@ -391,6 +389,22 @@ out:
 	return;
 }
 
+static void
+msmsdcc_dma_complete_func(struct msm_dmov_cmd *cmd,
+			  unsigned int result,
+			  struct msm_dmov_errdata *err)
+{
+	struct msmsdcc_dma_data	*dma_data =
+		container_of(cmd, struct msmsdcc_dma_data, hdr);
+	struct msmsdcc_host *host = dma_data->host;
+
+	dma_data->result = result;
+	if (err)
+		memcpy(&dma_data->err, err, sizeof(struct msm_dmov_errdata));
+
+	tasklet_schedule(&host->dma_tlet);
+}
+
 static int validate_dma(struct msmsdcc_host *host, struct mmc_data *data)
 {
 	if (host->dma.channel == -1)
@@ -451,14 +465,30 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
 	host->curr.user_pages = 0;
 
 	box = &nc->cmd[0];
-	for (i = 0; i < host->dma.num_ents; i++) {
 
+	/* location of command block must be 64 bit aligned */
+	BUG_ON(host->dma.cmd_busaddr & 0x07);
+
+	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
+	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
+			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
+	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
+
+	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
+			host->dma.num_ents, host->dma.dir);
+	if (n == 0) {
+		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
+			mmc_hostname(host->mmc));
+		host->dma.sg = NULL;
+		host->dma.num_ents = 0;
+		return -ENOMEM;
+	}
+
+	for_each_sg(host->dma.sg, sg, n, i) {
 
 		box->cmd = CMD_MODE_BOX;
 
-	if (i == (host->dma.num_ents - 1))
+	/* Initialize sg dma address */
+	sg->dma_address = page_to_dma(mmc_dev(host->mmc), sg_page(sg))
+				+ sg->offset;
+
+		if (i == n - 1)
 			box->cmd |= CMD_LC;
 		rows = (sg_dma_len(sg) % MCI_FIFOSIZE) ?
 			(sg_dma_len(sg) / MCI_FIFOSIZE) + 1 :
@@ -486,27 +516,6 @@ static int msmsdcc_config_dma(struct msmsdcc_host *host, struct mmc_data *data)
 			box->cmd |= CMD_DST_CRCI(crci);
 		}
 		box++;
-		sg++;
 	}
 
-	/* location of command block must be 64 bit aligned */
-	BUG_ON(host->dma.cmd_busaddr & 0x07);
-
-	nc->cmdptr = (host->dma.cmd_busaddr >> 3) | CMD_PTR_LP;
-	host->dma.hdr.cmdptr = DMOV_CMD_PTR_LIST |
-			       DMOV_CMD_ADDR(host->dma.cmdptr_busaddr);
-	host->dma.hdr.complete_func = msmsdcc_dma_complete_func;
-
-	n = dma_map_sg(mmc_dev(host->mmc), host->dma.sg,
-			host->dma.num_ents, host->dma.dir);
-/* dsb inside dma_map_sg will write nc out to mem as well */
-
-	if (n != host->dma.num_ents) {
-		printk(KERN_ERR "%s: Unable to map in all sg elements\n",
-			mmc_hostname(host->mmc));
-		host->dma.sg = NULL;
-		host->dma.num_ents = 0;
-		return -ENOMEM;
-	}
-
 	return 0;
@@ -542,6 +551,11 @@ msmsdcc_start_command_deferred(struct msmsdcc_host *host,
 	      (cmd->opcode == 53))
 		*c |= MCI_CSPM_DATCMD;
 
+	if (host->prog_scan && (cmd->opcode == 12)) {
+		*c |= MCI_CPSM_PROGENA;
+		host->prog_enable = true;
+	}
+
 	if (cmd == cmd->mrq->stop)
 		*c |= MCI_CSPM_MCIABORT;
 
@@ -612,6 +626,8 @@ msmsdcc_start_data(struct msmsdcc_host *host, struct mmc_data *data,
 		}
 		dsb();
 		msm_dmov_enqueue_cmd_ext(host->dma.channel, &host->dma.hdr);
+		if (data->flags & MMC_DATA_WRITE)
+			host->prog_scan = true;
 	} else {
 		msmsdcc_writel(host, timeout, MMCIDATATIMER);
 
@@ -701,6 +717,9 @@ msmsdcc_pio_read(struct msmsdcc_host *host, char *buffer, unsigned int remain)
 		count += remain;
 	}else
 #endif
+	if (remain % 4)
+		remain = ((remain >> 2) + 1) << 2;
+
 		while (msmsdcc_readl(host, MMCISTATUS) & MCI_RXDATAAVLBL) {
 			*ptr = msmsdcc_readl(host, MMCIFIFO + (count % MCI_FIFOSIZE));
 			ptr++;
@@ -737,13 +756,14 @@ msmsdcc_pio_write(struct msmsdcc_host *host, char *buffer,
 	} else {
 #endif
 		do {
-			unsigned int count, maxcnt;
+			unsigned int count, maxcnt, sz;
 
 			maxcnt = status & MCI_TXFIFOEMPTY ? MCI_FIFOSIZE :
 							MCI_FIFOHALFSIZE;
 			count = min(remain, maxcnt);
 
-			writesl(base + MMCIFIFO, ptr, count >> 2);
+			sz = count % 4 ? (count >> 2) + 1 : (count >> 2);
+			writesl(base + MMCIFIFO, ptr, sz);
 			ptr += count;
 			remain -= count;
 
@@ -906,8 +926,23 @@ static void msmsdcc_do_cmdirq(struct msmsdcc_host *host, uint32_t status)
 		else if (host->curr.data) { /* Non DMA */
 			msmsdcc_stop_data(host);
 			msmsdcc_request_end(host, cmd->mrq);
-		} else /* host->data == NULL */
-			msmsdcc_request_end(host, cmd->mrq);
+		} else { /* host->data == NULL */
+			if (!cmd->error && host->prog_enable) {
+				if (status & MCI_PROGDONE) {
+					host->prog_scan = false;
+					host->prog_enable = false;
+					msmsdcc_request_end(host, cmd->mrq);
+				} else {
+					host->curr.cmd = cmd;
+				}
+			} else {
+				if (host->prog_enable) {
+					host->prog_scan = false;
+					host->prog_enable = false;
+				}
+				msmsdcc_request_end(host, cmd->mrq);
+			}
+		}
 	} else if (cmd->data)
 		if (!(cmd->data->flags & MMC_DATA_READ))
 			msmsdcc_start_data(host, cmd->data,
@@ -921,7 +956,7 @@ msmsdcc_handle_irq_data(struct msmsdcc_host *host, u32 status,
 	struct mmc_data *data = host->curr.data;
 
 	if (status & (MCI_CMDSENT | MCI_CMDRESPEND | MCI_CMDCRCFAIL |
-	              MCI_CMDTIMEOUT) && host->curr.cmd) {
+			MCI_CMDTIMEOUT | MCI_PROGDONE) && host->curr.cmd) {
 		msmsdcc_do_cmdirq(host, status);
 	}
 
@@ -1265,24 +1300,6 @@ msmsdcc_init_dma(struct msmsdcc_host *host)
 	return 0;
 }
 
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-static void
-do_resume_work(struct work_struct *work)
-{
-	struct msmsdcc_host *host =
-		container_of(work, struct msmsdcc_host, resume_task);
-	struct mmc_host	*mmc = host->mmc;
-
-	if (mmc) {
-		mmc_resume_host(mmc);
-		if (host->stat_irq)
-			enable_irq(host->stat_irq);
-	}
-}
-
-#endif
-
 
 #ifdef CONFIG_HAS_EARLYSUSPEND
 static void msmsdcc_early_suspend(struct early_suspend *h)
 {
@@ -1382,14 +1399,8 @@ msmsdcc_probe(struct platform_device *pdev)
 	host->dmares = dmares;
 	spin_lock_init(&host->lock);
 
-#ifdef CONFIG_MMC_EMBEDDED_SDIO
-	if (plat->embedded_sdio)
-		mmc_set_embedded_sdio_data(mmc,
-					   &plat->embedded_sdio->cis,
-					   &plat->embedded_sdio->cccr,
-					   plat->embedded_sdio->funcs,
-					   plat->embedded_sdio->num_funcs);
-#endif
+	tasklet_init(&host->dma_tlet, msmsdcc_dma_complete_tlet,
+			(unsigned long)host);
 
 	/*
 	 * Setup DMA
@@ -1608,22 +1619,14 @@ msmsdcc_resume(struct platform_device *dev)
 
 		msmsdcc_writel(host, host->saved_irq0mask, MMCIMASK0);
 
-		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO) {
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-			schedule_work(&host->resume_task);
-#else
+		if (mmc->card && mmc->card->type != MMC_TYPE_SDIO)
 			mmc_resume_host(mmc);
-#endif
-		}
 
 		if (host->stat_irq)
 			enable_irq(host->stat_irq);
 
 #if BUSCLK_PWRSAVE
 		if (host->clks_on)
 			msmsdcc_disable_clocks(host, 1);
 #endif
 
 	}
 	return 0;
 }

drivers/mmc/host/msm_sdcc.h (13 lines changed; mode Normal file → Executable file)
@@ -155,7 +155,7 @@
 #define MCI_IRQENABLE	\
 	(MCI_CMDCRCFAILMASK|MCI_DATACRCFAILMASK|MCI_CMDTIMEOUTMASK|	\
 	MCI_DATATIMEOUTMASK|MCI_TXUNDERRUNMASK|MCI_RXOVERRUNMASK|	\
-	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK)
+	MCI_CMDRESPENDMASK|MCI_CMDSENTMASK|MCI_DATAENDMASK|MCI_PROGDONEMASK)
 
 /*
  * The size of the FIFO in bytes.
@@ -164,7 +164,7 @@
 
 #define MCI_FIFOHALFSIZE (MCI_FIFOSIZE / 2)
 
-#define NR_SG		32
+#define NR_SG		128
 
 struct clk;
 
@@ -190,7 +190,7 @@ struct msmsdcc_dma_data {
 	int				busy; /* Set if DM is busy */
 	int				active;
 	unsigned int 			result;
-	struct msm_dmov_errdata 	*err;
+	struct msm_dmov_errdata		err;
 };
 
 struct msmsdcc_pio_data {
@@ -258,17 +258,12 @@ struct msmsdcc_host {
 	int polling_enabled;
 #endif
 
-#ifdef CONFIG_MMC_MSM7X00A_RESUME_IN_WQ
-	struct work_struct	resume_task;
-#endif
+	struct tasklet_struct 	dma_tlet;
 
 
 #ifdef CONFIG_MMC_AUTO_SUSPEND
 	unsigned long           suspended;
 #endif
-	unsigned int prog_scan;
-	unsigned int prog_enable;
 	/* Command parameters */
 	unsigned int		cmd_timeout;
 	unsigned int		cmd_pio_irqmask;
@@ -279,6 +274,8 @@ struct msmsdcc_host {
 	unsigned int	dummy_52_needed;
 	unsigned int	dummy_52_state;
 
+	bool prog_scan;
+	bool prog_enable;
 };
 
 #endif

@@ -51,7 +51,7 @@ unsigned crci_mask;
 
 #include "msm_nand.h"
 
-#define MSM_NAND_DMA_BUFFER_SIZE SZ_4K
+#define MSM_NAND_DMA_BUFFER_SIZE SZ_1M
 #define MSM_NAND_DMA_BUFFER_SLOTS \
 	(MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8))
 

drivers/staging/Kconfig (2 lines changed; mode Normal file → Executable file)
@@ -125,5 +125,7 @@ source "drivers/staging/iio/Kconfig"
 
 source "drivers/staging/zram/Kconfig"
 
+source "drivers/staging/snappy/Kconfig"
+
 endif # !STAGING_EXCLUDE_BUILD
 endif # STAGING

drivers/staging/Makefile (3 lines changed; mode Normal file → Executable file)
@@ -45,4 +45,5 @@ obj-$(CONFIG_DX_SEP)		+= sep/
 obj-$(CONFIG_IIO)		+= iio/
 obj-$(CONFIG_ZRAM)		+= zram/
 obj-$(CONFIG_XVMALLOC)		+= zram/
 
+obj-$(CONFIG_SNAPPY_COMPRESS)   += snappy/
+obj-$(CONFIG_SNAPPY_DECOMPRESS) += snappy/

drivers/staging/android/binder.c (274 lines changed; mode Normal file → Executable file)
@@ -3,7 +3,6 @@
  * Android IPC Subsystem
  *
  * Copyright (C) 2007-2008 Google, Inc.
- * Copyright (c) 2012, Code Aurora Forum. All rights reserved.
  *
  * This software is licensed under the terms of the GNU General Public
  * License version 2, as published by the Free Software Foundation, and
@@ -31,14 +30,15 @@
 #include <linux/rbtree.h>
 #include <linux/sched.h>
 #include <linux/seq_file.h>
-#include <linux/slab.h>
 #include <linux/uaccess.h>
 #include <linux/vmalloc.h>
+#include <linux/slab.h>
 
 #include "binder.h"
 
 static DEFINE_MUTEX(binder_lock);
 static DEFINE_MUTEX(binder_deferred_lock);
+static DEFINE_MUTEX(binder_mmap_lock);
 
 static HLIST_HEAD(binder_procs);
 static HLIST_HEAD(binder_deferred_list);
@@ -98,12 +98,12 @@ enum {
 	BINDER_DEBUG_BUFFER_ALLOC           = 1U << 13,
 	BINDER_DEBUG_PRIORITY_CAP           = 1U << 14,
 	BINDER_DEBUG_BUFFER_ALLOC_ASYNC     = 1U << 15,
-	BINDER_DEBUG_TOP_ERRORS             = 1U << 16,
+	BINDER_DEBUG_TOP_ERRORS		    = 1U << 16,
 };
 static uint32_t binder_debug_mask;
 module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO);
 
-static int binder_debug_no_lock;
+static bool binder_debug_no_lock;
 module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO);
 
 static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait);
@@ -258,7 +258,7 @@ struct binder_ref {
 };
 
 struct binder_buffer {
-	struct list_head entry; /* free and allocated entries by addesss */
+	struct list_head entry; /* free and allocated entries by address */
 	struct rb_node rb_node; /* free entry by size or allocated entry */
 				/* by address */
 	unsigned free:1;
@@ -288,6 +288,7 @@ struct binder_proc {
 	struct rb_root refs_by_node;
 	int pid;
 	struct vm_area_struct *vma;
+	struct mm_struct *vma_vm_mm;
 	struct task_struct *tsk;
 	struct files_struct *files;
 	struct hlist_node deferred_work_node;
@@ -380,8 +381,7 @@ int task_get_unused_fd_flags(struct binder_proc *proc, int flags)
 
 repeat:
 	fdt = files_fdtable(files);
-	fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds,
-				files->next_fd);
+	fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd);
 
 	/*
 	 * N.B. For clone tasks sharing a files structure, this test
@@ -633,6 +633,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 	if (mm) {
 		down_write(&mm->mmap_sem);
 		vma = proc->vma;
+		if (vma && mm != proc->vma_vm_mm) {
+			pr_err("binder: %d: vma mm and task mm mismatch\n",
+				proc->pid);
+			vma = NULL;
+		}
 	}
 
 	if (allocate == 0)
@@ -640,8 +645,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 
 	if (vma == NULL) {
 		binder_debug(BINDER_DEBUG_TOP_ERRORS,
-		       "binder: %d: binder_alloc_buf failed to "
-		       "map pages in userspace, no vma\n", proc->pid);
+			     "binder: %d: binder_alloc_buf failed to "
+			     "map pages in userspace, no vma\n", proc->pid);
 		goto err_no_vma;
 	}
 
@@ -654,8 +659,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		*page = alloc_page(GFP_KERNEL | __GFP_ZERO);
 		if (*page == NULL) {
 			binder_debug(BINDER_DEBUG_TOP_ERRORS,
-			       "binder: %d: binder_alloc_buf failed "
-			       "for page at %p\n", proc->pid, page_addr);
+				     "binder: %d: binder_alloc_buf failed "
+				     "for page at %p\n", proc->pid, page_addr);
 			goto err_alloc_page_failed;
 		}
 		tmp_area.addr = page_addr;
@@ -664,9 +669,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 		ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr);
 		if (ret) {
 			binder_debug(BINDER_DEBUG_TOP_ERRORS,
-			       "binder: %d: binder_alloc_buf failed "
-			       "to map page at %p in kernel\n",
-			       proc->pid, page_addr);
+				     "binder: %d: binder_alloc_buf failed "
+				     "to map page at %p in kernel\n",
+				     proc->pid, page_addr);
 			goto err_map_kernel_failed;
 		}
 		user_page_addr =
@@ -674,9 +679,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate,
 | 
			
		||||
		ret = vm_insert_page(vma, user_page_addr, page[0]);
 | 
			
		||||
		if (ret) {
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
			       "binder: %d: binder_alloc_buf failed "
 | 
			
		||||
			       "to map page at %lx in userspace\n",
 | 
			
		||||
			       proc->pid, user_page_addr);
 | 
			
		||||
				     "binder: %d: binder_alloc_buf failed "
 | 
			
		||||
				     "to map page at %lx in userspace\n",
 | 
			
		||||
				     proc->pid, user_page_addr);
 | 
			
		||||
			goto err_vm_insert_page_failed;
 | 
			
		||||
		}
 | 
			
		||||
		/* vm_insert_page does not seem to increment the refcount */
 | 
			
		||||
@@ -724,8 +729,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 | 
			
		||||
 | 
			
		||||
	if (proc->vma == NULL) {
 | 
			
		||||
		binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
		       "binder: %d: binder_alloc_buf, no vma\n",
 | 
			
		||||
		       proc->pid);
 | 
			
		||||
			     "binder: %d: binder_alloc_buf, no vma\n",
 | 
			
		||||
			     proc->pid);
 | 
			
		||||
		return NULL;
 | 
			
		||||
	}
 | 
			
		||||
 | 
			
		||||
@@ -763,8 +768,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc,
 | 
			
		||||
	}
 | 
			
		||||
	if (best_fit == NULL) {
 | 
			
		||||
		binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
		       "binder: %d: binder_alloc_buf size %zd failed, "
 | 
			
		||||
		       "no address space\n", proc->pid, size);
 | 
			
		||||
			     "binder: %d: binder_alloc_buf size %zd failed, "
 | 
			
		||||
			     "no address space\n", proc->pid, size);
 | 
			
		||||
		return NULL;
 | 
			
		||||
	}
 | 
			
		||||
	if (n == NULL) {
 | 
			
		||||
@@ -999,8 +1004,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
 | 
			
		||||
			    !(node == binder_context_mgr_node &&
 | 
			
		||||
			    node->has_strong_ref)) {
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
					"binder: invalid inc strong "
 | 
			
		||||
					"node for %d\n", node->debug_id);
 | 
			
		||||
					     "binder: invalid inc strong "
 | 
			
		||||
					     "node for %d\n", node->debug_id);
 | 
			
		||||
				return -EINVAL;
 | 
			
		||||
			}
 | 
			
		||||
			node->internal_strong_refs++;
 | 
			
		||||
@@ -1016,8 +1021,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal,
 | 
			
		||||
		if (!node->has_weak_ref && list_empty(&node->work.entry)) {
 | 
			
		||||
			if (target_list == NULL) {
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
					"binder: invalid inc weak node "
 | 
			
		||||
					"for %d\n", node->debug_id);
 | 
			
		||||
					     "binder: invalid inc weak node "
 | 
			
		||||
					     "for %d\n", node->debug_id);
 | 
			
		||||
				return -EINVAL;
 | 
			
		||||
			}
 | 
			
		||||
			list_add_tail(&node->work.entry, target_list);
 | 
			
		||||
@@ -1053,7 +1058,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal)
 | 
			
		||||
			if (node->proc) {
 | 
			
		||||
				rb_erase(&node->rb_node, &node->proc->nodes);
 | 
			
		||||
				binder_debug(BINDER_DEBUG_INTERNAL_REFS,
 | 
			
		||||
					    "binder: refless node %d deleted\n",
 | 
			
		||||
					     "binder: refless node %d deleted\n",
 | 
			
		||||
					     node->debug_id);
 | 
			
		||||
			} else {
 | 
			
		||||
				hlist_del(&node->dead_node);
 | 
			
		||||
@@ -1272,8 +1277,7 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 | 
			
		||||
				binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 | 
			
		||||
					     "binder: send failed reply for "
 | 
			
		||||
					     "transaction %d to %d:%d\n",
 | 
			
		||||
					      t->debug_id,
 | 
			
		||||
					      target_thread->proc->pid,
 | 
			
		||||
					      t->debug_id, target_thread->proc->pid,
 | 
			
		||||
					      target_thread->pid);
 | 
			
		||||
 | 
			
		||||
				binder_pop_transaction(target_thread, t);
 | 
			
		||||
@@ -1281,11 +1285,12 @@ static void binder_send_failed_reply(struct binder_transaction *t,
 | 
			
		||||
				wake_up_interruptible(&target_thread->wait);
 | 
			
		||||
			} else {
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
					"binder: reply failed, target "
 | 
			
		||||
					"thread, %d:%d, has error code %d "
 | 
			
		||||
					"already\n", target_thread->proc->pid,
 | 
			
		||||
					target_thread->pid,
 | 
			
		||||
					target_thread->return_error);
 | 
			
		||||
					     "binder: reply failed, target "
 | 
			
		||||
					     "thread, %d:%d, has error code %d "
 | 
			
		||||
					     "already\n",
 | 
			
		||||
					     target_thread->proc->pid,
 | 
			
		||||
					     target_thread->pid,
 | 
			
		||||
					     target_thread->return_error);
 | 
			
		||||
			}
 | 
			
		||||
			return;
 | 
			
		||||
		} else {
 | 
			
		||||
@@ -1319,15 +1324,14 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 | 
			
		||||
	int debug_id = buffer->debug_id;
 | 
			
		||||
 | 
			
		||||
	binder_debug(BINDER_DEBUG_TRANSACTION,
 | 
			
		||||
		     "binder: %d buffer release %d, size %zd-%zd, failed at"
 | 
			
		||||
		     " %p\n", proc->pid, buffer->debug_id,
 | 
			
		||||
		     "binder: %d buffer release %d, size %zd-%zd, failed at %p\n",
 | 
			
		||||
		     proc->pid, buffer->debug_id,
 | 
			
		||||
		     buffer->data_size, buffer->offsets_size, failed_at);
 | 
			
		||||
 | 
			
		||||
	if (buffer->target_node)
 | 
			
		||||
		binder_dec_node(buffer->target_node, 1, 0);
 | 
			
		||||
 | 
			
		||||
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size,
 | 
			
		||||
				sizeof(void *)));
 | 
			
		||||
	offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *)));
 | 
			
		||||
	if (failed_at)
 | 
			
		||||
		off_end = failed_at;
 | 
			
		||||
	else
 | 
			
		||||
@@ -1338,44 +1342,41 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 | 
			
		||||
		    buffer->data_size < sizeof(*fp) ||
 | 
			
		||||
		    !IS_ALIGNED(*offp, sizeof(void *))) {
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
					"binder: transaction release %d bad"
 | 
			
		||||
					"offset %zd, size %zd\n", debug_id,
 | 
			
		||||
					*offp, buffer->data_size);
 | 
			
		||||
				     "binder: transaction release %d bad"
 | 
			
		||||
				     "offset %zd, size %zd\n", debug_id,
 | 
			
		||||
				     *offp, buffer->data_size);
 | 
			
		||||
			continue;
 | 
			
		||||
		}
 | 
			
		||||
		fp = (struct flat_binder_object *)(buffer->data + *offp);
 | 
			
		||||
		switch (fp->type) {
 | 
			
		||||
		case BINDER_TYPE_BINDER:
 | 
			
		||||
		case BINDER_TYPE_WEAK_BINDER: {
 | 
			
		||||
			struct binder_node *node = binder_get_node(proc,
 | 
			
		||||
								fp->binder);
 | 
			
		||||
			struct binder_node *node = binder_get_node(proc, fp->binder);
 | 
			
		||||
			if (node == NULL) {
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
					"binder: transaction release %d"
 | 
			
		||||
				       " bad node %p\n", debug_id, fp->binder);
 | 
			
		||||
					     "binder: transaction release %d"
 | 
			
		||||
					     " bad node %p\n", debug_id,
 | 
			
		||||
					     fp->binder);
 | 
			
		||||
				break;
 | 
			
		||||
			}
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TRANSACTION,
 | 
			
		||||
				     "        node %d u%p\n",
 | 
			
		||||
				     node->debug_id, node->ptr);
 | 
			
		||||
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER,
 | 
			
		||||
									0);
 | 
			
		||||
			binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0);
 | 
			
		||||
		} break;
 | 
			
		||||
		case BINDER_TYPE_HANDLE:
 | 
			
		||||
		case BINDER_TYPE_WEAK_HANDLE: {
 | 
			
		||||
			struct binder_ref *ref = binder_get_ref(proc,
 | 
			
		||||
								fp->handle);
 | 
			
		||||
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
 | 
			
		||||
			if (ref == NULL) {
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
					"binder: transaction release %d"
 | 
			
		||||
				       " bad handle %ld\n", debug_id,
 | 
			
		||||
				       fp->handle);
 | 
			
		||||
					     "binder: transaction release %d"
 | 
			
		||||
					     " bad handle %ld\n", debug_id,
 | 
			
		||||
					     fp->handle);
 | 
			
		||||
				break;
 | 
			
		||||
			}
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TRANSACTION,
 | 
			
		||||
				     "        ref %d desc %d (node %d)\n",
 | 
			
		||||
				     ref->debug_id, ref->desc,
 | 
			
		||||
				     ref->node->debug_id);
 | 
			
		||||
				     ref->debug_id, ref->desc, ref->node->debug_id);
 | 
			
		||||
			binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE);
 | 
			
		||||
		} break;
 | 
			
		||||
 | 
			
		||||
@@ -1388,8 +1389,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc,
 | 
			
		||||
 | 
			
		||||
		default:
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				"binder: transaction release %d bad "
 | 
			
		||||
			       "object type %lx\n", debug_id, fp->type);
 | 
			
		||||
				     "binder: transaction release %d bad "
 | 
			
		||||
				     "object type %lx\n", debug_id, fp->type);
 | 
			
		||||
			break;
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@@ -1614,19 +1615,15 @@ static void binder_transaction(struct binder_proc *proc,
 | 
			
		||||
		case BINDER_TYPE_BINDER:
 | 
			
		||||
		case BINDER_TYPE_WEAK_BINDER: {
 | 
			
		||||
			struct binder_ref *ref;
 | 
			
		||||
			struct binder_node *node = binder_get_node(proc,
 | 
			
		||||
								fp->binder);
 | 
			
		||||
			struct binder_node *node = binder_get_node(proc, fp->binder);
 | 
			
		||||
			if (node == NULL) {
 | 
			
		||||
				node = binder_new_node(proc, fp->binder,
 | 
			
		||||
								fp->cookie);
 | 
			
		||||
				node = binder_new_node(proc, fp->binder, fp->cookie);
 | 
			
		||||
				if (node == NULL) {
 | 
			
		||||
					return_error = BR_FAILED_REPLY;
 | 
			
		||||
					goto err_binder_new_node_failed;
 | 
			
		||||
				}
 | 
			
		||||
				node->min_priority = fp->flags &
 | 
			
		||||
						FLAT_BINDER_FLAG_PRIORITY_MASK;
 | 
			
		||||
				node->accept_fds = !!(fp->flags &
 | 
			
		||||
						FLAT_BINDER_FLAG_ACCEPTS_FDS);
 | 
			
		||||
				node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK;
 | 
			
		||||
				node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS);
 | 
			
		||||
			}
 | 
			
		||||
			if (fp->cookie != node->cookie) {
 | 
			
		||||
				binder_user_error("binder: %d:%d sending u%p "
 | 
			
		||||
@@ -1656,8 +1653,7 @@ static void binder_transaction(struct binder_proc *proc,
 | 
			
		||||
		} break;
 | 
			
		||||
		case BINDER_TYPE_HANDLE:
 | 
			
		||||
		case BINDER_TYPE_WEAK_HANDLE: {
 | 
			
		||||
			struct binder_ref *ref = binder_get_ref(proc,
 | 
			
		||||
								fp->handle);
 | 
			
		||||
			struct binder_ref *ref = binder_get_ref(proc, fp->handle);
 | 
			
		||||
			if (ref == NULL) {
 | 
			
		||||
				binder_user_error("binder: %d:%d got "
 | 
			
		||||
					"transaction with invalid "
 | 
			
		||||
@@ -1673,31 +1669,24 @@ static void binder_transaction(struct binder_proc *proc,
 | 
			
		||||
					fp->type = BINDER_TYPE_WEAK_BINDER;
 | 
			
		||||
				fp->binder = ref->node->ptr;
 | 
			
		||||
				fp->cookie = ref->node->cookie;
 | 
			
		||||
				binder_inc_node(ref->node, fp->type ==
 | 
			
		||||
						BINDER_TYPE_BINDER, 0, NULL);
 | 
			
		||||
				binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL);
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TRANSACTION,
 | 
			
		||||
				      "        ref %d desc %d -> node %d u%p\n",
 | 
			
		||||
				     ref->debug_id, ref->desc,
 | 
			
		||||
				    ref->node->debug_id,
 | 
			
		||||
				     ref->node->ptr);
 | 
			
		||||
					     "        ref %d desc %d -> node %d u%p\n",
 | 
			
		||||
					     ref->debug_id, ref->desc, ref->node->debug_id,
 | 
			
		||||
					     ref->node->ptr);
 | 
			
		||||
			} else {
 | 
			
		||||
				struct binder_ref *new_ref;
 | 
			
		||||
				new_ref = binder_get_ref_for_node(target_proc,
 | 
			
		||||
								ref->node);
 | 
			
		||||
				new_ref = binder_get_ref_for_node(target_proc, ref->node);
 | 
			
		||||
				if (new_ref == NULL) {
 | 
			
		||||
					return_error = BR_FAILED_REPLY;
 | 
			
		||||
					goto err_binder_get_ref_for_node_failed;
 | 
			
		||||
				}
 | 
			
		||||
				fp->handle = new_ref->desc;
 | 
			
		||||
				binder_inc_ref(new_ref, fp->type ==
 | 
			
		||||
						BINDER_TYPE_HANDLE, NULL);
 | 
			
		||||
				binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL);
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TRANSACTION,
 | 
			
		||||
					     "        ref %d desc %d -> ref %d"
 | 
			
		||||
					     " desc %d (node %d)\n",
 | 
			
		||||
					     ref->debug_id, ref->desc,
 | 
			
		||||
					     new_ref->debug_id,
 | 
			
		||||
					     new_ref->desc,
 | 
			
		||||
					     ref->node->debug_id);
 | 
			
		||||
					     "        ref %d desc %d -> ref %d desc %d (node %d)\n",
 | 
			
		||||
					     ref->debug_id, ref->desc, new_ref->debug_id,
 | 
			
		||||
					     new_ref->desc, ref->node->debug_id);
 | 
			
		||||
			}
 | 
			
		||||
		} break;
 | 
			
		||||
 | 
			
		||||
@@ -1707,19 +1696,13 @@ static void binder_transaction(struct binder_proc *proc,
 | 
			
		||||
 | 
			
		||||
			if (reply) {
 | 
			
		||||
				if (!(in_reply_to->flags & TF_ACCEPT_FDS)) {
 | 
			
		||||
					binder_user_error("binder: %d:%d got"
 | 
			
		||||
						" reply with fd, %ld, but"
 | 
			
		||||
						" target does not allow fds\n",
 | 
			
		||||
						proc->pid, thread->pid,
 | 
			
		||||
						fp->handle);
 | 
			
		||||
					binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n",
 | 
			
		||||
						proc->pid, thread->pid, fp->handle);
 | 
			
		||||
					return_error = BR_FAILED_REPLY;
 | 
			
		||||
					goto err_fd_not_allowed;
 | 
			
		||||
				}
 | 
			
		||||
			} else if (!target_node->accept_fds) {
 | 
			
		||||
				binder_user_error(
 | 
			
		||||
						"binder: %d:%d got transaction"
 | 
			
		||||
						" with fd, %ld, but target does"
 | 
			
		||||
						" not allow fds\n",
 | 
			
		||||
				binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n",
 | 
			
		||||
					proc->pid, thread->pid, fp->handle);
 | 
			
		||||
				return_error = BR_FAILED_REPLY;
 | 
			
		||||
				goto err_fd_not_allowed;
 | 
			
		||||
@@ -1727,15 +1710,12 @@ static void binder_transaction(struct binder_proc *proc,
 | 
			
		||||
 | 
			
		||||
			file = fget(fp->handle);
 | 
			
		||||
			if (file == NULL) {
 | 
			
		||||
				binder_user_error(
 | 
			
		||||
						"binder: %d:%d got transaction"
 | 
			
		||||
						" with invalid fd, %ld\n",
 | 
			
		||||
				binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n",
 | 
			
		||||
					proc->pid, thread->pid, fp->handle);
 | 
			
		||||
				return_error = BR_FAILED_REPLY;
 | 
			
		||||
				goto err_fget_failed;
 | 
			
		||||
			}
 | 
			
		||||
			target_fd = task_get_unused_fd_flags(target_proc,
 | 
			
		||||
								O_CLOEXEC);
 | 
			
		||||
			target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC);
 | 
			
		||||
			if (target_fd < 0) {
 | 
			
		||||
				fput(file);
 | 
			
		||||
				return_error = BR_FAILED_REPLY;
 | 
			
		||||
@@ -1743,8 +1723,7 @@ static void binder_transaction(struct binder_proc *proc,
 | 
			
		||||
			}
 | 
			
		||||
			task_fd_install(target_proc, target_fd, file);
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TRANSACTION,
 | 
			
		||||
				     "        fd %ld -> %d\n", fp->handle,
 | 
			
		||||
								target_fd);
 | 
			
		||||
				     "        fd %ld -> %d\n", fp->handle, target_fd);
 | 
			
		||||
			/* TODO: fput? */
 | 
			
		||||
			fp->handle = target_fd;
 | 
			
		||||
		} break;
 | 
			
		||||
@@ -1893,11 +1872,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 | 
			
		||||
				break;
 | 
			
		||||
			}
 | 
			
		||||
			binder_debug(BINDER_DEBUG_USER_REFS,
 | 
			
		||||
				     "binder: %d:%d %s ref %d desc %d s %d w %d"
 | 
			
		||||
				     " for node %d\n", proc->pid, thread->pid,
 | 
			
		||||
				     debug_string, ref->debug_id, ref->desc,
 | 
			
		||||
				     ref->strong, ref->weak,
 | 
			
		||||
				     ref->node->debug_id);
 | 
			
		||||
				     "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n",
 | 
			
		||||
				     proc->pid, thread->pid, debug_string, ref->debug_id,
 | 
			
		||||
				     ref->desc, ref->strong, ref->weak, ref->node->debug_id);
 | 
			
		||||
			break;
 | 
			
		||||
		}
 | 
			
		||||
		case BC_INCREFS_DONE:
 | 
			
		||||
@@ -1958,19 +1935,17 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 | 
			
		||||
			binder_debug(BINDER_DEBUG_USER_REFS,
 | 
			
		||||
				     "binder: %d:%d %s node %d ls %d lw %d\n",
 | 
			
		||||
				     proc->pid, thread->pid,
 | 
			
		||||
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE"
 | 
			
		||||
							: "BC_ACQUIRE_DONE",
 | 
			
		||||
				     node->debug_id, node->local_strong_refs,
 | 
			
		||||
							node->local_weak_refs);
 | 
			
		||||
				     cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE",
 | 
			
		||||
				     node->debug_id, node->local_strong_refs, node->local_weak_refs);
 | 
			
		||||
			break;
 | 
			
		||||
		}
 | 
			
		||||
		case BC_ATTEMPT_ACQUIRE:
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				"binder: BC_ATTEMPT_ACQUIRE not supported\n");
 | 
			
		||||
				     "binder: BC_ATTEMPT_ACQUIRE not supported\n");
 | 
			
		||||
			return -EINVAL;
 | 
			
		||||
		case BC_ACQUIRE_RESULT:
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				"binder: BC_ACQUIRE_RESULT not supported\n");
 | 
			
		||||
		        binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				     "binder: BC_ACQUIRE_RESULT not supported\n");
 | 
			
		||||
			return -EINVAL;
 | 
			
		||||
 | 
			
		||||
		case BC_FREE_BUFFER: {
 | 
			
		||||
@@ -1996,11 +1971,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 | 
			
		||||
				break;
 | 
			
		||||
			}
 | 
			
		||||
			binder_debug(BINDER_DEBUG_FREE_BUFFER,
 | 
			
		||||
				     "binder: %d:%d BC_FREE_BUFFER u%p found"
 | 
			
		||||
				     " buffer %d for %s transaction\n",
 | 
			
		||||
				     proc->pid, thread->pid, data_ptr,
 | 
			
		||||
				     buffer->debug_id, buffer->transaction ?
 | 
			
		||||
				     "active" : "finished");
 | 
			
		||||
				     "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n",
 | 
			
		||||
				     proc->pid, thread->pid, data_ptr, buffer->debug_id,
 | 
			
		||||
				     buffer->transaction ? "active" : "finished");
 | 
			
		||||
 | 
			
		||||
			if (buffer->transaction) {
 | 
			
		||||
				buffer->transaction->buffer = NULL;
 | 
			
		||||
@@ -2097,15 +2070,13 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 | 
			
		||||
			}
 | 
			
		||||
 | 
			
		||||
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
 | 
			
		||||
				     "binder: %d:%d %s %p ref %d desc %d s %d"
 | 
			
		||||
				     " w %d for node %d\n",
 | 
			
		||||
				     "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n",
 | 
			
		||||
				     proc->pid, thread->pid,
 | 
			
		||||
				     cmd == BC_REQUEST_DEATH_NOTIFICATION ?
 | 
			
		||||
				     "BC_REQUEST_DEATH_NOTIFICATION" :
 | 
			
		||||
				     "BC_CLEAR_DEATH_NOTIFICATION",
 | 
			
		||||
				     cookie, ref->debug_id, ref->desc,
 | 
			
		||||
				     ref->strong, ref->weak,
 | 
			
		||||
				     ref->node->debug_id);
 | 
			
		||||
				     ref->strong, ref->weak, ref->node->debug_id);
 | 
			
		||||
 | 
			
		||||
			if (cmd == BC_REQUEST_DEATH_NOTIFICATION) {
 | 
			
		||||
				if (ref->death) {
 | 
			
		||||
@@ -2119,12 +2090,10 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 | 
			
		||||
				death = kzalloc(sizeof(*death), GFP_KERNEL);
 | 
			
		||||
				if (death == NULL) {
 | 
			
		||||
					thread->return_error = BR_ERROR;
 | 
			
		||||
					binder_debug(
 | 
			
		||||
						BINDER_DEBUG_FAILED_TRANSACTION,
 | 
			
		||||
						"binder: %d:%d "
 | 
			
		||||
						"BC_REQUEST_DEATH_NOTIFICATION"
 | 
			
		||||
						" failed\n",
 | 
			
		||||
						proc->pid, thread->pid);
 | 
			
		||||
					binder_debug(BINDER_DEBUG_FAILED_TRANSACTION,
 | 
			
		||||
						     "binder: %d:%d "
 | 
			
		||||
						     "BC_REQUEST_DEATH_NOTIFICATION failed\n",
 | 
			
		||||
						     proc->pid, thread->pid);
 | 
			
		||||
					break;
 | 
			
		||||
				}
 | 
			
		||||
				binder_stats_created(BINDER_STAT_DEATH);
 | 
			
		||||
@@ -2214,8 +2183,8 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread,
 | 
			
		||||
 | 
			
		||||
		default:
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
			      "binder: %d:%d unknown command %d\n",
 | 
			
		||||
			       proc->pid, thread->pid, cmd);
 | 
			
		||||
				     "binder: %d:%d unknown command %d\n",
 | 
			
		||||
				     proc->pid, thread->pid, cmd);
 | 
			
		||||
			return -EINVAL;
 | 
			
		||||
		}
 | 
			
		||||
		*consumed = ptr - buffer;
 | 
			
		||||
@@ -2272,6 +2241,7 @@ retry:
 | 
			
		||||
			if (put_user(thread->return_error2, (uint32_t __user *)ptr))
 | 
			
		||||
				return -EFAULT;
 | 
			
		||||
			ptr += sizeof(uint32_t);
 | 
			
		||||
			binder_stat_br(proc, thread, thread->return_error2);
 | 
			
		||||
			if (ptr == end)
 | 
			
		||||
				goto done;
 | 
			
		||||
			thread->return_error2 = BR_OK;
 | 
			
		||||
@@ -2279,6 +2249,7 @@ retry:
 | 
			
		||||
		if (put_user(thread->return_error, (uint32_t __user *)ptr))
 | 
			
		||||
			return -EFAULT;
 | 
			
		||||
		ptr += sizeof(uint32_t);
 | 
			
		||||
		binder_stat_br(proc, thread, thread->return_error);
 | 
			
		||||
		thread->return_error = BR_OK;
 | 
			
		||||
		goto done;
 | 
			
		||||
	}
 | 
			
		||||
@@ -2434,6 +2405,7 @@ retry:
 | 
			
		||||
			if (put_user(death->cookie, (void * __user *)ptr))
 | 
			
		||||
				return -EFAULT;
 | 
			
		||||
			ptr += sizeof(void *);
 | 
			
		||||
			binder_stat_br(proc, thread, cmd);
 | 
			
		||||
			binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION,
 | 
			
		||||
				     "binder: %d:%d %s %p\n",
 | 
			
		||||
				      proc->pid, thread->pid,
 | 
			
		||||
@@ -2541,6 +2513,7 @@ done:
 | 
			
		||||
			     proc->pid, thread->pid);
 | 
			
		||||
		if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer))
 | 
			
		||||
			return -EFAULT;
 | 
			
		||||
		binder_stat_br(proc, thread, BR_SPAWN_LOOPER);
 | 
			
		||||
	}
 | 
			
		||||
	return 0;
 | 
			
		||||
}
 | 
			
		||||
@@ -2684,11 +2657,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 | 
			
		||||
	unsigned int size = _IOC_SIZE(cmd);
 | 
			
		||||
	void __user *ubuf = (void __user *)arg;
 | 
			
		||||
 | 
			
		||||
	/*binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder_ioctl: %d:%d %x %lx\n",
 | 
			
		||||
					proc->pid, current->pid, cmd, arg);*/
 | 
			
		||||
	/*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/
 | 
			
		||||
 | 
			
		||||
	ret = wait_event_interruptible(binder_user_error_wait,
 | 
			
		||||
						binder_stop_on_user_error < 2);
 | 
			
		||||
	ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 | 
			
		||||
	if (ret)
 | 
			
		||||
		return ret;
 | 
			
		||||
 | 
			
		||||
@@ -2745,8 +2716,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 | 
			
		||||
		break;
 | 
			
		||||
	}
 | 
			
		||||
	case BINDER_SET_MAX_THREADS:
 | 
			
		||||
		if (copy_from_user(&proc->max_threads, ubuf,
 | 
			
		||||
					sizeof(proc->max_threads))) {
 | 
			
		||||
		if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) {
 | 
			
		||||
			ret = -EINVAL;
 | 
			
		||||
			goto err;
 | 
			
		||||
		}
 | 
			
		||||
@@ -2754,17 +2724,17 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
 | 
			
		||||
	case BINDER_SET_CONTEXT_MGR:
 | 
			
		||||
		if (binder_context_mgr_node != NULL) {
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				"binder: BINDER_SET_CONTEXT_MGR already set\n");
 | 
			
		||||
				     "binder: BINDER_SET_CONTEXT_MGR already set\n");
 | 
			
		||||
			ret = -EBUSY;
 | 
			
		||||
			goto err;
 | 
			
		||||
		}
 | 
			
		||||
		if (binder_context_mgr_uid != -1) {
 | 
			
		||||
			if (binder_context_mgr_uid != current->cred->euid) {
 | 
			
		||||
				binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				       "binder: BINDER_SET_"
 | 
			
		||||
				       "CONTEXT_MGR bad uid %d != %d\n",
 | 
			
		||||
				       current->cred->euid,
 | 
			
		||||
				       binder_context_mgr_uid);
 | 
			
		||||
					     "binder: BINDER_SET_"
 | 
			
		||||
					     "CONTEXT_MGR bad uid %d != %d\n",
 | 
			
		||||
					     current->cred->euid,
 | 
			
		||||
					     binder_context_mgr_uid);
 | 
			
		||||
				ret = -EPERM;
 | 
			
		||||
				goto err;
 | 
			
		||||
			}
 | 
			
		||||
@@ -2808,8 +2778,8 @@ err:
 | 
			
		||||
	wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2);
 | 
			
		||||
	if (ret && ret != -ERESTARTSYS)
 | 
			
		||||
		binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				"binder: %d:%d ioctl %x %lx returned %d\n",
 | 
			
		||||
				proc->pid, current->pid, cmd, arg, ret);
 | 
			
		||||
			     "binder: %d:%d ioctl %x %lx returned %d\n",
 | 
			
		||||
			     proc->pid, current->pid, cmd, arg, ret);
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -2821,7 +2791,6 @@ static void binder_vma_open(struct vm_area_struct *vma)
 | 
			
		||||
		     proc->pid, vma->vm_start, vma->vm_end,
 | 
			
		||||
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 | 
			
		||||
		     (unsigned long)pgprot_val(vma->vm_page_prot));
 | 
			
		||||
	dump_stack();
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
static void binder_vma_close(struct vm_area_struct *vma)
 | 
			
		||||
@@ -2833,6 +2802,7 @@ static void binder_vma_close(struct vm_area_struct *vma)
 | 
			
		||||
		     (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags,
 | 
			
		||||
		     (unsigned long)pgprot_val(vma->vm_page_prot));
 | 
			
		||||
	proc->vma = NULL;
 | 
			
		||||
	proc->vma_vm_mm = NULL;
 | 
			
		||||
	binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES);
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -2865,6 +2835,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		||||
	}
 | 
			
		||||
	vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE;
 | 
			
		||||
 | 
			
		||||
	mutex_lock(&binder_mmap_lock);
 | 
			
		||||
	if (proc->buffer) {
 | 
			
		||||
		ret = -EBUSY;
 | 
			
		||||
		failure_string = "already mapped";
 | 
			
		||||
@@ -2879,13 +2850,13 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		||||
	}
 | 
			
		||||
	proc->buffer = area->addr;
 | 
			
		||||
	proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer;
 | 
			
		||||
	mutex_unlock(&binder_mmap_lock);
 | 
			
		||||
 | 
			
		||||
#ifdef CONFIG_CPU_CACHE_VIPT
 | 
			
		||||
	if (cache_is_vipt_aliasing()) {
 | 
			
		||||
		while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) {
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
			"binder_mmap: %d %lx-%lx maps %p bad alignment\n",
 | 
			
		||||
			proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
 | 
			
		||||
				     "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);
 | 
			
		||||
			vma->vm_start += PAGE_SIZE;
 | 
			
		||||
		}
 | 
			
		||||
	}
 | 
			
		||||
@@ -2913,11 +2884,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma)
 | 
			
		||||
	binder_insert_free_buffer(proc, buffer);
 | 
			
		||||
	proc->free_async_space = proc->buffer_size / 2;
 | 
			
		||||
	barrier();
 | 
			
		||||
	proc->files = get_files_struct(current);
 | 
			
		||||
	proc->files = get_files_struct(proc->tsk);
 | 
			
		||||
	proc->vma = vma;
 | 
			
		||||
	proc->vma_vm_mm = vma->vm_mm;
 | 
			
		||||
 | 
			
		||||
	/*binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
		"binder_mmap: %d %lx-%lx maps %p\n",
 | 
			
		||||
	/*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n",
 | 
			
		||||
		 proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/
 | 
			
		||||
	return 0;
 | 
			
		||||
 | 
			
		||||
@@ -2925,14 +2896,17 @@ err_alloc_small_buf_failed:
 | 
			
		||||
	kfree(proc->pages);
 | 
			
		||||
	proc->pages = NULL;
 | 
			
		||||
err_alloc_pages_failed:
 | 
			
		||||
	mutex_lock(&binder_mmap_lock);
 | 
			
		||||
	vfree(proc->buffer);
 | 
			
		||||
	proc->buffer = NULL;
 | 
			
		||||
err_get_vm_area_failed:
 | 
			
		||||
err_already_mapped:
 | 
			
		||||
	mutex_unlock(&binder_mmap_lock);
 | 
			
		||||
err_bad_arg:
 | 
			
		||||
	binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
		"binder_mmap: %d %lx-%lx %s failed %d\n",
 | 
			
		||||
	       proc->pid, vma->vm_start, vma->vm_end, failure_string, ret);
 | 
			
		||||
		     "binder_mmap: %d %lx-%lx %s failed %d\n",
 | 
			
		||||
		     proc->pid, vma->vm_start, vma->vm_end, failure_string,
 | 
			
		||||
		     ret);
 | 
			
		||||
	return ret;
 | 
			
		||||
}
 | 
			
		||||
 | 
			
		||||
@@ -3087,9 +3061,9 @@ static void binder_deferred_release(struct binder_proc *proc)
 | 
			
		||||
			t->buffer = NULL;
 | 
			
		||||
			buffer->transaction = NULL;
 | 
			
		||||
			binder_debug(BINDER_DEBUG_TOP_ERRORS,
 | 
			
		||||
				"binder: release proc %d, "
 | 
			
		||||
			       "transaction %d, not freed\n",
 | 
			
		||||
			       proc->pid, t->debug_id);
 | 
			
		||||
				     "binder: release proc %d, "
 | 
			
		||||
				     "transaction %d, not freed\n",
 | 
			
		||||
				     proc->pid, t->debug_id);
 | 
			
		||||
			/*BUG();*/
 | 
			
		||||
		}
 | 
			
		||||
		binder_free_buf(proc, buffer);
 | 
			
		||||
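A side note on the @@ -380,8 +381,7 @@ hunk above: the fdtable bitmap moved from fdt->open_fds->fds_bits to a plain fdt->open_fds array, but the descriptor search is still the classic next-zero-bit walk. Below is a minimal userspace sketch of that walk, for illustration only; find_next_zero_bit() here is a stand-in reimplementation, not the kernel helper.

#include <limits.h>
#include <stddef.h>
#include <stdio.h>

#define BITS_PER_LONG (sizeof(unsigned long) * CHAR_BIT)

/* Stand-in for the kernel's find_next_zero_bit(): index of the first
 * clear bit in 'bitmap' at or after 'start', or 'nbits' if none. */
static size_t find_next_zero_bit(const unsigned long *bitmap,
				 size_t nbits, size_t start)
{
	for (size_t i = start; i < nbits; i++)
		if (!(bitmap[i / BITS_PER_LONG] &
		      (1UL << (i % BITS_PER_LONG))))
			return i;
	return nbits;
}

int main(void)
{
	unsigned long open_fds[1] = { 0x17 };	/* fds 0, 1, 2 and 4 in use */

	/* prints 3: the slot binder would hand to task_fd_install() */
	printf("next free fd: %zu\n", find_next_zero_bit(open_fds, 64, 0));
	return 0;
}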
 
drivers/staging/android/logger.c: 262 changes (Normal file → Executable file)

@@ -37,7 +37,7 @@
 * mutex 'mutex'.
 */
struct logger_log {
	unsigned char 		*buffer;/* the ring buffer itself */
	unsigned char		*buffer;/* the ring buffer itself */
	struct miscdevice	misc;	/* misc device representing the log */
	wait_queue_head_t	wq;	/* wait queue for readers */
	struct list_head	readers; /* this log's readers */
@@ -57,19 +57,25 @@ struct logger_reader {
	struct logger_log	*log;	/* associated log */
	struct list_head	list;	/* entry in logger_log's list */
	size_t			r_off;	/* current read head offset */
	bool			r_all;	/* reader can read all entries */
	int			r_ver;	/* reader ABI version */
};

/* logger_offset - returns index 'n' into the log via (optimized) modulus */
#define logger_offset(n)	((n) & (log->size - 1))
size_t logger_offset(struct logger_log *log, size_t n)
{
	return n & (log->size-1);
}


/*
 * file_get_log - Given a file structure, return the associated log
 *
 * This isn't aesthetic. We have several goals:
 *
 * 	1) Need to quickly obtain the associated log during an I/O operation
 * 	2) Readers need to maintain state (logger_reader)
 * 	3) Writers need to be very fast (open() should be a near no-op)
 *	1) Need to quickly obtain the associated log during an I/O operation
 *	2) Readers need to maintain state (logger_reader)
 *	3) Writers need to be very fast (open() should be a near no-op)
 *
 * In the reader case, we can trivially go file->logger_reader->logger_log.
 * For a writer, we don't want to maintain a logger_reader, so we just go
@@ -86,25 +92,75 @@ static inline struct logger_log *file_get_log(struct file *file)
}

/*
 * get_entry_len - Grabs the length of the payload of the next entry starting
 * from 'off'.
 * get_entry_header - returns a pointer to the logger_entry header within
 * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must
 * be provided. Typically the return value will be a pointer within
 * 'logger->buf'.  However, a pointer to 'scratch' may be returned if
 * the log entry spans the end and beginning of the circular buffer.
 */
static struct logger_entry *get_entry_header(struct logger_log *log,
		size_t off, struct logger_entry *scratch)
{
	size_t len = min(sizeof(struct logger_entry), log->size - off);
	if (len != sizeof(struct logger_entry)) {
		memcpy(((void *) scratch), log->buffer + off, len);
		memcpy(((void *) scratch) + len, log->buffer,
			sizeof(struct logger_entry) - len);
		return scratch;
	}

	return (struct logger_entry *) (log->buffer + off);
}

/*
 * get_entry_msg_len - Grabs the length of the message of the entry
 * starting from from 'off'.
 *
 * An entry length is 2 bytes (16 bits) in host endian order.
 * In the log, the length does not include the size of the log entry structure.
 * This function returns the size including the log entry structure.
 *
 * Caller needs to hold log->mutex.
 */
static __u32 get_entry_len(struct logger_log *log, size_t off)
static __u32 get_entry_msg_len(struct logger_log *log, size_t off)
{
	__u16 val;
	struct logger_entry scratch;
	struct logger_entry *entry;

	switch (log->size - off) {
	case 1:
		memcpy(&val, log->buffer + off, 1);
		memcpy(((char *) &val) + 1, log->buffer, 1);
		break;
	default:
		memcpy(&val, log->buffer + off, 2);
	entry = get_entry_header(log, off, &scratch);
	return entry->len;
}

static size_t get_user_hdr_len(int ver)
{
	if (ver < 2)
		return sizeof(struct user_logger_entry_compat);
	else
		return sizeof(struct logger_entry);
}

static ssize_t copy_header_to_user(int ver, struct logger_entry *entry,
					 char __user *buf)
{
	void *hdr;
	size_t hdr_len;
	struct user_logger_entry_compat v1;

	if (ver < 2) {
		v1.len      = entry->len;
		v1.__pad    = 0;
		v1.pid      = entry->pid;
		v1.tid      = entry->tid;
		v1.sec      = entry->sec;
		v1.nsec     = entry->nsec;
		hdr         = &v1;
		hdr_len     = sizeof(struct user_logger_entry_compat);
	} else {
		hdr         = entry;
		hdr_len     = sizeof(struct logger_entry);
	}

	return sizeof(struct logger_entry) + val;
	return copy_to_user(buf, hdr, hdr_len);
}

/*
@@ -118,15 +174,31 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
				   char __user *buf,
				   size_t count)
{
	struct logger_entry scratch;
	struct logger_entry *entry;
	size_t len;
	size_t msg_start;

	/*
	 * We read from the log in two disjoint operations. First, we read from
	 * the current read head offset up to 'count' bytes or to the end of
	 * First, copy the header to userspace, using the version of
	 * the header requested
	 */
	entry = get_entry_header(log, reader->r_off, &scratch);
	if (copy_header_to_user(reader->r_ver, entry, buf))
		return -EFAULT;

	count -= get_user_hdr_len(reader->r_ver);
	buf += get_user_hdr_len(reader->r_ver);
	msg_start = logger_offset(log,
		reader->r_off + sizeof(struct logger_entry));

	/*
	 * We read from the msg in two disjoint operations. First, we read from
	 * the current msg head offset up to 'count' bytes or to the end of
	 * the log, whichever comes first.
	 */
	len = min(count, log->size - reader->r_off);
	if (copy_to_user(buf, log->buffer + reader->r_off, len))
	len = min(count, log->size - msg_start);
	if (copy_to_user(buf, log->buffer + msg_start, len))
		return -EFAULT;

	/*
@@ -137,9 +209,34 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
		if (copy_to_user(buf + len, log->buffer, count - len))
			return -EFAULT;

	reader->r_off = logger_offset(reader->r_off + count);
	reader->r_off = logger_offset(log, reader->r_off +
		sizeof(struct logger_entry) + count);

	return count;
	return count + get_user_hdr_len(reader->r_ver);
}

/*
 * get_next_entry_by_uid - Starting at 'off', returns an offset into
 * 'log->buffer' which contains the first entry readable by 'euid'
 */
static size_t get_next_entry_by_uid(struct logger_log *log,
		size_t off, uid_t euid)
{
	while (off != log->w_off) {
		struct logger_entry *entry;
		struct logger_entry scratch;
		size_t next_len;

		entry = get_entry_header(log, off, &scratch);

		if (entry->euid == euid)
			return off;

		next_len = sizeof(struct logger_entry) + entry->len;
		off = logger_offset(log, off + next_len);
	}

	return off;
}

/*
@@ -147,11 +244,11 @@ static ssize_t do_read_log_to_user(struct logger_log *log,
 *
 * Behavior:
 *
 * 	- O_NONBLOCK works
 * 	- If there are no log entries to read, blocks until log is written to
 * 	- Atomically reads exactly one log entry
 *	- O_NONBLOCK works
 *	- If there are no log entries to read, blocks until log is written to
 *	- Atomically reads exactly one log entry
 *
 * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read
 * Will set errno to EINVAL if read
 * buffer is insufficient to hold next entry.
 */
static ssize_t logger_read(struct file *file, char __user *buf,
@@ -164,9 +261,10 @@ static ssize_t logger_read(struct file *file, char __user *buf,

start:
	while (1) {
		mutex_lock(&log->mutex);

		prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE);

		mutex_lock(&log->mutex);
		ret = (log->w_off == reader->r_off);
		mutex_unlock(&log->mutex);
		if (!ret)
@@ -191,6 +289,10 @@ start:

	mutex_lock(&log->mutex);

	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	/* is there still something to read or did we race? */
	if (unlikely(log->w_off == reader->r_off)) {
		mutex_unlock(&log->mutex);
@@ -198,7 +300,8 @@ start:
	}

	/* get the size of the next entry */
	ret = get_entry_len(log, reader->r_off);
	ret = get_user_hdr_len(reader->r_ver) +
		get_entry_msg_len(log, reader->r_off);
	if (count < ret) {
		ret = -EINVAL;
		goto out;
@@ -224,8 +327,9 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
	size_t count = 0;

	do {
		size_t nr = get_entry_len(log, off);
		off = logger_offset(off + nr);
		size_t nr = sizeof(struct logger_entry) +
			get_entry_msg_len(log, off);
		off = logger_offset(log, off + nr);
		count += nr;
	} while (count < len);

@@ -233,16 +337,28 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len)
}

/*
 * clock_interval - is a < c < b in mod-space? Put another way, does the line
 * from a to b cross c?
 * is_between - is a < c < b, accounting for wrapping of a, b, and c
 *    positions in the buffer
 *
 * That is, if a<b, check for c between a and b
 * and if a>b, check for c outside (not between) a and b
 *
 * |------- a xxxxxxxx b --------|
 *               c^
 *
 * |xxxxx b --------- a xxxxxxxxx|
 *    c^
 *  or                    c^
 */
static inline int clock_interval(size_t a, size_t b, size_t c)
static inline int is_between(size_t a, size_t b, size_t c)
{
	if (b < a) {
		if (a < c || b >= c)
	if (a < b) {
		/* is c between a and b? */
		if (a < c && c <= b)
			return 1;
	} else {
		if (a < c && b >= c)
		/* is c outside of b through a? */
		if (c <= b || a < c)
			return 1;
	}

@@ -260,14 +376,14 @@ static inline int clock_interval(size_t a, size_t b, size_t c)
static void fix_up_readers(struct logger_log *log, size_t len)
{
	size_t old = log->w_off;
	size_t new = logger_offset(old + len);
	size_t new = logger_offset(log, old + len);
	struct logger_reader *reader;

	if (clock_interval(old, new, log->head))
	if (is_between(old, new, log->head))
		log->head = get_next_entry(log, log->head, len);

	list_for_each_entry(reader, &log->readers, list)
		if (clock_interval(old, new, reader->r_off))
		if (is_between(old, new, reader->r_off))
			reader->r_off = get_next_entry(log, reader->r_off, len);
}

@@ -286,7 +402,7 @@ static void do_write_log(struct logger_log *log, const void *buf, size_t count)
	if (count != len)
		memcpy(log->buffer, buf + len, count - len);

	log->w_off = logger_offset(log->w_off + count);
	log->w_off = logger_offset(log, log->w_off + count);

}

@@ -309,9 +425,15 @@ static ssize_t do_write_log_from_user(struct logger_log *log,

	if (count != len)
		if (copy_from_user(log->buffer, buf + len, count - len))
			/*
			 * Note that by not updating w_off, this abandons the
			 * portion of the new entry that *was* successfully
			 * copied, just above.  This is intentional to avoid
			 * message corruption from missing fragments.
			 */
			return -EFAULT;

	log->w_off = logger_offset(log->w_off + count);
	log->w_off = logger_offset(log, log->w_off + count);

	return count;
}
@@ -336,7 +458,9 @@ ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov,
	header.tid = current->pid;
	header.sec = now.tv_sec;
	header.nsec = now.tv_nsec;
	header.euid = current_euid();
	header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD);
	header.hdr_size = sizeof(struct logger_entry);

	/* null writes succeed, return zero */
	if (unlikely(!header.len))
@@ -409,6 +533,10 @@ static int logger_open(struct inode *inode, struct file *file)
			return -ENOMEM;

		reader->log = log;
		reader->r_ver = 1;
		reader->r_all = in_egroup_p(inode->i_gid) ||
			capable(CAP_SYSLOG);

		INIT_LIST_HEAD(&reader->list);

		mutex_lock(&log->mutex);
@@ -433,9 +561,11 @@ static int logger_release(struct inode *ignored, struct file *file)
	if (file->f_mode & FMODE_READ) {
		struct logger_reader *reader = file->private_data;
		struct logger_log *log = reader->log;

		mutex_lock(&log->mutex);
		list_del(&reader->list);
		mutex_unlock(&log->mutex);

		kfree(reader);
	}

@@ -466,6 +596,10 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
	poll_wait(file, &log->wq, wait);

	mutex_lock(&log->mutex);
	if (!reader->r_all)
		reader->r_off = get_next_entry_by_uid(log,
			reader->r_off, current_euid());

	if (log->w_off != reader->r_off)
		ret |= POLLIN | POLLRDNORM;
	mutex_unlock(&log->mutex);
@@ -473,11 +607,25 @@ static unsigned int logger_poll(struct file *file, poll_table *wait)
	return ret;
}

static long logger_set_version(struct logger_reader *reader, void __user *arg)
{
	int version;
	if (copy_from_user(&version, arg, sizeof(int)))
		return -EFAULT;

	if ((version < 1) || (version > 2))
		return -EINVAL;

	reader->r_ver = version;
	return 0;
}

static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct logger_log *log = file_get_log(file);
	struct logger_reader *reader;
	long ret = -ENOTTY;
	long ret = -EINVAL;
	void __user *argp = (void __user *) arg;

	mutex_lock(&log->mutex);

@@ -502,8 +650,14 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
			break;
		}
		reader = file->private_data;

		if (!reader->r_all)
			reader->r_off = get_next_entry_by_uid(log,
				reader->r_off, current_euid());

		if (log->w_off != reader->r_off)
			ret = get_entry_len(log, reader->r_off);
			ret = get_user_hdr_len(reader->r_ver) +
				get_entry_msg_len(log, reader->r_off);
		else
			ret = 0;
		break;
@@ -517,6 +671,22 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
		log->head = log->w_off;
		ret = 0;
		break;
	case LOGGER_GET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = reader->r_ver;
		break;
	case LOGGER_SET_VERSION:
		if (!(file->f_mode & FMODE_READ)) {
			ret = -EBADF;
			break;
		}
		reader = file->private_data;
		ret = logger_set_version(reader, argp);
		break;
	}

	mutex_unlock(&log->mutex);
@@ -537,8 +707,8 @@ static const struct file_operations logger_fops = {

/*
 * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which
 * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than
 * LONG_MAX minus LOGGER_ENTRY_MAX_LEN.
 * must be a power of two, and greater than
 * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)).
 */
#define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \
static unsigned char _buf_ ## VAR[SIZE]; \
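Two invariants carry the logger.c rewrite above: logger_offset() is an exact modulus only because every log size is a power of two, and is_between() decides whether a write that may wrap past the end of the ring has overtaken a reader's offset. Below is a self-contained userspace sketch of both, with names mirrored from the diff for illustration (the real logger_offset() takes the log itself rather than a size):

#include <assert.h>
#include <stddef.h>
#include <stdio.h>

/* Exact modulus for a power-of-two ring size, as in logger_offset(). */
static size_t logger_offset(size_t size, size_t n)
{
	return n & (size - 1);
}

/* Mirror of is_between(): is position c inside the (possibly wrapped)
 * span that a write moving from a to b just consumed? */
static int is_between(size_t a, size_t b, size_t c)
{
	if (a < b)
		return a < c && c <= b;		/* plain interval */
	return c <= b || a < c;			/* interval wraps past 0 */
}

int main(void)
{
	const size_t size = 256;		/* must be a power of two */

	assert(logger_offset(size, 300) == 44);	/* 300 mod 256 */
	assert(is_between(10, 50, 30));		/* overtaken, no wrap */
	assert(is_between(200, 20, 250));	/* overtaken across the wrap */
	assert(!is_between(200, 20, 100));	/* safely in the gap */
	puts("ok");
	return 0;
}

When fix_up_readers() finds is_between() true for a reader, it advances that reader to the next entry boundary, which is why get_next_entry() walks in whole sizeof(struct logger_entry) + msg_len steps rather than raw bytes.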
 
drivers/staging/android/logger.h: 29 changes (Normal file → Executable file)

@@ -20,7 +20,12 @@
#include <linux/types.h>
#include <linux/ioctl.h>

struct logger_entry {
/*
 * The userspace structure for version 1 of the logger_entry ABI.
 * This structure is returned to userspace unless the caller requests
 * an upgrade to a newer ABI version.
 */
struct user_logger_entry_compat {
	__u16		len;	/* length of the payload */
	__u16		__pad;	/* no matter what, we get 2 bytes of padding */
	__s32		pid;	/* generating process's pid */
@@ -30,14 +35,28 @@ struct logger_entry {
	char		msg[0];	/* the entry's payload */
};

/*
 * The structure for version 2 of the logger_entry ABI.
 * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION)
 * is called with version >= 2
 */
struct logger_entry {
	__u16		len;		/* length of the payload */
	__u16		hdr_size;	/* sizeof(struct logger_entry_v2) */
	__s32		pid;		/* generating process's pid */
	__s32		tid;		/* generating process's tid */
	__s32		sec;		/* seconds since Epoch */
	__s32		nsec;		/* nanoseconds */
	uid_t		euid;		/* effective UID of logger */
	char		msg[0];		/* the entry's payload */
};

#define LOGGER_LOG_RADIO	"log_radio"	/* radio-related messages */
#define LOGGER_LOG_EVENTS	"log_events"	/* system/hardware events */
#define LOGGER_LOG_SYSTEM	"log_system"	/* system/framework messages */
#define LOGGER_LOG_MAIN		"log_main"	/* everything else */

#define LOGGER_ENTRY_MAX_LEN		(4*1024)
#define LOGGER_ENTRY_MAX_PAYLOAD	\
	(LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry))
#define LOGGER_ENTRY_MAX_PAYLOAD	4076

#define __LOGGERIO	0xAE

@@ -45,5 +64,7 @@ struct logger_entry {
#define LOGGER_GET_LOG_LEN		_IO(__LOGGERIO, 2) /* used log len */
#define LOGGER_GET_NEXT_ENTRY_LEN	_IO(__LOGGERIO, 3) /* next entry len */
#define LOGGER_FLUSH_LOG		_IO(__LOGGERIO, 4) /* flush log */
#define LOGGER_GET_VERSION		_IO(__LOGGERIO, 5) /* abi version */
#define LOGGER_SET_VERSION		_IO(__LOGGERIO, 6) /* abi version */

#endif /* _LINUX_LOGGER_H */
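The versioned headers above are easiest to compare by size: a v1 reader is handed a user_logger_entry_compat, a v2 reader the euid-carrying logger_entry, and get_user_hdr_len() in logger.c picks between exactly these two. A hedged userspace mirror of the structs follows (uid_t assumed 32-bit, as on this ARM tree; msg[0] kept to match the kernel's zero-length-array idiom, which gcc and clang accept):

#include <stdint.h>
#include <stdio.h>

struct user_logger_entry_compat {	/* version 1 of the ABI */
	uint16_t len;
	uint16_t __pad;
	int32_t  pid;
	int32_t  tid;
	int32_t  sec;
	int32_t  nsec;
	char     msg[0];
};

struct logger_entry {			/* version 2 of the ABI */
	uint16_t len;
	uint16_t hdr_size;
	int32_t  pid;
	int32_t  tid;
	int32_t  sec;
	int32_t  nsec;
	uint32_t euid;
	char     msg[0];
};

int main(void)
{
	printf("v1 header: %zu bytes\n",
	       sizeof(struct user_logger_entry_compat));	/* 20 */
	printf("v2 header: %zu bytes\n",
	       sizeof(struct logger_entry));			/* 24 */
	return 0;
}

This is also why LOGGER_ENTRY_MAX_PAYLOAD becomes a literal 4076: 4096 minus the old 20-byte header, pinned so the payload a writer may submit does not shrink when the in-kernel header grows to 24 bytes.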
 
drivers/staging/android/lowmemorykiller.c: 225 changes (Normal file → Executable file)

@@ -29,12 +29,22 @@
 *
 */

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/kobject.h>
#include <linux/memory.h>
#include <linux/memory_hotplug.h>
#include <linux/mm.h>
#include <linux/notifier.h>
#include <linux/oom.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/sysfs.h>

#ifdef CONFIG_SWAP
#include <linux/fs.h>
#include <linux/swap.h>
#endif

static uint32_t lowmem_debug_level = 2;
static int lowmem_adj[6] = {

@@ -52,8 +62,16 @@ static size_t lowmem_minfree[6] = {
};
static int lowmem_minfree_size = 4;

static size_t lowmem_minfree_notif_trigger;

static unsigned int offlining;
static struct task_struct *lowmem_deathpending;
static DEFINE_SPINLOCK(lowmem_deathpending_lock);
static unsigned long lowmem_deathpending_timeout;
static struct kobject *lowmem_kobj;

#ifdef CONFIG_SWAP
static int fudgeswap = 512;
#endif

#define lowmem_print(level, x...)			\
	do {						\

@@ -68,29 +86,78 @@ static struct notifier_block task_nb = {
	.notifier_call	= task_notify_func,
};

static void task_free_fn(struct work_struct *work)
{
	unsigned long flags;

	task_free_unregister(&task_nb);
	spin_lock_irqsave(&lowmem_deathpending_lock, flags);
	lowmem_deathpending = NULL;
	spin_unlock_irqrestore(&lowmem_deathpending_lock, flags);
}
static DECLARE_WORK(task_free_work, task_free_fn);

static int
task_notify_func(struct notifier_block *self, unsigned long val, void *data)
{
	struct task_struct *task = data;

	if (task == lowmem_deathpending)
		schedule_work(&task_free_work);

	return NOTIFY_OK;
}

#ifdef CONFIG_MEMORY_HOTPLUG
static int lmk_hotplug_callback(struct notifier_block *self,
				unsigned long cmd, void *data)
{
	switch (cmd) {
	/* Cases the LMK does not care about */
	case MEM_ONLINE:
	case MEM_OFFLINE:
	case MEM_CANCEL_ONLINE:
	case MEM_CANCEL_OFFLINE:
	case MEM_GOING_ONLINE:
		offlining = 0;
		lowmem_print(4, "lmk in normal mode\n");
		break;
	/* LMK should account for movable zone */
	case MEM_GOING_OFFLINE:
		offlining = 1;
		lowmem_print(4, "lmk in hotplug mode\n");
		break;
	}
	return NOTIFY_DONE;
}
#endif


static void lowmem_notify_killzone_approach(void);

static inline void get_free_ram(int *other_free, int *other_file)
{
	struct zone *zone;
	*other_free = global_page_state(NR_FREE_PAGES);
	*other_file = global_page_state(NR_FILE_PAGES) -
						global_page_state(NR_SHMEM);
#ifdef CONFIG_SWAP
	if (fudgeswap != 0) {
		struct sysinfo si;
		si_swapinfo(&si);

		if (si.freeswap > 0) {
			if (fudgeswap > si.freeswap)
				*other_file += si.freeswap;
			else
				*other_file += fudgeswap;
		}
	}
#endif
	if (offlining) {
		/* Discount all free space in the section being offlined */
		for_each_zone(zone) {
			if (zone_idx(zone) == ZONE_MOVABLE) {
				*other_free -= zone_page_state(zone,
						NR_FREE_PAGES);
				lowmem_print(4, "lowmem_shrink discounted "
					"%lu pages in movable zone\n",
					zone_page_state(zone, NR_FREE_PAGES));
			}
		}
	}
}
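
fudgeswap is exported as a writable module parameter at the bottom of this file, so the swap credit applied above can be retuned at runtime. A hedged sketch; the sysfs path assumes this file builds as module "lowmemorykiller":

#include <stdio.h>

/* Credit up to 256 pages of free swap to the LMK's file-page count.
 * The path is an assumption: parameters of this file normally land
 * under /sys/module/lowmemorykiller/parameters/. */
int main(void)
{
	FILE *f = fopen("/sys/module/lowmemorykiller/parameters/fudgeswap", "w");

	if (!f)
		return 1;
	fprintf(f, "256\n");
	fclose(f);
	return 0;
}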

static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
{
	struct task_struct *p;

@@ -102,10 +169,8 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
	int selected_tasksize = 0;
	int selected_oom_adj;
	int array_size = ARRAY_SIZE(lowmem_adj);
	int other_free;
	int other_file;
	/*
	 * If we already have a death outstanding, then
	 * bail out right away; indicating to vmscan
@@ -113,15 +178,24 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
	 * this pass.
	 *
	 */
	if (lowmem_deathpending &&
	    time_before_eq(jiffies, lowmem_deathpending_timeout))
		return 0;

	get_free_ram(&other_free, &other_file);

	if (other_free < lowmem_minfree_notif_trigger &&
			other_file < lowmem_minfree_notif_trigger) {
		lowmem_notify_killzone_approach();
	}

	if (lowmem_adj_size < array_size)
		array_size = lowmem_adj_size;
	if (lowmem_minfree_size < array_size)
		array_size = lowmem_minfree_size;
	for (i = 0; i < array_size; i++) {
		if (other_free < lowmem_minfree[i] &&
		    other_file < lowmem_minfree[i]) {
			min_adj = lowmem_adj[i];
			break;
		}

@@ -176,20 +250,14 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask)
		lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n",
			     p->pid, p->comm, oom_adj, tasksize);
	}

	if (selected) {
		lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n",
			     selected->pid, selected->comm,
			     selected_oom_adj, selected_tasksize);
		lowmem_deathpending = selected;
		lowmem_deathpending_timeout = jiffies + HZ;
		force_sig(SIGKILL, selected);
		rem -= selected_tasksize;
	}
	lowmem_print(4, "lowmem_shrink %d, %x, return %d\n",
		     nr_to_scan, gfp_mask, rem);

@@ -202,15 +270,93 @@ static struct shrinker lowmem_shrinker = {
	.seeks = DEFAULT_SEEKS * 16
};
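
The one-second death-pending timeout above relies on the jiffies helpers; time_before_eq() reduces to a wraparound-safe signed subtraction, sketched here for reference:

/* Wrap-safe "a <= b" for free-running tick counters, equivalent to the
 * time_before_eq(jiffies, lowmem_deathpending_timeout) check above. */
static int ticks_before_eq(unsigned long a, unsigned long b)
{
	return (long)(a - b) <= 0;
}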

static void lowmem_notify_killzone_approach(void)
{
	lowmem_print(3, "notification trigger activated\n");
	sysfs_notify(lowmem_kobj, NULL, "notify_trigger_active");
}

static ssize_t lowmem_notify_trigger_active_show(struct kobject *k,
		struct kobj_attribute *attr, char *buf)
{
	int other_free, other_file;
	get_free_ram(&other_free, &other_file);
	if (other_free < lowmem_minfree_notif_trigger &&
			other_file < lowmem_minfree_notif_trigger)
		return snprintf(buf, 3, "1\n");
	else
		return snprintf(buf, 3, "0\n");
}

static struct kobj_attribute lowmem_notify_trigger_active_attr =
	__ATTR(notify_trigger_active, S_IRUGO,
			lowmem_notify_trigger_active_show, NULL);

static struct attribute *lowmem_default_attrs[] = {
	&lowmem_notify_trigger_active_attr.attr,
	NULL,
};

static ssize_t lowmem_show(struct kobject *k, struct attribute *attr, char *buf)
{
	struct kobj_attribute *kobj_attr;
	kobj_attr = container_of(attr, struct kobj_attribute, attr);
	return kobj_attr->show(k, kobj_attr, buf);
}

static const struct sysfs_ops lowmem_ops = {
	.show = lowmem_show,
};

static void lowmem_kobj_release(struct kobject *kobj)
{
	/* Nothing to be done here */
}

static struct kobj_type lowmem_kobj_type = {
	.release = lowmem_kobj_release,
	.sysfs_ops = &lowmem_ops,
	.default_attrs = lowmem_default_attrs,
};
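
sysfs_notify() above makes notify_trigger_active pollable. A minimal consumer sketch; the path follows from the kobject_init_and_add() call below, which parents the kobject under mm_kobj (/sys/kernel/mm):

#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
	char state[4] = {0};
	struct pollfd pfd;

	pfd.fd = open("/sys/kernel/mm/lowmemkiller/notify_trigger_active",
		      O_RDONLY);
	if (pfd.fd < 0)
		return 1;
	pfd.events = POLLPRI | POLLERR;
	read(pfd.fd, state, 3);		/* arm: consume the current value */
	poll(&pfd, 1, -1);		/* sleeps until sysfs_notify() fires */
	lseek(pfd.fd, 0, SEEK_SET);
	read(pfd.fd, state, 3);		/* "1\n" when near the kill zone */
	printf("trigger: %s", state);
	close(pfd.fd);
	return 0;
}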

static int __init lowmem_init(void)
{
	int rc;
	task_free_register(&task_nb);
	register_shrinker(&lowmem_shrinker);
#ifdef CONFIG_MEMORY_HOTPLUG
	hotplug_memory_notifier(lmk_hotplug_callback, 0);
#endif

	lowmem_kobj = kzalloc(sizeof(*lowmem_kobj), GFP_KERNEL);
	if (!lowmem_kobj) {
		rc = -ENOMEM;
		goto err;
	}

	rc = kobject_init_and_add(lowmem_kobj, &lowmem_kobj_type,
			mm_kobj, "lowmemkiller");
	if (rc)
		goto err_kobj;

	return 0;

err_kobj:
	kfree(lowmem_kobj);

err:
	unregister_shrinker(&lowmem_shrinker);
	task_free_unregister(&task_nb);

	return rc;
}

static void __exit lowmem_exit(void)
{
	kobject_put(lowmem_kobj);
	kfree(lowmem_kobj);
	unregister_shrinker(&lowmem_shrinker);
	task_free_unregister(&task_nb);
}

module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR);
@@ -219,7 +365,12 @@ module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size,
module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size,
			 S_IRUGO | S_IWUSR);
module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR);
module_param_named(notify_trigger, lowmem_minfree_notif_trigger, uint,
			 S_IRUGO | S_IWUSR);

#ifdef CONFIG_SWAP
module_param_named(fudgeswap, fudgeswap, int, S_IRUGO | S_IWUSR);
#endif
module_init(lowmem_init);
module_exit(lowmem_exit);

drivers/staging/snappy/Kconfig | 5 (Executable file)

@@ -0,0 +1,5 @@
config SNAPPY_COMPRESS
	tristate "Google Snappy Compression"

config SNAPPY_DECOMPRESS
	tristate "Google Snappy Decompression"

drivers/staging/snappy/Makefile | 5 (Executable file)

@@ -0,0 +1,5 @@
snappy_compress-objs := csnappy_compress.o
snappy_decompress-objs := csnappy_decompress.o

obj-$(CONFIG_SNAPPY_COMPRESS) += snappy_compress.o
obj-$(CONFIG_SNAPPY_DECOMPRESS) += snappy_decompress.o

drivers/staging/snappy/csnappy.h | 125 (Executable file)

@@ -0,0 +1,125 @@
#ifndef __CSNAPPY_H__
#define __CSNAPPY_H__
/*
File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov at gmail.com>
*/
#ifdef __cplusplus
extern "C" {
#endif

#define CSNAPPY_VERSION	4

#define CSNAPPY_WORKMEM_BYTES_POWER_OF_TWO 15
#define CSNAPPY_WORKMEM_BYTES (1 << CSNAPPY_WORKMEM_BYTES_POWER_OF_TWO)

/*
 * Returns the maximal size of the compressed representation of
 * input data that is "source_len" bytes in length.
 */
uint32_t
csnappy_max_compressed_length(uint32_t source_len) __attribute__((const));

/*
 * Flat array compression that does not emit the "uncompressed length"
 * prefix. Compresses "input" array to the "output" array.
 *
 * REQUIRES: "input" is at most 32KiB long.
 * REQUIRES: "output" points to an array of memory that is at least
 * "csnappy_max_compressed_length(input_length)" in size.
 * REQUIRES: working_memory has (1 << workmem_bytes_power_of_two) bytes.
 * REQUIRES: 9 <= workmem_bytes_power_of_two <= 15.
 *
 * Returns an "end" pointer into "output" buffer.
 * "end - output" is the compressed size of "input".
 */
char*
csnappy_compress_fragment(
	const char *input,
	const uint32_t input_length,
	char *output,
	void *working_memory,
	const int workmem_bytes_power_of_two);

/*
 * REQUIRES: "compressed" must point to an area of memory that is at
 * least "csnappy_max_compressed_length(input_length)" bytes in length.
 * REQUIRES: working_memory has (1 << workmem_bytes_power_of_two) bytes.
 * REQUIRES: 9 <= workmem_bytes_power_of_two <= 15.
 *
 * Takes the data stored in "input[0..input_length]" and stores
 * it in the array pointed to by "compressed".
 *
 * "*out_compressed_length" is set to the length of the compressed output.
 */
void
csnappy_compress(
	const char *input,
	uint32_t input_length,
	char *compressed,
	uint32_t *out_compressed_length,
	void *working_memory,
	const int workmem_bytes_power_of_two);

/*
 * Reads the header of compressed data to get the stored length of the
 * uncompressed data.
 * REQUIRES: start points to compressed data.
 * REQUIRES: n is the length of available compressed data.
 *
 * Returns CSNAPPY_E_HEADER_BAD on error.
 * Returns the number of bytes read from input on success.
 * Stores the decoded length into *result.
 */
int
csnappy_get_uncompressed_length(
	const char *start,
	uint32_t n,
	uint32_t *result);

/*
 * Safely decompresses all data from array "src" of length "src_len" containing
 * the entire compressed stream (with header) into array "dst" of size "dst_len".
 * REQUIRES: dst_len is at least csnappy_get_uncompressed_length(...).
 *
 * Iff successful, returns CSNAPPY_E_OK.
 * If the recorded length in the header is greater than dst_len, returns
 *  CSNAPPY_E_OUTPUT_INSUF.
 * If the compressed data is malformed, does not write more than dst_len into dst.
 */
int
csnappy_decompress(
	const char *src,
	uint32_t src_len,
	char *dst,
	uint32_t dst_len);

/*
 * Safely decompresses a stream src_len bytes long, read from src, to dst.
 * The amount of available space at dst must be provided in *dst_len by the caller.
 * If the compressed stream needs more space, it will not overflow and returns
 *  CSNAPPY_E_OUTPUT_OVERRUN.
 * On success, sets *dst_len to the actual number of bytes decompressed.
 * Iff successful, returns CSNAPPY_E_OK.
 */
int
csnappy_decompress_noheader(
	const char *src,
	uint32_t src_len,
	char *dst,
	uint32_t *dst_len);

/*
 * Return values (< 0 = Error)
 */
#define CSNAPPY_E_OK			0
#define CSNAPPY_E_HEADER_BAD		(-1)
#define CSNAPPY_E_OUTPUT_INSUF		(-2)
#define CSNAPPY_E_OUTPUT_OVERRUN	(-3)
#define CSNAPPY_E_INPUT_NOT_CONSUMED	(-4)
#define CSNAPPY_E_DATA_MALFORMED	(-5)

#ifdef __cplusplus
}
#endif

#endif

drivers/staging/snappy/csnappy_compress.c | 497 (Executable file)

@@ -0,0 +1,497 @@
/*
Copyright 2011, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

  * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
  * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
  * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov at gmail.com>
*/

#include "csnappy_internal.h"
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/module.h>
#endif
#include "csnappy.h"


static inline char*
encode_varint32(char *sptr, uint32_t v)
{
	uint8_t* ptr = (uint8_t *)sptr;
	static const int B = 128;
	if (v < (1<<7)) {
		*(ptr++) = v;
	} else if (v < (1<<14)) {
		*(ptr++) = v | B;
		*(ptr++) = v>>7;
	} else if (v < (1<<21)) {
		*(ptr++) = v | B;
		*(ptr++) = (v>>7) | B;
		*(ptr++) = v>>14;
	} else if (v < (1<<28)) {
		*(ptr++) = v | B;
		*(ptr++) = (v>>7) | B;
		*(ptr++) = (v>>14) | B;
		*(ptr++) = v>>21;
	} else {
		*(ptr++) = v | B;
		*(ptr++) = (v>>7) | B;
		*(ptr++) = (v>>14) | B;
		*(ptr++) = (v>>21) | B;
		*(ptr++) = v>>28;
	}
	return (char *)ptr;
}
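
A worked example of the little-endian base-128 encoding above: 300 (0x12C) needs two 7-bit groups, so it becomes 0xAC 0x02, with the high bit of each byte marking continuation. A sketch, assuming encode_varint32() is in scope:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	char buf[5];	/* a 32-bit value needs at most 5 varint bytes */
	char *end = encode_varint32(buf, 300);
	char *p;

	for (p = buf; p < end; p++)
		printf("%02x ", (uint8_t)*p);	/* prints: ac 02 */
	printf("\n");
	return 0;
}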


/*
 * Any hash function will produce a valid compressed bitstream, but a good
 * hash function reduces the number of collisions and thus yields better
 * compression for compressible input, and more speed for incompressible
 * input. Of course, it doesn't hurt if the hash function is reasonably fast
 * either, as it gets called a lot.
 */
static inline uint32_t HashBytes(uint32_t bytes, int shift)
{
	uint32_t kMul = 0x1e35a7bd;
	return (bytes * kMul) >> shift;
}
static inline uint32_t Hash(const char *p, int shift)
{
	return HashBytes(UNALIGNED_LOAD32(p), shift);
}
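
The shift value ties this hash to the table size: 2^n bytes of work memory hold 2^(n-1) uint16_t slots, and csnappy_compress_fragment() below sets shift = 33 - n, keeping only the top n-1 bits of the product. A small sketch of that invariant:

#include <stdint.h>

/* For workmem of (1 << n) bytes, the table has (1 << (n - 1)) uint16_t
 * entries; shifting the 32-bit product right by (33 - n) leaves exactly
 * (n - 1) bits, so the index can never run off the table. */
static uint32_t hash_index_demo(uint32_t bytes, int n)
{
	uint32_t idx = (bytes * 0x1e35a7bd) >> (33 - n);
	/* idx < (1u << (n - 1)) holds for any input */
	return idx;
}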


/*
 * *** DO NOT CHANGE THE VALUE OF kBlockSize ***
 *
 * New Compression code chops up the input into blocks of at most
 * the following size.  This ensures that back-references in the
 * output never cross kBlockSize block boundaries.  This can be
 * helpful in implementing blocked decompression.  However the
 * decompression code should not rely on this guarantee since older
 * compression code may not obey it.
 */
#define kBlockLog 15
#define kBlockSize (1 << kBlockLog)


/*
 * Return the largest n such that
 *
 *   s1[0,n-1] == s2[0,n-1]
 *   and n <= (s2_limit - s2).
 *
 * Does not read *s2_limit or beyond.
 * Does not read *(s1 + (s2_limit - s2)) or beyond.
 * Requires that s2_limit >= s2.
 *
 * Separate implementation for x86_64, for speed.  Uses the fact that
 * x86_64 is little endian.
 */
#if defined(__x86_64__)
static inline int
FindMatchLength(const char *s1, const char *s2, const char *s2_limit)
{
	uint64_t x;
	int matched, matching_bits;
	DCHECK_GE(s2_limit, s2);
	matched = 0;
	/*
	 * Find out how long the match is. We loop over the data 64 bits at a
	 * time until we find a 64-bit block that doesn't match; then we find
	 * the first non-matching bit and use that to calculate the total
	 * length of the match.
	 */
	while (likely(s2 <= s2_limit - 8)) {
		if (unlikely(UNALIGNED_LOAD64(s1 + matched) ==
				UNALIGNED_LOAD64(s2))) {
			s2 += 8;
			matched += 8;
		} else {
			/*
			 * On current (mid-2008) Opteron models there is a 3%
			 * more efficient code sequence to find the first
			 * non-matching byte. However, what follows is ~10%
			 * better on Intel Core 2 and newer, and we expect AMD's
			 * bsf instruction to improve.
			 */
			x = UNALIGNED_LOAD64(s1 + matched) ^
				UNALIGNED_LOAD64(s2);
			matching_bits = FindLSBSetNonZero64(x);
			matched += matching_bits >> 3;
			return matched;
		}
	}
	while (likely(s2 < s2_limit)) {
		if (likely(s1[matched] == *s2)) {
			++s2;
			++matched;
		} else {
			return matched;
		}
	}
	return matched;
}
#else /* !defined(__x86_64__) */
static inline int
FindMatchLength(const char *s1, const char *s2, const char *s2_limit)
{
	/* Implementation based on the x86-64 version, above. */
	int matched = 0;
	DCHECK_GE(s2_limit, s2);

	while (s2 <= s2_limit - 4 &&
		UNALIGNED_LOAD32(s2) == UNALIGNED_LOAD32(s1 + matched)) {
		s2 += 4;
		matched += 4;
	}
#if defined(__LITTLE_ENDIAN)
	if (s2 <= s2_limit - 4) {
		uint32_t x = UNALIGNED_LOAD32(s1 + matched) ^
				UNALIGNED_LOAD32(s2);
		int matching_bits = FindLSBSetNonZero(x);
		matched += matching_bits >> 3;
	} else {
		while ((s2 < s2_limit) && (s1[matched] == *s2)) {
			++s2;
			++matched;
		}
	}
#else
	while ((s2 < s2_limit) && (s1[matched] == *s2)) {
		++s2;
		++matched;
	}
#endif
	return matched;
}
#endif /* !defined(__x86_64__) */
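
Both variants rest on the same bit trick, shown here in isolation (a sketch using the GCC builtin; the kernel code goes through FindLSBSetNonZero64() instead): on little-endian, the lowest set bit of s1 ^ s2 lies inside the first differing byte, so the count of equal leading bytes is the trailing-zero count divided by 8.

#include <stdint.h>

static int equal_prefix_bytes(uint64_t a, uint64_t b)
{
	uint64_t x = a ^ b;

	/* All 8 bytes equal when x == 0; otherwise ctz(x) >> 3 is the
	 * number of low-order bytes that matched before the difference. */
	return x ? __builtin_ctzll(x) >> 3 : 8;
}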


static inline char*
EmitLiteral(char *op, const char *literal, int len, int allow_fast_path)
{
	int n = len - 1; /* Zero-length literals are disallowed */
	if (n < 60) {
		/* Fits in tag byte */
		*op++ = LITERAL | (n << 2);
		/*
		The vast majority of copies are below 16 bytes, for which a
		call to memcpy is overkill. This fast path can sometimes
		copy up to 15 bytes too much, but that is okay in the
		main loop, since we have a bit to go on for both sides:
		- The input will always have kInputMarginBytes = 15 extra
		available bytes, as long as we're in the main loop, and
		if not, allow_fast_path = false.
		- The output will always have 32 spare bytes (see
		snappy_max_compressed_length).
		*/
		if (allow_fast_path && len <= 16) {
			UNALIGNED_STORE64(op, UNALIGNED_LOAD64(literal));
			UNALIGNED_STORE64(op + 8,
						UNALIGNED_LOAD64(literal + 8));
			return op + len;
		}
	} else {
		/* Encode in upcoming bytes */
		char *base = op;
		int count = 0;
		op++;
		while (n > 0) {
			*op++ = n & 0xff;
			n >>= 8;
			count++;
		}
		DCHECK_GE(count, 1);
		DCHECK_LE(count, 4);
		*base = LITERAL | ((59+count) << 2);
	}
	memcpy(op, literal, len);
	return op + len;
}

static inline char*
EmitCopyLessThan64(char *op, int offset, int len)
{
	DCHECK_LE(len, 64);
	DCHECK_GE(len, 4);
	DCHECK_LT(offset, 65536);

	if ((len < 12) && (offset < 2048)) {
		int len_minus_4 = len - 4;
		DCHECK_LT(len_minus_4, 8); /* Must fit in 3 bits */
		*op++ = COPY_1_BYTE_OFFSET |
			((len_minus_4) << 2) |
			((offset >> 8) << 5);
		*op++ = offset & 0xff;
	} else {
		*op++ = COPY_2_BYTE_OFFSET | ((len-1) << 2);
		put_unaligned_le16(offset, op);
		op += 2;
	}
	return op;
}

static inline char*
EmitCopy(char *op, int offset, int len)
{
	/* Emit 64 byte copies but make sure to keep at least four bytes
	 * reserved */
	while (len >= 68) {
		op = EmitCopyLessThan64(op, offset, 64);
		len -= 64;
	}

	/* Emit an extra 60 byte copy if we have too much data to fit in one
	 * copy */
	if (len > 64) {
		op = EmitCopyLessThan64(op, offset, 60);
		len -= 60;
	}

	/* Emit remainder */
	op = EmitCopyLessThan64(op, offset, len);
	return op;
}
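
The 68/60 thresholds above keep every emitted copy inside [4, 64], since a copy tag encodes at most 64 bytes and never fewer than 4. A hypothetical helper sketching the resulting length schedule:

#include <stdio.h>

static void copy_schedule_demo(int len)	/* requires len >= 4 */
{
	while (len >= 68) {		/* full-size copies */
		printf("64 ");
		len -= 64;
	}
	if (len > 64) {			/* 65..67: avoid leaving a tail < 4 */
		printf("60 ");
		len -= 60;
	}
	printf("%d\n", len);		/* remainder is always 4..64 */
}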


/*
 * For 0 <= offset <= 4, GetUint32AtOffset(UNALIGNED_LOAD64(p), offset) will
 * equal UNALIGNED_LOAD32(p + offset).  Motivation: On x86-64 hardware we have
 * empirically found that overlapping loads such as
 *  UNALIGNED_LOAD32(p) ... UNALIGNED_LOAD32(p+1) ... UNALIGNED_LOAD32(p+2)
 * are slower than UNALIGNED_LOAD64(p) followed by shifts and casts to uint32_t.
 */
static inline uint32_t
GetUint32AtOffset(uint64_t v, int offset)
{
	DCHECK(0 <= offset && offset <= 4);
#ifdef __LITTLE_ENDIAN
	return v >> (8 * offset);
#else
	return v >> (32 - 8 * offset);
#endif
}

#define kInputMarginBytes 15
char*
csnappy_compress_fragment(
	const char *input,
	const uint32_t input_size,
	char *op,
	void *working_memory,
	const int workmem_bytes_power_of_two)
{
	const char *ip, *ip_end, *base_ip, *next_emit, *ip_limit, *next_ip,
			*candidate, *base;
	uint16_t *table = (uint16_t *)working_memory;
	uint64_t input_bytes;
	uint32_t hash, next_hash, prev_hash, cur_hash, skip, candidate_bytes;
	int shift, matched;

	DCHECK_GE(workmem_bytes_power_of_two, 9);
	DCHECK_LE(workmem_bytes_power_of_two, 15);
	/* Table of 2^X bytes, need (X-1) bits to address table of uint16_t.
	 * How many bits of 32bit hash function result are discarded? */
	shift = 33 - workmem_bytes_power_of_two;
	/* "ip" is the input pointer, and "op" is the output pointer. */
	ip = input;
	DCHECK_LE(input_size, kBlockSize);
	ip_end = input + input_size;
	base_ip = ip;
	/* Bytes in [next_emit, ip) will be emitted as literal bytes. Or
	   [next_emit, ip_end) after the main loop. */
	next_emit = ip;

	if (unlikely(input_size < kInputMarginBytes))
		goto emit_remainder;

	memset(working_memory, 0, 1 << workmem_bytes_power_of_two);

	ip_limit = input + input_size - kInputMarginBytes;
	next_hash = Hash(++ip, shift);

main_loop:
	DCHECK_LT(next_emit, ip);
	/*
	* The body of this loop calls EmitLiteral once and then EmitCopy one or
	* more times. (The exception is that when we're close to exhausting
	* the input we goto emit_remainder.)
	*
	* In the first iteration of this loop we're just starting, so
	* there's nothing to copy, so calling EmitLiteral once is
	* necessary. And we only start a new iteration when the
	* current iteration has determined that a call to EmitLiteral will
	* precede the next call to EmitCopy (if any).
	*
	* Step 1: Scan forward in the input looking for a 4-byte-long match.
	* If we get close to exhausting the input then goto emit_remainder.
	*
	* Heuristic match skipping: If 32 bytes are scanned with no matches
	* found, start looking only at every other byte. If 32 more bytes are
	* scanned, look at every third byte, etc.. When a match is found,
	* immediately go back to looking at every byte. This is a small loss
	* (~5% performance, ~0.1% density) for compressible data due to more
	* bookkeeping, but for non-compressible data (such as JPEG) it's a huge
	* win since the compressor quickly "realizes" the data is incompressible
	* and doesn't bother looking for matches everywhere.
	*
	* The "skip" variable keeps track of how many bytes there are since the
	* last match; dividing it by 32 (ie. right-shifting by five) gives the
	* number of bytes to move ahead for each iteration.
	*/
	skip = 32;

	next_ip = ip;
	do {
		ip = next_ip;
		hash = next_hash;
		DCHECK_EQ(hash, Hash(ip, shift));
		next_ip = ip + (skip++ >> 5);
		if (unlikely(next_ip > ip_limit))
			goto emit_remainder;
		next_hash = Hash(next_ip, shift);
		candidate = base_ip + table[hash];
		DCHECK_GE(candidate, base_ip);
		DCHECK_LT(candidate, ip);

		table[hash] = ip - base_ip;
	} while (likely(UNALIGNED_LOAD32(ip) !=
			UNALIGNED_LOAD32(candidate)));

	/*
	* Step 2: A 4-byte match has been found. We'll later see if more
	* than 4 bytes match. But, prior to the match, input
	* bytes [next_emit, ip) are unmatched. Emit them as "literal bytes."
	*/
	DCHECK_LE(next_emit + 16, ip_end);
	op = EmitLiteral(op, next_emit, ip - next_emit, 1);

	/*
	* Step 3: Call EmitCopy, and then see if another EmitCopy could
	* be our next move. Repeat until we find no match for the
	* input immediately after what was consumed by the last EmitCopy call.
	*
	* If we exit this loop normally then we need to call EmitLiteral next,
	* though we don't yet know how big the literal will be. We handle that
	* by proceeding to the next iteration of the main loop. We also can exit
	* this loop via goto if we get close to exhausting the input.
	*/
	input_bytes = 0;
	candidate_bytes = 0;

	do {
		/* We have a 4-byte match at ip, and no need to emit any
		 "literal bytes" prior to ip. */
		base = ip;
		matched = 4 + FindMatchLength(candidate + 4, ip + 4, ip_end);
		ip += matched;
		DCHECK_EQ(0, memcmp(base, candidate, matched));
		op = EmitCopy(op, base - candidate, matched);
		/* We could immediately start working at ip now, but to improve
		 compression we first update table[Hash(ip - 1, ...)]. */
		next_emit = ip;
		if (unlikely(ip >= ip_limit))
			goto emit_remainder;
		input_bytes = UNALIGNED_LOAD64(ip - 1);
		prev_hash = HashBytes(GetUint32AtOffset(input_bytes, 0), shift);
		table[prev_hash] = ip - base_ip - 1;
		cur_hash = HashBytes(GetUint32AtOffset(input_bytes, 1), shift);
		candidate = base_ip + table[cur_hash];
		candidate_bytes = UNALIGNED_LOAD32(candidate);
		table[cur_hash] = ip - base_ip;
	} while (GetUint32AtOffset(input_bytes, 1) == candidate_bytes);

	next_hash = HashBytes(GetUint32AtOffset(input_bytes, 2), shift);
	++ip;
	goto main_loop;

emit_remainder:
	/* Emit the remaining bytes as a literal */
	if (next_emit < ip_end)
		op = EmitLiteral(op, next_emit, ip_end - next_emit, 0);

	return op;
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_compress_fragment);
#endif

uint32_t __attribute__((const))
csnappy_max_compressed_length(uint32_t source_len)
{
	return 32 + source_len + source_len/6;
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_max_compressed_length);
#endif

void
csnappy_compress(
	const char *input,
	uint32_t input_length,
	char *compressed,
	uint32_t *compressed_length,
	void *working_memory,
	const int workmem_bytes_power_of_two)
{
	int workmem_size;
	int num_to_read;
	uint32_t written = 0;
	char *p = encode_varint32(compressed, input_length);
	written += (p - compressed);
	compressed = p;
	while (input_length > 0) {
		num_to_read = min(input_length, (uint32_t)kBlockSize);
		workmem_size = workmem_bytes_power_of_two;
		if (num_to_read < kBlockSize) {
			for (workmem_size = 9;
			     workmem_size < workmem_bytes_power_of_two;
			     ++workmem_size) {
				if ((1 << (workmem_size-1)) >= num_to_read)
					break;
			}
		}
		p = csnappy_compress_fragment(
				input, num_to_read, compressed,
				working_memory, workmem_size);
		written += (p - compressed);
		compressed = p;
		input_length -= num_to_read;
		input += num_to_read;
	}
	*compressed_length = written;
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_compress);

MODULE_LICENSE("BSD");
MODULE_DESCRIPTION("Snappy Compressor");
#endif
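
A hedged end-to-end usage sketch for the API this file pair exports (a userspace build against csnappy.h is assumed; in-kernel callers such as a zram backend would obtain work memory from kmalloc instead):

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include "csnappy.h"

int main(void)
{
	const char text[] = "abababababababababababababababab";
	uint32_t clen;
	char out[sizeof(text)];
	char *comp = malloc(csnappy_max_compressed_length(sizeof(text)));
	void *workmem = malloc(CSNAPPY_WORKMEM_BYTES);

	if (!comp || !workmem)
		return 1;
	csnappy_compress(text, sizeof(text), comp, &clen,
			 workmem, CSNAPPY_WORKMEM_BYTES_POWER_OF_TWO);
	printf("%u -> %u bytes\n", (unsigned)sizeof(text), (unsigned)clen);
	if (csnappy_decompress(comp, clen, out, sizeof(out)) == CSNAPPY_E_OK)
		printf("roundtrip ok\n");
	free(comp);
	free(workmem);
	return 0;
}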

drivers/staging/snappy/csnappy_decompress.c | 321 (Executable file)

@@ -0,0 +1,321 @@
/*
Copyright 2011, Google Inc.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

  * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
  * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
  * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov at gmail.com>
*/

#include "csnappy_internal.h"
#ifdef __KERNEL__
#include <linux/kernel.h>
#include <linux/module.h>
#endif
#include "csnappy.h"


/* Mapping from i in range [0,4] to a mask to extract the bottom 8*i bits */
static const uint32_t wordmask[] = {
	0u, 0xffu, 0xffffu, 0xffffffu, 0xffffffffu
};

/*
 * Data stored per entry in lookup table:
 *      Range   Bits-used       Description
 *      ------------------------------------
 *      1..64   0..7            Literal/copy length encoded in opcode byte
 *      0..7    8..10           Copy offset encoded in opcode byte / 256
 *      0..4    11..13          Extra bytes after opcode
 *
 * We use eight bits for the length even though 7 would have sufficed
 * because of efficiency reasons:
 *      (1) Extracting a byte is faster than a bit-field
 *      (2) It properly aligns copy offset so we do not need a <<8
 */
static const uint16_t char_table[256] = {
	0x0001, 0x0804, 0x1001, 0x2001, 0x0002, 0x0805, 0x1002, 0x2002,
	0x0003, 0x0806, 0x1003, 0x2003, 0x0004, 0x0807, 0x1004, 0x2004,
	0x0005, 0x0808, 0x1005, 0x2005, 0x0006, 0x0809, 0x1006, 0x2006,
	0x0007, 0x080a, 0x1007, 0x2007, 0x0008, 0x080b, 0x1008, 0x2008,
	0x0009, 0x0904, 0x1009, 0x2009, 0x000a, 0x0905, 0x100a, 0x200a,
	0x000b, 0x0906, 0x100b, 0x200b, 0x000c, 0x0907, 0x100c, 0x200c,
	0x000d, 0x0908, 0x100d, 0x200d, 0x000e, 0x0909, 0x100e, 0x200e,
	0x000f, 0x090a, 0x100f, 0x200f, 0x0010, 0x090b, 0x1010, 0x2010,
	0x0011, 0x0a04, 0x1011, 0x2011, 0x0012, 0x0a05, 0x1012, 0x2012,
	0x0013, 0x0a06, 0x1013, 0x2013, 0x0014, 0x0a07, 0x1014, 0x2014,
	0x0015, 0x0a08, 0x1015, 0x2015, 0x0016, 0x0a09, 0x1016, 0x2016,
	0x0017, 0x0a0a, 0x1017, 0x2017, 0x0018, 0x0a0b, 0x1018, 0x2018,
	0x0019, 0x0b04, 0x1019, 0x2019, 0x001a, 0x0b05, 0x101a, 0x201a,
	0x001b, 0x0b06, 0x101b, 0x201b, 0x001c, 0x0b07, 0x101c, 0x201c,
	0x001d, 0x0b08, 0x101d, 0x201d, 0x001e, 0x0b09, 0x101e, 0x201e,
	0x001f, 0x0b0a, 0x101f, 0x201f, 0x0020, 0x0b0b, 0x1020, 0x2020,
	0x0021, 0x0c04, 0x1021, 0x2021, 0x0022, 0x0c05, 0x1022, 0x2022,
	0x0023, 0x0c06, 0x1023, 0x2023, 0x0024, 0x0c07, 0x1024, 0x2024,
	0x0025, 0x0c08, 0x1025, 0x2025, 0x0026, 0x0c09, 0x1026, 0x2026,
	0x0027, 0x0c0a, 0x1027, 0x2027, 0x0028, 0x0c0b, 0x1028, 0x2028,
	0x0029, 0x0d04, 0x1029, 0x2029, 0x002a, 0x0d05, 0x102a, 0x202a,
	0x002b, 0x0d06, 0x102b, 0x202b, 0x002c, 0x0d07, 0x102c, 0x202c,
	0x002d, 0x0d08, 0x102d, 0x202d, 0x002e, 0x0d09, 0x102e, 0x202e,
	0x002f, 0x0d0a, 0x102f, 0x202f, 0x0030, 0x0d0b, 0x1030, 0x2030,
	0x0031, 0x0e04, 0x1031, 0x2031, 0x0032, 0x0e05, 0x1032, 0x2032,
	0x0033, 0x0e06, 0x1033, 0x2033, 0x0034, 0x0e07, 0x1034, 0x2034,
	0x0035, 0x0e08, 0x1035, 0x2035, 0x0036, 0x0e09, 0x1036, 0x2036,
	0x0037, 0x0e0a, 0x1037, 0x2037, 0x0038, 0x0e0b, 0x1038, 0x2038,
	0x0039, 0x0f04, 0x1039, 0x2039, 0x003a, 0x0f05, 0x103a, 0x203a,
	0x003b, 0x0f06, 0x103b, 0x203b, 0x003c, 0x0f07, 0x103c, 0x203c,
	0x0801, 0x0f08, 0x103d, 0x203d, 0x1001, 0x0f09, 0x103e, 0x203e,
	0x1801, 0x0f0a, 0x103f, 0x203f, 0x2001, 0x0f0b, 0x1040, 0x2040
};
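
Unpacking one char_table entry per the layout documented above; a sketch, assuming char_table is in scope. Note the offset bits are stored pre-shifted, which is why the decoder below adds opword & 0x700 to the trailer directly:

#include <stdint.h>
#include <stdio.h>

static void opword_demo(uint8_t opcode)
{
	uint16_t opword = char_table[opcode];

	printf("len=%u offset_high=0x%x extra_bytes=%u\n",
	       opword & 0xff,	/* bits 0..7: literal/copy length */
	       opword & 0x700,	/* bits 8..10: offset / 256, kept shifted */
	       opword >> 11);	/* bits 11..13: trailing bytes after opcode */
}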

/*
 * Copy "len" bytes from "src" to "op", one byte at a time.  Used for
 * handling COPY operations where the input and output regions may
 * overlap.  For example, suppose:
 *    src    == "ab"
 *    op     == src + 2
 *    len    == 20
 * After IncrementalCopy(src, op, len), the result will have
 * eleven copies of "ab"
 *    ababababababababababab
 * Note that this does not match the semantics of either memcpy()
 * or memmove().
 */
static inline void IncrementalCopy(const char *src, char *op, int len)
{
	DCHECK_GT(len, 0);
	do {
		*op++ = *src++;
	} while (--len > 0);
}

/*
 * Equivalent to IncrementalCopy except that it can write up to ten extra
 * bytes after the end of the copy, and that it is faster.
 *
 * The main part of this loop is a simple copy of eight bytes at a time until
 * we've copied (at least) the requested amount of bytes.  However, if op and
 * src are less than eight bytes apart (indicating a repeating pattern of
 * length < 8), we first need to expand the pattern in order to get the correct
 * results. For instance, if the buffer looks like this, with the eight-byte
 * <src> and <op> patterns marked as intervals:
 *
 *    abxxxxxxxxxxxx
 *    [------]           src
 *      [------]         op
 *
 * a single eight-byte copy from <src> to <op> will repeat the pattern once,
 * after which we can move <op> two bytes without moving <src>:
 *
 *    ababxxxxxxxxxx
 *    [------]           src
 *        [------]       op
 *
 * and repeat the exercise until the two no longer overlap.
 *
 * This allows us to do very well in the special case of one single byte
 * repeated many times, without taking a big hit for more general cases.
 *
 * The worst case of extra writing past the end of the match occurs when
 * op - src == 1 and len == 1; the last copy will read from byte positions
 * [0..7] and write to [4..11], whereas it was only supposed to write to
 * position 1. Thus, ten excess bytes.
 */
static const int kMaxIncrementCopyOverflow = 10;
static inline void IncrementalCopyFastPath(const char *src, char *op, int len)
{
	while (op - src < 8) {
		UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
		len -= op - src;
		op += op - src;
	}
	while (len > 0) {
		UNALIGNED_STORE64(op, UNALIGNED_LOAD64(src));
		src += 8;
		op += 8;
		len -= 8;
	}
}


/* A type that writes to a flat array. */
struct SnappyArrayWriter {
	char *base;
	char *op;
	char *op_limit;
};

static inline int
SAW__Append(struct SnappyArrayWriter *this,
	    const char *ip, uint32_t len, int allow_fast_path)
{
	char *op = this->op;
	const int space_left = this->op_limit - op;
	/* Fast path, used for the majority (about 90%) of dynamic invocations. */
	if (allow_fast_path && len <= 16 && space_left >= 16) {
		UNALIGNED_STORE64(op, UNALIGNED_LOAD64(ip));
		UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(ip + 8));
	} else {
		if (space_left < len)
			return CSNAPPY_E_OUTPUT_OVERRUN;
		memcpy(op, ip, len);
	}
	this->op = op + len;
	return CSNAPPY_E_OK;
}

static inline int
SAW__AppendFromSelf(struct SnappyArrayWriter *this,
		    uint32_t offset, uint32_t len)
{
	char *op = this->op;
	const int space_left = this->op_limit - op;
	/* -1u catches offset==0 */
	if (op - this->base <= offset - 1u)
		return CSNAPPY_E_DATA_MALFORMED;
	/* Fast path, used for the majority (70-80%) of dynamic invocations. */
	if (len <= 16 && offset >= 8 && space_left >= 16) {
		UNALIGNED_STORE64(op, UNALIGNED_LOAD64(op - offset));
		UNALIGNED_STORE64(op + 8, UNALIGNED_LOAD64(op - offset + 8));
	} else if (space_left >= len + kMaxIncrementCopyOverflow) {
		IncrementalCopyFastPath(op - offset, op, len);
	} else {
		if (space_left < len)
			return CSNAPPY_E_OUTPUT_OVERRUN;
		IncrementalCopy(op - offset, op, len);
	}
	this->op = op + len;
	return CSNAPPY_E_OK;
}


int
csnappy_get_uncompressed_length(
	const char *src,
	uint32_t src_len,
	uint32_t *result)
{
	const char *src_base = src;
	uint32_t shift = 0;
	uint8_t c;
	/* Length is encoded in 1..5 bytes */
	*result = 0;
	for (;;) {
		if (shift >= 32)
			goto err_out;
		if (src_len == 0)
			goto err_out;
		c = *(const uint8_t *)src++;
		src_len -= 1;
		*result |= (uint32_t)(c & 0x7f) << shift;
		if (c < 128)
			break;
		shift += 7;
	}
	return src - src_base;
err_out:
	return CSNAPPY_E_HEADER_BAD;
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_get_uncompressed_length);
#endif

int
csnappy_decompress_noheader(
	const char	*src,
	uint32_t	src_remaining,
	char		*dst,
	uint32_t	*dst_len)
{
	struct SnappyArrayWriter writer;
	uint32_t length, trailer, opword, extra_bytes;
	int ret;
	uint8_t opcode;
	char scratch[5];
	writer.op = writer.base = dst;
	writer.op_limit = writer.op + *dst_len;
	while (src_remaining) {
		if (unlikely(src_remaining < 5)) {
			memcpy(scratch, src, src_remaining);
			src = scratch;
		}
		opcode = *(const uint8_t *)src++;
		opword = char_table[opcode];
		extra_bytes = opword >> 11;
		trailer = get_unaligned_le32(src) & wordmask[extra_bytes];
		src += extra_bytes;
		src_remaining -= 1 + extra_bytes;
		length = opword & 0xff;
		if (opcode & 0x3) {
			trailer += opword & 0x700;
			ret = SAW__AppendFromSelf(&writer, trailer, length);
			if (ret < 0)
				return ret;
		} else {
			length += trailer;
			if (unlikely(src_remaining < length))
				return CSNAPPY_E_DATA_MALFORMED;
			ret = src_remaining >= 16;
			ret = SAW__Append(&writer, src, length, ret);
			if (ret < 0)
				return ret;
			src += length;
			src_remaining -= length;
		}
	}
	*dst_len = writer.op - writer.base;
	return CSNAPPY_E_OK;
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_decompress_noheader);
#endif

int
csnappy_decompress(
	const char *src,
	uint32_t src_len,
	char *dst,
	uint32_t dst_len)
{
	int n;
	uint32_t olen = 0;
	/* Read uncompressed length from the front of the compressed input */
	n = csnappy_get_uncompressed_length(src, src_len, &olen);
	if (unlikely(n < CSNAPPY_E_OK))
		return n;
	/* Protect against possible DoS attack */
	if (unlikely(olen > dst_len))
		return CSNAPPY_E_OUTPUT_INSUF;
	return csnappy_decompress_noheader(src + n, src_len - n, dst, &olen);
}
#if defined(__KERNEL__) && !defined(STATIC)
EXPORT_SYMBOL(csnappy_decompress);

MODULE_LICENSE("BSD");
MODULE_DESCRIPTION("Snappy Decompressor");
#endif
 | 
			
		||||
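The framed entry point is just the two pieces glued together: the varint length followed by the noheader stream. A sketch under the same userspace assumption:

#include <stdio.h>
#include <stdint.h>
#include "csnappy.h"	/* assumed userspace header */

int main(void)
{
	/* 0x05 = varint(5), then the 5-byte literal from the previous example */
	const char stream[] = { 0x05, 0x10, 'h', 'e', 'l', 'l', 'o' };
	char out[16];
	int rc = csnappy_decompress(stream, sizeof(stream), out, sizeof(out));

	printf("rc=%d data=%.5s\n", rc, out);
	return 0;
}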
							
								
								
									

drivers/staging/snappy/csnappy_internal.h (new executable file, 83 lines)
@@ -0,0 +1,83 @@
/*
Copyright 2011 Google Inc. All Rights Reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

    * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
    * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
    * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

Various stubs for the open-source version of Snappy.

File modified for the Linux Kernel by
Zeev Tarantov <zeev.tarantov at gmail.com>
*/

#ifndef CSNAPPY_INTERNAL_H_
#define CSNAPPY_INTERNAL_H_

#ifndef __KERNEL__
#include "csnappy_internal_userspace.h"
#else

#include <linux/types.h>
#include <linux/string.h>
#include <linux/compiler.h>
#include <asm/byteorder.h>
#include <asm/unaligned.h>

#ifdef DEBUG
#define DCHECK(cond)	if (!(cond)) \
			printk(KERN_DEBUG "assert failed @ %s:%i\n", \
				__FILE__, __LINE__)
#else
#define DCHECK(cond)
#endif

#define UNALIGNED_LOAD16(_p)		get_unaligned((const uint16_t *)(_p))
#define UNALIGNED_LOAD32(_p)		get_unaligned((const uint32_t *)(_p))
#define UNALIGNED_LOAD64(_p)		get_unaligned((const uint64_t *)(_p))
#define UNALIGNED_STORE16(_p, _val)	put_unaligned((_val), (uint16_t *)(_p))
#define UNALIGNED_STORE32(_p, _val)	put_unaligned((_val), (uint32_t *)(_p))
#define UNALIGNED_STORE64(_p, _val)	put_unaligned((_val), (uint64_t *)(_p))

#define FindLSBSetNonZero(n)		__builtin_ctz(n)
#define FindLSBSetNonZero64(n)		__builtin_ctzll(n)

#endif /* __KERNEL__ */

#define DCHECK_EQ(a, b)	DCHECK(((a) == (b)))
#define DCHECK_NE(a, b)	DCHECK(((a) != (b)))
#define DCHECK_GT(a, b)	DCHECK(((a) >  (b)))
#define DCHECK_GE(a, b)	DCHECK(((a) >= (b)))
#define DCHECK_LT(a, b)	DCHECK(((a) <  (b)))
#define DCHECK_LE(a, b)	DCHECK(((a) <= (b)))

enum {
	LITERAL = 0,
	COPY_1_BYTE_OFFSET = 1,  /* 3 bit length + 3 bits of offset in opcode */
	COPY_2_BYTE_OFFSET = 2,
	COPY_4_BYTE_OFFSET = 3
};

#endif  /* CSNAPPY_INTERNAL_H_ */

drivers/staging/zram/Kconfig (mode changed to executable, 37 lines changed)
@@ -6,8 +6,6 @@ config ZRAM
	tristate "Compressed RAM block device support"
	depends on BLOCK && SYSFS
	select XVMALLOC
	select LZO_COMPRESS
	select LZO_DECOMPRESS
	default n
	help
	  Creates virtual block devices called /dev/zramX (X = 0, 1, ...).
@@ -21,23 +19,6 @@ config ZRAM
	  See zram.txt for more information.
	  Project home: http://compcache.googlecode.com/

config ZRAM_NUM_DEVICES
	int "Default number of zram devices"
	depends on ZRAM
	range 1 32
	default 1
	help
	  Select default number of zram devices. You can override this value
	  using 'num_devices' module parameter.

config ZRAM_DEFAULT_PERCENTAGE
	int "Default number of zram percentage"
	depends on ZRAM
	range 10 80
	default 25
	help
	  Select default zram disk size: percentage of total RAM

config ZRAM_DEBUG
	bool "Compressed RAM block device debug support"
	depends on ZRAM
@@ -45,11 +26,15 @@ config ZRAM_DEBUG
	help
	  This option adds additional debugging code to the compressed
	  RAM block device driver.
config ZRAM_LZO
       bool "LZO compression"
       default y
       depends on ZRAM
       select LZO_COMPRESS
       select LZO_DECOMPRESS

config ZRAM_DEFAULT_DISKSIZE
	int "Default size of zram in bytes"
	depends on ZRAM
	default 100663296
	help
	  Set default zram disk size (default ~ 96MB)

config ZRAM_SNAPPY
       bool "Snappy compression"
       depends on ZRAM
       select SNAPPY_COMPRESS
       select SNAPPY_DECOMPRESS

drivers/staging/zram/Makefile, xvmalloc.c, xvmalloc.h, xvmalloc_int.h, zram.txt (mode changed to executable, no content changes)

drivers/staging/zram/zram_drv.c (mode changed to executable, 235 lines changed)
@@ -29,12 +29,90 @@
#include <linux/genhd.h>
#include <linux/highmem.h>
#include <linux/slab.h>
#include <linux/lzo.h>
#include <linux/string.h>
#include <linux/vmalloc.h>

#include "zram_drv.h"

#if defined(CONFIG_ZRAM_LZO)
#include <linux/lzo.h>
#ifdef MULTIPLE_COMPRESSORS
static const struct zram_compressor lzo_compressor = {
	.name = "LZO",
	.workmem_bytes = LZO1X_MEM_COMPRESS,
	.compress = &lzo1x_1_compress,
	.decompress = &lzo1x_decompress_safe
};
#else /* !MULTIPLE_COMPRESSORS */
#define WMSIZE		LZO1X_MEM_COMPRESS
#define COMPRESS(s, sl, d, dl, wm)	\
	lzo1x_1_compress(s, sl, d, dl, wm)
#define DECOMPRESS(s, sl, d, dl)	\
	lzo1x_decompress_safe(s, sl, d, dl)
#endif /* !MULTIPLE_COMPRESSORS */
#endif /* defined(CONFIG_ZRAM_LZO) */

#if defined(CONFIG_ZRAM_SNAPPY)
#include "../snappy/csnappy.h" /* if built in drivers/staging */
#define WMSIZE_ORDER	((PAGE_SHIFT > 14) ? (15) : (PAGE_SHIFT+1))
static int
snappy_compress_(
	const unsigned char *src,
	size_t src_len,
	unsigned char *dst,
	size_t *dst_len,
	void *workmem)
{
	const unsigned char *end = csnappy_compress_fragment(
		src, (uint32_t)src_len, dst, workmem, WMSIZE_ORDER);
	*dst_len = end - dst;
	return 0;
}
static int
snappy_decompress_(
	const unsigned char *src,
	size_t src_len,
	unsigned char *dst,
	size_t *dst_len)
{
	uint32_t dst_len_ = (uint32_t)*dst_len;
	int ret = csnappy_decompress_noheader(src, src_len, dst, &dst_len_);
	*dst_len = (size_t)dst_len_;
	return ret;
}
#ifdef MULTIPLE_COMPRESSORS
static const struct zram_compressor snappy_compressor = {
	.name = "SNAPPY",
	.workmem_bytes = (1 << WMSIZE_ORDER),
	.compress = &snappy_compress_,
	.decompress = &snappy_decompress_
};
#else /* !MULTIPLE_COMPRESSORS */
#define WMSIZE		(1 << WMSIZE_ORDER)
#define COMPRESS(s, sl, d, dl, wm)	\
	snappy_compress_(s, sl, d, dl, wm)
#define DECOMPRESS(s, sl, d, dl)	\
	snappy_decompress_(s, sl, d, dl)
#endif /* !MULTIPLE_COMPRESSORS */
#endif /* defined(CONFIG_ZRAM_SNAPPY) */

#ifdef MULTIPLE_COMPRESSORS
const struct zram_compressor * const zram_compressors[] = {
#if defined(CONFIG_ZRAM_LZO)
	&lzo_compressor,
#endif
#if defined(CONFIG_ZRAM_SNAPPY)
	&snappy_compressor,
#endif
	NULL
};
#define WMSIZE		(zram->compressor->workmem_bytes)
#define COMPRESS(s, sl, d, dl, wm)	\
	(zram->compressor->compress(s, sl, d, dl, wm))
#define DECOMPRESS(s, sl, d, dl)	\
	(zram->compressor->decompress(s, sl, d, dl))
#endif /* MULTIPLE_COMPRESSORS */
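When both backends are built in, every (de)compression goes through one indirect call on zram->compressor. The shape of that dispatch can be modeled in plain userspace C; the sketch below is illustrative only (compressor_ops, copy_compress and the wiring are made up for the example, not zram code):

#include <stdio.h>
#include <string.h>
#include <stddef.h>

/* Illustrative stand-in for struct zram_compressor */
struct compressor_ops {
	const char *name;
	unsigned workmem_bytes;
	int (*compress)(const unsigned char *src, size_t src_len,
			unsigned char *dst, size_t *dst_len, void *workmem);
};

/* Trivial "compressor" that just copies, to show the calling convention */
static int copy_compress(const unsigned char *src, size_t src_len,
			 unsigned char *dst, size_t *dst_len, void *workmem)
{
	(void)workmem;
	memcpy(dst, src, src_len);
	*dst_len = src_len;
	return 0;
}

static const struct compressor_ops copy_ops = {
	.name = "COPY", .workmem_bytes = 0, .compress = copy_compress,
};

int main(void)
{
	const struct compressor_ops *active = &copy_ops;	/* like zram->compressor */
	unsigned char dst[16];
	size_t dst_len = 0;

	active->compress((const unsigned char *)"page", 4, dst, &dst_len, NULL);
	printf("%s produced %zu bytes\n", active->name, dst_len);
	return 0;
}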

/* Globals */
static int zram_major;
struct zram *zram_devices;
@@ -104,19 +182,33 @@ static int page_zero_filled(void *ptr)
	return 1;
}

static u64 zram_default_disksize_bytes(void)
static void zram_set_disksize(struct zram *zram, size_t totalram_bytes)
{
#if 0
	return ((totalram_pages << PAGE_SHIFT) *
		default_disksize_perc_ram / 100) & PAGE_MASK;
#endif
	return CONFIG_ZRAM_DEFAULT_DISKSIZE;
}
	if (!zram->disksize) {
		pr_info(
		"disk size not provided. You can use disksize_kb module "
		"param to specify size.\nUsing default: (%u%% of RAM).\n",
		default_disksize_perc_ram
		);
		zram->disksize = default_disksize_perc_ram *
					(totalram_bytes / 100);
	}

static void zram_set_disksize(struct zram *zram, u64 size_bytes)
{
	zram->disksize = size_bytes;
	set_capacity(zram->disk, size_bytes >> SECTOR_SHIFT);
	if (zram->disksize > 2 * (totalram_bytes)) {
		pr_info(
		"There is little point creating a zram of greater than "
		"twice the size of memory since we expect a 2:1 compression "
		"ratio. Note that zram uses about 0.1%% of the size of "
		"the disk when not in use so a huge zram is "
		"wasteful.\n"
		"\tMemory Size: %zu kB\n"
		"\tSize you selected: %llu kB\n"
		"Continuing anyway ...\n",
		totalram_bytes >> 10, zram->disksize
		);
	}

	zram->disksize &= PAGE_MASK;
}
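The arithmetic the new helper applies: with default_disksize_perc_ram at 25 and, say, 448 MB of usable RAM, disksize = 25 * (totalram_bytes / 100) = 112 MB, rounded down to a page boundary by the final &= PAGE_MASK. A standalone sketch of that calculation (the RAM figure is made up for illustration):

#include <stdio.h>

int main(void)
{
	const unsigned long long page_mask = ~0xFFFULL;		/* 4 KB pages */
	unsigned long long totalram_bytes = 448ULL << 20;	/* assumed 448 MB */
	unsigned long long disksize = 25 * (totalram_bytes / 100);

	disksize &= page_mask;
	printf("default zram disksize: %llu MB\n", disksize >> 20);	/* 112 MB */
	return 0;
}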

static void zram_free_page(struct zram *zram, size_t index)
@@ -243,7 +335,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
	cmem = kmap_atomic(zram->table[index].page, KM_USER1) +
		zram->table[index].offset;

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
	ret = DECOMPRESS(cmem + sizeof(*zheader),
				    xv_get_object_size(cmem) - sizeof(*zheader),
				    uncmem, &clen);

@@ -257,7 +349,7 @@ static int zram_bvec_read(struct zram *zram, struct bio_vec *bvec,
	kunmap_atomic(user_mem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
@@ -291,13 +383,13 @@ static int zram_read_before_write(struct zram *zram, char *mem, u32 index)
		return 0;
	}

	ret = lzo1x_decompress_safe(cmem + sizeof(*zheader),
	ret = DECOMPRESS(cmem + sizeof(*zheader),
				    xv_get_object_size(cmem) - sizeof(*zheader),
				    mem, &clen);
	kunmap_atomic(cmem, KM_USER0);

	/* Should NEVER happen. Return bio error if it does. */
	if (unlikely(ret != LZO_E_OK)) {
	if (unlikely(ret)) {
		pr_err("Decompression failed! err=%d, page=%u\n", ret, index);
		zram_stat64_inc(zram, &zram->stats.failed_reads);
		return ret;
@@ -363,18 +455,13 @@ static int zram_bvec_write(struct zram *zram, struct bio_vec *bvec, u32 index,
		goto out;
	}

	ret = lzo1x_1_compress(uncmem, PAGE_SIZE, src, &clen,
	COMPRESS(uncmem, PAGE_SIZE, src, &clen,
			       zram->compress_workmem);

	kunmap_atomic(user_mem, KM_USER0);
	if (is_partial_io(bvec))
			kfree(uncmem);

	if (unlikely(ret != LZO_E_OK)) {
		pr_err("Compression failed! err=%d\n", ret);
		goto out;
	}

	/*
	 * Page is incompressible. Store it as-is (uncompressed)
	 * since we do not want to return too many disk write
@@ -546,27 +633,35 @@ static int zram_make_request(struct request_queue *queue, struct bio *bio)
{
	struct zram *zram = queue->queuedata;

	if (unlikely(!zram->init_done) && zram_init_device(zram))
		goto error;

	down_read(&zram->init_lock);
	if (unlikely(!zram->init_done))
		goto error_unlock;

	if (!valid_io_request(zram, bio)) {
		zram_stat64_inc(zram, &zram->stats.invalid_io);
		bio_io_error(bio);
		return 0;
	}

	if (unlikely(!zram->init_done) && zram_init_device(zram)) {
		bio_io_error(bio);
		return 0;
		goto error_unlock;
	}

	__zram_make_request(zram, bio, bio_data_dir(bio));
	up_read(&zram->init_lock);

	return 0;

error_unlock:
	up_read(&zram->init_lock);
error:
	bio_io_error(bio);
	return 0;

}

void zram_reset_device(struct zram *zram)
void __zram_reset_device(struct zram *zram)
{
	size_t index;

	mutex_lock(&zram->init_lock);
	zram->init_done = 0;

	/* Free various per-device buffers */
@@ -602,8 +697,14 @@ void zram_reset_device(struct zram *zram)
	/* Reset stats */
	memset(&zram->stats, 0, sizeof(zram->stats));

	zram_set_disksize(zram, zram_default_disksize_bytes());
	mutex_unlock(&zram->init_lock);
	zram->disksize = 0;
}

void zram_reset_device(struct zram *zram)
{
	down_write(&zram->init_lock);
	__zram_reset_device(zram);
	up_write(&zram->init_lock);
}

int zram_init_device(struct zram *zram)
@@ -611,37 +712,39 @@ int zram_init_device(struct zram *zram)
	int ret;
	size_t num_pages;

	mutex_lock(&zram->init_lock);
	down_write(&zram->init_lock);

	if (zram->init_done) {
		mutex_unlock(&zram->init_lock);
		up_write(&zram->init_lock);
		return 0;
	}

	zram->compress_workmem = kzalloc(LZO1X_MEM_COMPRESS, GFP_KERNEL);
	zram_set_disksize(zram, totalram_pages << PAGE_SHIFT);

	zram->compress_workmem = kzalloc(WMSIZE, GFP_KERNEL);
	if (!zram->compress_workmem) {
		pr_err("Error allocating compressor working memory!\n");
		ret = -ENOMEM;
		goto fail;
		goto fail_no_table;
	}

	zram->compress_buffer = (void *)__get_free_pages(__GFP_ZERO, 1);
	zram->compress_buffer =
		(void *)__get_free_pages(GFP_KERNEL | __GFP_ZERO, 1);
	if (!zram->compress_buffer) {
		pr_err("Error allocating compressor buffer space\n");
		ret = -ENOMEM;
		goto fail;
		goto fail_no_table;
	}

	num_pages = zram->disksize >> PAGE_SHIFT;
	zram->table = vmalloc(num_pages * sizeof(*zram->table));
	if (!zram->table) {
		pr_err("Error allocating zram address table\n");
		/* To prevent accessing table entries during cleanup */
		zram->disksize = 0;
		ret = -ENOMEM;
		goto fail;
		goto fail_no_table;
	}
  memset(zram->table, 0, num_pages * sizeof(*zram->table));
	memset(zram->table, 0, num_pages * sizeof(*zram->table));
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);

	/* zram devices sort of resembles non-rotational disks */
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, zram->disk->queue);
@@ -654,15 +757,17 @@ int zram_init_device(struct zram *zram)
	}

	zram->init_done = 1;
	mutex_unlock(&zram->init_lock);
	up_write(&zram->init_lock);

	pr_debug("Initialization done!\n");
	return 0;

fail_no_table:
	/* To prevent accessing table entries during cleanup */
	zram->disksize = 0;
fail:
	mutex_unlock(&zram->init_lock);
	zram_reset_device(zram);

	__zram_reset_device(zram);
	up_write(&zram->init_lock);
	pr_err("Initialization failed: err=%d\n", ret);
	return ret;
}
@@ -687,7 +792,7 @@ static int create_device(struct zram *zram, int device_id)
	int ret = 0;

	init_rwsem(&zram->lock);
	mutex_init(&zram->init_lock);
	init_rwsem(&zram->init_lock);
	spin_lock_init(&zram->stat64_lock);

	zram->queue = blk_alloc_queue(GFP_KERNEL);
@@ -718,13 +823,13 @@ static int create_device(struct zram *zram, int device_id)
	zram->disk->private_data = zram;
	snprintf(zram->disk->disk_name, 16, "zram%d", device_id);

	/*
	 * Set some default disksize. To set another disksize, user
	 * must reset the device and then write a new disksize to
	 * corresponding device's sysfs node.
	 */
	zram_set_disksize(zram, zram_default_disksize_bytes());
	/* Actual capacity set using syfs (/sys/block/zram<id>/disksize */
	set_capacity(zram->disk, 0);

	/* Can be changed using sysfs (/sys/block/zram<id>/compressor) */
#ifdef MULTIPLE_COMPRESSORS
	zram->compressor = zram_compressors[0];
#endif
	/*
	 * To ensure that we always get PAGE_SIZE aligned
	 * and n*PAGE_SIZED sized I/O requests.
@@ -768,13 +873,6 @@ static int __init zram_init(void)
{
	int ret, dev_id;

	/*
	 * Module parameter not specified by user. Use default
	 * value as defined during kernel config.
	 */
	if (zram_num_devices == 0)
		zram_num_devices = CONFIG_ZRAM_NUM_DEVICES;

	if (zram_num_devices > max_num_devices) {
		pr_warning("Invalid value for num_devices: %u\n",
				zram_num_devices);
@@ -789,12 +887,15 @@ static int __init zram_init(void)
		goto out;
	}

	if (!zram_num_devices) {
		pr_info("num_devices not specified. Using default: 1\n");
		zram_num_devices = 1;
	}

	/* Allocate the device array and initialize each one */
	pr_info("Creating %u devices ...\n", zram_num_devices);
	zram_devices = kzalloc(zram_num_devices * sizeof(struct zram),
				GFP_KERNEL);
	if (!zram_devices)
	{
	zram_devices = kzalloc(zram_num_devices * sizeof(struct zram), GFP_KERNEL);
	if (!zram_devices) {
		ret = -ENOMEM;
		goto unregister;
	}
@@ -836,8 +937,8 @@ static void __exit zram_exit(void)
	pr_debug("Cleanup done!\n");
}

module_param_named(num_devices, zram_num_devices, uint, 0);
MODULE_PARM_DESC(num_devices, "Number of zram devices");
module_param(zram_num_devices, uint, 0);
MODULE_PARM_DESC(zram_num_devices, "Number of zram devices");

module_init(zram_init);
module_exit(zram_exit);

drivers/staging/zram/zram_drv.h (mode changed to executable, 38 lines changed)
@@ -41,7 +41,7 @@ struct zobj_header {
/*-- Configurable parameters */

/* Default zram disk size: 25% of total RAM */
static const unsigned default_disksize_perc_ram = CONFIG_ZRAM_DEFAULT_PERCENTAGE;
static const unsigned default_disksize_perc_ram = 25;

/*
 * Pages that compress to size greater than this are stored
@@ -66,6 +66,13 @@ static const size_t max_zpage_size = PAGE_SIZE / 4 * 3;
#define ZRAM_SECTOR_PER_LOGICAL_BLOCK	\
	(1 << (ZRAM_LOGICAL_BLOCK_SHIFT - SECTOR_SHIFT))

#if defined(CONFIG_ZRAM_LZO) + defined(CONFIG_ZRAM_SNAPPY) == 0
#error At least one of CONFIG_ZRAM_LZO, CONFIG_ZRAM_SNAPPY must be defined!
#endif
#if defined(CONFIG_ZRAM_LZO) + defined(CONFIG_ZRAM_SNAPPY) > 1
#define MULTIPLE_COMPRESSORS
#endif

/* Flags for zram pages (table[page_no].flags) */
enum zram_pageflags {
	/* Page is stored uncompressed */
@@ -103,6 +110,9 @@ struct zram_stats {

struct zram {
	struct xv_pool *mem_pool;
#ifdef MULTIPLE_COMPRESSORS
	const struct zram_compressor *compressor;
#endif
	void *compress_workmem;
	void *compress_buffer;
	struct table *table;
@@ -112,8 +122,8 @@ struct zram {
	struct request_queue *queue;
	struct gendisk *disk;
	int init_done;
	/* Prevent concurrent execution of device init and reset */
	struct mutex init_lock;
	/* Prevent concurrent execution of device init, reset and R/W request */
	struct rw_semaphore init_lock;
	/*
	 * This is the limit on amount of *uncompressed* worth of data
	 * we can store in a disk.
@@ -130,7 +140,27 @@ extern struct attribute_group zram_disk_attr_group;
#endif

extern int zram_init_device(struct zram *zram);
extern void zram_reset_device(struct zram *zram);
extern void __zram_reset_device(struct zram *zram);

#ifdef MULTIPLE_COMPRESSORS
struct zram_compressor {
	const char *name;
	int (*compress)(
		const unsigned char *src,
		size_t src_len,
		unsigned char *dst,
		size_t *dst_len,
		void *workmem);
	int (*decompress)(
		const unsigned char *src,
		size_t src_len,
		unsigned char *dst,
		size_t *dst_len);
	unsigned workmem_bytes;
};

extern const struct zram_compressor * const zram_compressors[];
#endif

#endif
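Adding a third backend would mean contributing one more entry of this shape to zram_compressors[]. A hedged userspace mock only — the struct is copied from above, but lz4_wrap_compress/lz4_wrap_decompress are hypothetical stubs, not functions this patch provides:

#include <stddef.h>
#include <string.h>

struct zram_compressor {
	const char *name;
	int (*compress)(const unsigned char *src, size_t src_len,
			unsigned char *dst, size_t *dst_len, void *workmem);
	int (*decompress)(const unsigned char *src, size_t src_len,
			  unsigned char *dst, size_t *dst_len);
	unsigned workmem_bytes;
};

/* Stub wrappers; a real backend would call into the LZ4 library here */
static int lz4_wrap_compress(const unsigned char *src, size_t src_len,
			     unsigned char *dst, size_t *dst_len, void *wm)
{
	(void)wm;
	memcpy(dst, src, src_len);
	*dst_len = src_len;
	return 0;
}

static int lz4_wrap_decompress(const unsigned char *src, size_t src_len,
			       unsigned char *dst, size_t *dst_len)
{
	memcpy(dst, src, src_len);
	*dst_len = src_len;
	return 0;
}

/* The entry a hypothetical CONFIG_ZRAM_LZ4 would add to the table */
static const struct zram_compressor lz4_compressor = {
	.name = "LZ4",
	.workmem_bytes = 16384,	/* made-up size for the sketch */
	.compress = &lz4_wrap_compress,
	.decompress = &lz4_wrap_decompress,
};

int main(void) { return lz4_compressor.name[0] == 'L' ? 0 : 1; }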

drivers/staging/zram/zram_sysfs.c (mode changed to executable, 76 lines changed)
@@ -55,23 +55,78 @@ static ssize_t disksize_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	int ret;
	u64 disksize;
	struct zram *zram = dev_to_zram(dev);

	ret = strict_strtoull(buf, 10, &disksize);
	if (ret)
		return ret;

	down_write(&zram->init_lock);
	if (zram->init_done) {
		up_write(&zram->init_lock);
		pr_info("Cannot change disksize for initialized device\n");
		return -EBUSY;
	}

	ret = strict_strtoull(buf, 10, &zram->disksize);
	if (ret)
		return ret;

	zram->disksize = PAGE_ALIGN(zram->disksize);
	zram->disksize = PAGE_ALIGN(disksize);
	set_capacity(zram->disk, zram->disksize >> SECTOR_SHIFT);
	up_write(&zram->init_lock);

	return len;
}

#ifdef MULTIPLE_COMPRESSORS
static ssize_t compressor_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char * const buf_base = buf;
	const struct zram_compressor *p, *curr;
	unsigned int i = 0;
	struct zram *zram = dev_to_zram(dev);
	curr = zram->compressor;
	p = zram_compressors[i];
	while (p) {
		if (curr == p)
			buf += sprintf(buf, "*");
		buf += sprintf(buf, "%u - %s\n", i, p->name);
		p = zram_compressors[++i];
	}
	return buf - buf_base;
}

static ssize_t compressor_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	const struct zram_compressor *p;
	unsigned long requested;
	unsigned int i = 0;
	int ret;
	struct zram *zram = dev_to_zram(dev);

	if (zram->init_done) {
		pr_info("Cannot change compressor for initialized device\n");
		return -EBUSY;
	}

	ret = strict_strtoul(buf, 10, &requested);
	if (ret)
		return ret;

	p = zram_compressors[i];
	while (p && (i < requested))
		p = zram_compressors[++i];

	if (!p) {
		pr_info("No compressor with index #%lu\n", requested);
		return -EINVAL;
	}

	zram->compressor = p;
	return len;
}
#endif /* MULTIPLE_COMPRESSORS */
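From userspace the new attribute is driven like any other sysfs file: read it to list the built-in compressors (the active one is starred), write an index to switch before the device is initialized. A small C sketch (the zram0 path is an assumption about the running system):

#include <stdio.h>

int main(void)
{
	const char *attr = "/sys/block/zram0/compressor";
	char line[64];
	FILE *f;

	/* Select compressor #1 (SNAPPY when both backends are built in);
	 * must happen before the device is initialized. */
	f = fopen(attr, "w");
	if (!f)
		return 1;
	fputs("1\n", f);
	fclose(f);

	/* Read the list back; the active entry is prefixed with '*'. */
	f = fopen(attr, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}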

static ssize_t initstate_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
@@ -106,8 +161,10 @@ static ssize_t reset_store(struct device *dev,
	if (bdev)
		fsync_bdev(bdev);

	down_write(&zram->init_lock);
	if (zram->init_done)
		zram_reset_device(zram);
		__zram_reset_device(zram);
	up_write(&zram->init_lock);

	return len;
}
@@ -188,6 +245,10 @@ static ssize_t mem_used_total_show(struct device *dev,
	return sprintf(buf, "%llu\n", val);
}

#ifdef MULTIPLE_COMPRESSORS
static DEVICE_ATTR(compressor, S_IRUGO | S_IWUSR,
		compressor_show, compressor_store);
#endif
static DEVICE_ATTR(disksize, S_IRUGO | S_IWUSR,
		disksize_show, disksize_store);
static DEVICE_ATTR(initstate, S_IRUGO, initstate_show, NULL);
@@ -202,6 +263,9 @@ static DEVICE_ATTR(compr_data_size, S_IRUGO, compr_data_size_show, NULL);
static DEVICE_ATTR(mem_used_total, S_IRUGO, mem_used_total_show, NULL);

static struct attribute *zram_disk_attrs[] = {
#ifdef MULTIPLE_COMPRESSORS
	&dev_attr_compressor.attr,
#endif
	&dev_attr_disksize.attr,
	&dev_attr_initstate.attr,
	&dev_attr_reset.attr,

include/linux/capability.h (mode changed to executable, 6 lines changed)
@@ -357,7 +357,11 @@ struct cpu_vfs_cap_data {

#define CAP_MAC_ADMIN        33

#define CAP_LAST_CAP         CAP_MAC_ADMIN
/* Allow configuring the kernel's syslog (printk behaviour) */

#define CAP_SYSLOG           34

#define CAP_LAST_CAP         CAP_SYSLOG

#define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP)

include/linux/genlock.h (mode changed to executable, 11 lines changed)
@@ -12,7 +12,7 @@ void genlock_put_handle(struct genlock_handle *handle);
struct genlock *genlock_create_lock(struct genlock_handle *);
struct genlock *genlock_attach_lock(struct genlock_handle *, int fd);
int genlock_wait(struct genlock_handle *handle, u32 timeout);
void genlock_release_lock(struct genlock_handle *);
/* genlock_release_lock was deprecated */
int genlock_lock(struct genlock_handle *handle, int op, int flags,
	u32 timeout);
#endif
@@ -21,7 +21,8 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags,
#define GENLOCK_WRLOCK 1
#define GENLOCK_RDLOCK 2

#define GENLOCK_NOBLOCK (1 << 0)
#define GENLOCK_NOBLOCK       (1 << 0)
#define GENLOCK_WRITE_TO_READ (1 << 1)

struct genlock_lock {
	int fd;
@@ -37,9 +38,15 @@ struct genlock_lock {
	struct genlock_lock)
#define GENLOCK_IOC_ATTACH _IOW(GENLOCK_IOC_MAGIC, 2, \
	struct genlock_lock)

/* Deprecated */
#define GENLOCK_IOC_LOCK _IOW(GENLOCK_IOC_MAGIC, 3, \
	struct genlock_lock)

/* Deprecated */
#define GENLOCK_IOC_RELEASE _IO(GENLOCK_IOC_MAGIC, 4)
#define GENLOCK_IOC_WAIT _IOW(GENLOCK_IOC_MAGIC, 5, \
	struct genlock_lock)
#define GENLOCK_IOC_DREADLOCK _IOW(GENLOCK_IOC_MAGIC, 6, \
	struct genlock_lock)
#endif
@@ -10,7 +10,7 @@ SCHED_FEAT(FAIR_SLEEPERS, 1)
 * them to run sooner, but does not allow tons of sleepers to
 * rip the spread apart.
 */
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1)
SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 0)

/*
 * By not normalizing the sleep time, heavy tasks get an effective

mm/ashmem.c (146 lines changed)
@@ -29,9 +29,10 @@
#include <linux/mutex.h>
#include <linux/shmem_fs.h>
#include <linux/ashmem.h>
#include <asm/cacheflush.h>

#define ASHMEM_NAME_PREFIX ""
#define ASHMEM_NAME_PREFIX_LEN 0
#define ASHMEM_NAME_PREFIX "dev/ashmem/"
#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1)
#define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN)

/*
@@ -45,6 +46,8 @@ struct ashmem_area {
	struct list_head unpinned_list;	/* list of all ashmem areas */
	struct file *file;		/* the shmem-based backing file */
	size_t size;			/* size of the mapping, in bytes */
	unsigned long vm_start;		/* Start address of vm_area
					 * which maps this ashmem */
	unsigned long prot_mask;	/* allowed prot bits, as vm_flags */
};

@@ -178,7 +181,7 @@ static int ashmem_open(struct inode *inode, struct file *file)
	struct ashmem_area *asma;
	int ret;

	ret = nonseekable_open(inode, file);
	ret = generic_file_open(inode, file);
	if (unlikely(ret))
		return ret;

@@ -187,6 +190,7 @@ static int ashmem_open(struct inode *inode, struct file *file)
		return -ENOMEM;

	INIT_LIST_HEAD(&asma->unpinned_list);
	memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN);
	asma->prot_mask = PROT_MASK;
	file->private_data = asma;

@@ -210,6 +214,67 @@ static int ashmem_release(struct inode *ignored, struct file *file)
	return 0;
}
static ssize_t ashmem_read(struct file *file, char __user *buf,
			   size_t len, loff_t *pos)
{
	struct ashmem_area *asma = file->private_data;
	int ret = 0;

	mutex_lock(&ashmem_mutex);

	/* If size is not set, or set to 0, always return EOF. */
	if (asma->size == 0) {
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->read(asma->file, buf, len, pos);
	if (ret < 0) {
		goto out;
	}

	/** Update backing file pos, since f_ops->read() doesn't */
	asma->file->f_pos = *pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}

static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin)
{
	struct ashmem_area *asma = file->private_data;
	int ret;

	mutex_lock(&ashmem_mutex);

	if (asma->size == 0) {
		ret = -EINVAL;
		goto out;
	}

	if (!asma->file) {
		ret = -EBADF;
		goto out;
	}

	ret = asma->file->f_op->llseek(asma->file, offset, origin);
	if (ret < 0) {
		goto out;
	}

	/** Copy f_pos from backing file, since f_ops->llseek() sets it */
	file->f_pos = asma->file->f_pos;

out:
	mutex_unlock(&ashmem_mutex);
	return ret;
}
static inline unsigned long
calc_vm_may_flags(unsigned long prot)
{
@@ -264,6 +329,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma)
		vma->vm_file = asma->file;
	}
	vma->vm_flags |= VM_CAN_NONLINEAR;
	asma->vm_start = vma->vm_start;

out:
	mutex_unlock(&ashmem_mutex);
@@ -564,6 +630,69 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd,
	return ret;
}
#ifdef CONFIG_OUTER_CACHE
static unsigned int virtaddr_to_physaddr(unsigned int virtaddr)
{
	unsigned int physaddr = 0;
	pgd_t *pgd_ptr = NULL;
	pmd_t *pmd_ptr = NULL;
	pte_t *pte_ptr = NULL, pte;

	spin_lock(&current->mm->page_table_lock);
	pgd_ptr = pgd_offset(current->mm, virtaddr);
	if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) {
		pr_err("Failed to convert virtaddr %x to pgd_ptr\n",
			virtaddr);
		goto done;
	}

	pmd_ptr = pmd_offset(pgd_ptr, virtaddr);
	if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) {
		pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n",
			(void *)pgd_ptr);
		goto done;
	}

	pte_ptr = pte_offset_map(pmd_ptr, virtaddr);
	if (!pte_ptr) {
		pr_err("Failed to convert pmd_ptr %p to pte_ptr\n",
			(void *)pmd_ptr);
		goto done;
	}
	pte = *pte_ptr;
	physaddr = pte_pfn(pte);
	pte_unmap(pte_ptr);
done:
	spin_unlock(&current->mm->page_table_lock);
	physaddr <<= PAGE_SHIFT;
	return physaddr;
}
#endif

static int ashmem_cache_op(struct ashmem_area *asma,
	void (*cache_func)(unsigned long vstart, unsigned long length,
				unsigned long pstart))
{
#ifdef CONFIG_OUTER_CACHE
	unsigned long vaddr;
#endif
	mutex_lock(&ashmem_mutex);
#ifndef CONFIG_OUTER_CACHE
	cache_func(asma->vm_start, asma->size, 0);
#else
	for (vaddr = asma->vm_start; vaddr < asma->vm_start + asma->size;
		vaddr += PAGE_SIZE) {
		unsigned long physaddr;
		physaddr = virtaddr_to_physaddr(vaddr);
		if (!physaddr) {
			mutex_unlock(&ashmem_mutex);
			return -EINVAL;
		}
		cache_func(vaddr, PAGE_SIZE, physaddr);
	}
#endif
	mutex_unlock(&ashmem_mutex);
	return 0;
}

static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct ashmem_area *asma = file->private_data;
@@ -604,6 +733,15 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
			ashmem_shrink(ret, GFP_KERNEL);
		}
		break;
	case ASHMEM_CACHE_FLUSH_RANGE:
		ret = ashmem_cache_op(asma, &clean_and_invalidate_caches);
		break;
	case ASHMEM_CACHE_CLEAN_RANGE:
		ret = ashmem_cache_op(asma, &clean_caches);
		break;
	case ASHMEM_CACHE_INV_RANGE:
		ret = ashmem_cache_op(asma, &invalidate_caches);
		break;
	}

	return ret;
@@ -666,6 +804,8 @@ static struct file_operations ashmem_fops = {
	.owner = THIS_MODULE,
	.open = ashmem_open,
	.release = ashmem_release,
	.read = ashmem_read,
	.llseek = ashmem_llseek,
	.mmap = ashmem_mmap,
	.unlocked_ioctl = ashmem_ioctl,
	.compat_ioctl = ashmem_ioctl,
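The three new cache ioctls are issued on an ashmem fd after the region has been sized and mmapped. A hedged userspace sketch (it assumes an Android-style <linux/ashmem.h> that defines ASHMEM_SET_SIZE and the ASHMEM_CACHE_*_RANGE numbers, and a /dev/ashmem node on the target):

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/ashmem.h>	/* assumed to provide the ASHMEM_* ioctls */

int main(void)
{
	int fd = open("/dev/ashmem", O_RDWR);
	void *p;

	if (fd < 0)
		return 1;
	if (ioctl(fd, ASHMEM_SET_SIZE, 4096) < 0)
		return 1;
	p = mmap(NULL, 4096, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
	if (p == MAP_FAILED)
		return 1;

	/* Clean + invalidate the cached lines backing the mapping */
	if (ioctl(fd, ASHMEM_CACHE_FLUSH_RANGE, 0) < 0)
		perror("ASHMEM_CACHE_FLUSH_RANGE");

	munmap(p, 4096);
	close(fd);
	return 0;
}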
							
								
								
									

mm/ksm.c (mode changed to executable, 64 lines changed)
@@ -163,9 +163,6 @@ static unsigned long ksm_pages_unshared;
/* The number of rmap_items in use: to calculate pages_volatile */
static unsigned long ksm_rmap_items;

/* Limit on the number of unswappable pages used */
static unsigned long ksm_max_kernel_pages;

/* Number of pages ksmd should scan in one batch */
static unsigned int ksm_thread_pages_to_scan = 100;

@@ -317,7 +314,7 @@ static int break_ksm(struct vm_area_struct *vma, unsigned long addr)
	do {
		cond_resched();
		page = follow_page(vma, addr, FOLL_GET);
		if (!page)
		if (IS_ERR_OR_NULL(page))
			break;
		if (PageKsm(page))
			ret = handle_mm_fault(vma->vm_mm, vma, addr,
@@ -391,7 +388,7 @@ static struct page *get_mergeable_page(struct rmap_item *rmap_item)
		goto out;

	page = follow_page(vma, addr, FOLL_GET);
	if (!page)
	if (IS_ERR_OR_NULL(page))
		goto out;
	if (PageAnon(page)) {
		flush_anon_page(vma, page, addr);
@@ -628,7 +625,7 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
	if (!ptep)
		goto out;

	if (pte_write(*ptep)) {
	if (pte_write(*ptep) || pte_dirty(*ptep)) {
		pte_t entry;

		swapped = PageSwapCache(page);
@@ -648,10 +645,12 @@ static int write_protect_page(struct vm_area_struct *vma, struct page *page,
		 * page
		 */
		if ((page_mapcount(page) + 2 + swapped) != page_count(page)) {
			set_pte_at_notify(mm, addr, ptep, entry);
			set_pte_at(mm, addr, ptep, entry);
			goto out_unlock;
		}
		entry = pte_wrprotect(entry);
		if (pte_dirty(entry))
			set_page_dirty(page);
		entry = pte_mkclean(pte_wrprotect(entry));
		set_pte_at_notify(mm, addr, ptep, entry);
	}
	*orig_pte = *ptep;
@@ -717,6 +716,8 @@ static int replace_page(struct vm_area_struct *vma, struct page *oldpage,
	set_pte_at_notify(mm, addr, ptep, mk_pte(newpage, prot));

	page_remove_rmap(oldpage);
	if (!page_mapped(oldpage))
		try_to_free_swap(oldpage);
	put_page(oldpage);

	pte_unmap_unlock(ptep, ptl);
@@ -827,13 +828,6 @@ static int try_to_merge_two_pages(struct mm_struct *mm1, unsigned long addr1,
	struct page *kpage;
	int err = -EFAULT;

	/*
	 * The number of nodes in the stable tree
	 * is the number of kernel pages that we hold.
	 */
	if (ksm_max_kernel_pages &&
	    ksm_max_kernel_pages <= ksm_pages_shared)
		return err;

	kpage = alloc_page(GFP_HIGHUSER);
	if (!kpage)
@@ -1209,6 +1203,18 @@ static struct rmap_item *scan_get_next_rmap_item(struct page **page)

	slot = ksm_scan.mm_slot;
	if (slot == &ksm_mm_head) {
		/*
		 * A number of pages can hang around indefinitely on per-cpu
		 * pagevecs, raised page count preventing write_protect_page
		 * from merging them.  Though it doesn't really matter much,
		 * it is puzzling to see some stuck in pages_volatile until
		 * other activity jostles them out, and they also prevented
		 * LTP's KSM test from succeeding deterministically; so drain
		 * them here (here rather than on entry to ksm_do_scan(),
		 * so we don't IPI too often when pages_to_scan is set low).
		 */
		lru_add_drain_all();

		root_unstable_tree = RB_ROOT;

		spin_lock(&ksm_mmlist_lock);
@@ -1314,7 +1320,7 @@ next_mm:
static void ksm_do_scan(unsigned int scan_npages)
{
	struct rmap_item *rmap_item;
	struct page *page;
	struct page *uninitialized_var(page);

	while (scan_npages--) {
		cond_resched();
@@ -1577,29 +1583,6 @@ static ssize_t run_store(struct kobject *kobj, struct kobj_attribute *attr,
}
KSM_ATTR(run);

static ssize_t max_kernel_pages_store(struct kobject *kobj,
				      struct kobj_attribute *attr,
				      const char *buf, size_t count)
{
	int err;
	unsigned long nr_pages;

	err = strict_strtoul(buf, 10, &nr_pages);
	if (err)
		return -EINVAL;

	ksm_max_kernel_pages = nr_pages;

	return count;
}

static ssize_t max_kernel_pages_show(struct kobject *kobj,
				     struct kobj_attribute *attr, char *buf)
{
	return sprintf(buf, "%lu\n", ksm_max_kernel_pages);
}
KSM_ATTR(max_kernel_pages);

static ssize_t pages_shared_show(struct kobject *kobj,
				 struct kobj_attribute *attr, char *buf)
{
@@ -1649,7 +1632,6 @@ static struct attribute *ksm_attrs[] = {
	&sleep_millisecs_attr.attr,
	&pages_to_scan_attr.attr,
	&run_attr.attr,
	&max_kernel_pages_attr.attr,
	&pages_shared_attr.attr,
	&pages_sharing_attr.attr,
	&pages_unshared_attr.attr,
@@ -1669,8 +1651,6 @@ static int __init ksm_init(void)
	struct task_struct *ksm_thread;
	int err;

	ksm_max_kernel_pages = totalram_pages / 4;

	err = ksm_slab_init();
	if (err)
		goto out;

mm/vmalloc.c (mode changed to executable, 7 lines changed)
@@ -1470,6 +1470,7 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
{
	struct page **pages;
	unsigned int nr_pages, array_size, i;
	gfp_t nested_gfp = (gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO;

	nr_pages = (area->size - PAGE_SIZE) >> PAGE_SHIFT;
	array_size = (nr_pages * sizeof(struct page *));
@@ -1477,13 +1478,11 @@ static void *__vmalloc_area_node(struct vm_struct *area, gfp_t gfp_mask,
	area->nr_pages = nr_pages;
	/* Please note that the recursion is strictly bounded. */
	if (array_size > PAGE_SIZE) {
		pages = __vmalloc_node(array_size, 1, gfp_mask | __GFP_ZERO,
		pages = __vmalloc_node(array_size, 1, nested_gfp|__GFP_HIGHMEM,
				PAGE_KERNEL, node, caller);
		area->flags |= VM_VPAGES;
	} else {
		pages = kmalloc_node(array_size,
				(gfp_mask & GFP_RECLAIM_MASK) | __GFP_ZERO,
				node);
		pages = kmalloc_node(array_size, nested_gfp, node);
	}
	area->pages = pages;
	area->caller = caller;