diff --git a/arch/arm/mach-msm/irq.c b/arch/arm/mach-msm/irq.c index 4b4a0442..096285e0 100644 --- a/arch/arm/mach-msm/irq.c +++ b/arch/arm/mach-msm/irq.c @@ -43,38 +43,19 @@ static int msm_irq_debug_mask; module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP); #define VIC_REG(off) (MSM_VIC_BASE + (off)) -#if defined(CONFIG_ARCH_MSM7X30) -#define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4) -#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3) -#else -#define VIC_INT_TO_REG_ADDR(base, irq) (base + ((irq & 32) ? 4 : 0)) -#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 1) -#endif #define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */ #define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */ -#define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */ #define VIC_INT_EN0 VIC_REG(0x0010) #define VIC_INT_EN1 VIC_REG(0x0014) -#define VIC_INT_EN2 VIC_REG(0x0018) -#define VIC_INT_EN3 VIC_REG(0x001C) #define VIC_INT_ENCLEAR0 VIC_REG(0x0020) #define VIC_INT_ENCLEAR1 VIC_REG(0x0024) -#define VIC_INT_ENCLEAR2 VIC_REG(0x0028) -#define VIC_INT_ENCLEAR3 VIC_REG(0x002C) #define VIC_INT_ENSET0 VIC_REG(0x0030) #define VIC_INT_ENSET1 VIC_REG(0x0034) -#define VIC_INT_ENSET2 VIC_REG(0x0038) -#define VIC_INT_ENSET3 VIC_REG(0x003C) #define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */ #define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */ -#define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */ #define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */ #define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */ -#define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */ -#define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */ #define VIC_NO_PEND_VAL VIC_REG(0x0060) #if defined(CONFIG_ARCH_MSM_SCORPION) @@ -88,24 +69,14 @@ module_param_named(debug_mask, msm_irq_debug_mask, int, 
S_IRUGO | S_IWUSR | S_IW #endif #define VIC_IRQ_STATUS0 VIC_REG(0x0080) #define VIC_IRQ_STATUS1 VIC_REG(0x0084) -#define VIC_IRQ_STATUS2 VIC_REG(0x0088) -#define VIC_IRQ_STATUS3 VIC_REG(0x008C) #define VIC_FIQ_STATUS0 VIC_REG(0x0090) #define VIC_FIQ_STATUS1 VIC_REG(0x0094) -#define VIC_FIQ_STATUS2 VIC_REG(0x0098) -#define VIC_FIQ_STATUS3 VIC_REG(0x009C) #define VIC_RAW_STATUS0 VIC_REG(0x00A0) #define VIC_RAW_STATUS1 VIC_REG(0x00A4) -#define VIC_RAW_STATUS2 VIC_REG(0x00A8) -#define VIC_RAW_STATUS3 VIC_REG(0x00AC) #define VIC_INT_CLEAR0 VIC_REG(0x00B0) #define VIC_INT_CLEAR1 VIC_REG(0x00B4) -#define VIC_INT_CLEAR2 VIC_REG(0x00B8) -#define VIC_INT_CLEAR3 VIC_REG(0x00BC) #define VIC_SOFTINT0 VIC_REG(0x00C0) #define VIC_SOFTINT1 VIC_REG(0x00C4) -#define VIC_SOFTINT2 VIC_REG(0x00C8) -#define VIC_SOFTINT3 VIC_REG(0x00CC) #define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */ #define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */ #define VIC_IRQ_VEC_WR VIC_REG(0x00D8) @@ -129,40 +100,14 @@ module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IW #define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4)) #define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4)) -#if defined(CONFIG_ARCH_MSM7X30) -#define VIC_NUM_REGS 4 -#else -#define VIC_NUM_REGS 2 -#endif - -#if VIC_NUM_REGS == 2 -#define DPRINT_REGS(base_reg, format, ...) \ - printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \ - readl(base_reg ## 0), readl(base_reg ## 1)) -#define DPRINT_ARRAY(array, format, ...) \ - printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \ - array[0], array[1]) -#elif VIC_NUM_REGS == 4 -#define DPRINT_REGS(base_reg, format, ...) \ - printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \ - readl(base_reg ## 0), readl(base_reg ## 1), \ - readl(base_reg ## 2), readl(base_reg ## 3)) -#define DPRINT_ARRAY(array, format, ...) 
\ - printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \ - array[0], array[1], \ - array[2], array[3]) -#else -#error "VIC_NUM_REGS set to illegal value" -#endif - static uint32_t msm_irq_smsm_wake_enable[2]; static struct { uint32_t int_en[2]; uint32_t int_type; uint32_t int_polarity; uint32_t int_select; -} msm_irq_shadow_reg[VIC_NUM_REGS]; -static uint32_t msm_irq_idle_disable[VIC_NUM_REGS]; +} msm_irq_shadow_reg[2]; +static uint32_t msm_irq_idle_disable[2]; #if defined(CONFIG_MSM_N_WAY_SMD) #define INT_INFO_SMSM_ID SMEM_APPS_DEM_SLAVE_DATA @@ -198,9 +143,7 @@ static uint8_t msm_irq_to_smsm[NR_MSM_IRQS + NR_SIRC_IRQS] = { [INT_UART1DM_IRQ] = 17, [INT_UART1DM_RX] = 18, [INT_KEYSENSE] = 19, -#if !defined(CONFIG_ARCH_MSM7X30) [INT_AD_HSSD] = 20, -#endif [INT_NAND_WR_ER_DONE] = 21, [INT_NAND_OP_DONE] = 22, @@ -226,31 +169,23 @@ static uint8_t msm_irq_to_smsm[NR_MSM_IRQS + NR_SIRC_IRQS] = { [INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ, [INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ, [INT_ADSP_A11] = SMSM_FAKE_IRQ, -#ifdef CONFIG_ARCH_QSD8X50 +#ifdef CONFIG_ARCH_MSM_SCORPION [INT_SIRC_0] = SMSM_FAKE_IRQ, [INT_SIRC_1] = SMSM_FAKE_IRQ, #endif }; -static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val) -{ - int i; - /* the address must be continue */ - for (i = 0; i < VIC_NUM_REGS; i++) - writel(val, base + (i * 4)); -} - static void msm_irq_ack(unsigned int irq) { - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, irq); + void __iomem *reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0); irq = 1 << (irq & 31); writel(irq, reg); } static void msm_irq_mask(unsigned int irq) { - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, irq); - unsigned index = VIC_INT_TO_REG_INDEX(irq); + void __iomem *reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 
4 : 0); + unsigned index = (irq >> 5) & 1; uint32_t mask = 1UL << (irq & 31); int smsm_irq = msm_irq_to_smsm[irq]; @@ -266,8 +201,8 @@ static void msm_irq_mask(unsigned int irq) static void msm_irq_unmask(unsigned int irq) { - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, irq); - unsigned index = VIC_INT_TO_REG_INDEX(irq); + void __iomem *reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0); + unsigned index = (irq >> 5) & 1; uint32_t mask = 1UL << (irq & 31); int smsm_irq = msm_irq_to_smsm[irq]; @@ -284,7 +219,7 @@ static void msm_irq_unmask(unsigned int irq) static int msm_irq_set_wake(unsigned int irq, unsigned int on) { - unsigned index = VIC_INT_TO_REG_INDEX(irq); + unsigned index = (irq >> 5) & 1; uint32_t mask = 1UL << (irq & 31); int smsm_irq = msm_irq_to_smsm[irq]; @@ -310,9 +245,9 @@ static int msm_irq_set_wake(unsigned int irq, unsigned int on) static int msm_irq_set_type(unsigned int irq, unsigned int flow_type) { - void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, irq); - void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, irq); - unsigned index = VIC_INT_TO_REG_INDEX(irq); + void __iomem *treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0); + void __iomem *preg = VIC_INT_POLARITY0 + ((irq & 32) ? 
4 : 0); + unsigned index = (irq >> 5) & 1; int b = 1 << (irq & 31); uint32_t polarity; uint32_t type; @@ -341,24 +276,16 @@ static int msm_irq_set_type(unsigned int irq, unsigned int flow_type) int msm_irq_pending(void) { - int i, pending = 0; - /* the address must be continue */ - for (i = 0; (i < VIC_NUM_REGS) && !pending; i++) - pending |= readl(VIC_IRQ_STATUS0 + (i * 4)); - - return pending; + return readl(VIC_IRQ_STATUS0) || readl(VIC_IRQ_STATUS1); } int msm_irq_idle_sleep_allowed(void) { - int i, disable = 0; - if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_REQUEST) - DPRINT_ARRAY(msm_irq_idle_disable, - "msm_irq_idle_sleep_allowed: disable"); - for (i = 0; i < VIC_NUM_REGS; i++) - disable |= msm_irq_idle_disable[i]; - return !(disable || !smsm_int_info); + printk(KERN_INFO "msm_irq_idle_sleep_allowed: disable %x %x\n", + msm_irq_idle_disable[0], msm_irq_idle_disable[1]); + return !(msm_irq_idle_disable[0] || msm_irq_idle_disable[1] || + !smsm_int_info); } /* If arm9_wake is set: pass control to the other core. 
@@ -374,8 +301,8 @@ void msm_irq_enter_sleep1(bool arm9_wake, int from_idle) int msm_irq_enter_sleep2(bool arm9_wake, int from_idle) { - int i, limit = 10; - uint32_t pending[VIC_NUM_REGS]; + int limit = 10; + uint32_t pending0, pending1; if (from_idle && !arm9_wake) return 0; @@ -384,25 +311,23 @@ int msm_irq_enter_sleep2(bool arm9_wake, int from_idle) WARN_ON_ONCE(!arm9_wake && !from_idle); if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP) - DPRINT_REGS(VIC_IRQ_STATUS, "%s change irq, pend", __func__); - - for (i = 0; i < VIC_NUM_REGS; i++) { - pending[i] = readl(VIC_IRQ_STATUS0 + (i * 4)); - pending[i] &= msm_irq_shadow_reg[i].int_en[!from_idle]; - } + printk(KERN_INFO "msm_irq_enter_sleep change irq, pend %x %x\n", + readl(VIC_IRQ_STATUS0), readl(VIC_IRQ_STATUS1)); + pending0 = readl(VIC_IRQ_STATUS0); + pending1 = readl(VIC_IRQ_STATUS1); + pending0 &= msm_irq_shadow_reg[0].int_en[!from_idle]; /* Clear INT_A9_M2A_5 since requesting sleep triggers it */ - pending[0] &= ~(1U << INT_A9_M2A_5); - - for (i = 0; i < VIC_NUM_REGS; i++) { - if (pending[i]) { - if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT) - DPRINT_ARRAY(pending, "%s abort", - __func__); - return -EAGAIN; - } + pending0 &= ~(1U << INT_A9_M2A_5); + pending1 &= msm_irq_shadow_reg[1].int_en[!from_idle]; + if (pending0 || pending1) { + if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT) + printk(KERN_INFO "msm_irq_enter_sleep2 abort %x %x\n", + pending0, pending1); + return -EAGAIN; } - msm_irq_write_all_regs(VIC_INT_EN0, 0); + writel(0, VIC_INT_EN0); + writel(0, VIC_INT_EN1); while (limit-- > 0) { int pend_irq; @@ -420,9 +345,8 @@ int msm_irq_enter_sleep2(bool arm9_wake, int from_idle) msm_irq_ack(INT_A9_M2A_6); writel(1U << INT_A9_M2A_6, VIC_INT_ENSET0); } else { - for (i = 0; i < VIC_NUM_REGS; i++) - writel(msm_irq_shadow_reg[i].int_en[1], - VIC_INT_ENSET0 + (i * 4)); + writel(msm_irq_shadow_reg[0].int_en[1], VIC_INT_ENSET0); + writel(msm_irq_shadow_reg[1].int_en[1], VIC_INT_ENSET1); } return 0; } @@ -433,7 +357,7 
@@ void msm_irq_exit_sleep1(void) msm_irq_ack(INT_A9_M2A_6); msm_irq_ack(INT_PWB_I2C); - for (i = 0; i < VIC_NUM_REGS; i++) { + for (i = 0; i < 2; i++) { writel(msm_irq_shadow_reg[i].int_type, VIC_INT_TYPE0 + i * 4); writel(msm_irq_shadow_reg[i].int_polarity, VIC_INT_POLARITY0 + i * 4); writel(msm_irq_shadow_reg[i].int_en[0], VIC_INT_EN0 + i * 4); @@ -527,16 +451,20 @@ void __init msm_init_irq(void) unsigned n; /* select level interrupts */ - msm_irq_write_all_regs(VIC_INT_TYPE0, 0); + writel(0, VIC_INT_TYPE0); + writel(0, VIC_INT_TYPE1); /* select highlevel interrupts */ - msm_irq_write_all_regs(VIC_INT_POLARITY0, 0); + writel(0, VIC_INT_POLARITY0); + writel(0, VIC_INT_POLARITY1); /* select IRQ for all INTs */ - msm_irq_write_all_regs(VIC_INT_SELECT0, 0); + writel(0, VIC_INT_SELECT0); + writel(0, VIC_INT_SELECT1); /* disable all INTs */ - msm_irq_write_all_regs(VIC_INT_EN0, 0); + writel(0, VIC_INT_EN0); + writel(0, VIC_INT_EN1); /* don't use 1136 vic */ writel(0, VIC_CONFIG); @@ -565,7 +493,7 @@ late_initcall(msm_init_irq_late); #if defined(CONFIG_MSM_FIQ_SUPPORT) void msm_trigger_irq(int irq) { - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_SOFTINT0, irq); + void __iomem *reg = VIC_SOFTINT0 + ((irq & 32) ? 4 : 0); uint32_t mask = 1UL << (irq & 31); writel(mask, reg); } @@ -588,8 +516,8 @@ void msm_fiq_disable(int irq) static void _msm_fiq_select(int irq) { - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq); - unsigned index = VIC_INT_TO_REG_INDEX(irq); + void __iomem *reg = VIC_INT_SELECT0 + ((irq & 32) ? 4 : 0); + unsigned index = (irq >> 5) & 1; uint32_t mask = 1UL << (irq & 31); unsigned long flags; @@ -601,8 +529,8 @@ static void _msm_fiq_select(int irq) static void _msm_fiq_unselect(int irq) { - void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq); - unsigned index = VIC_INT_TO_REG_INDEX(irq); + void __iomem *reg = VIC_INT_SELECT0 + ((irq & 32) ? 
4 : 0); + unsigned index = (irq >> 5) & 1; uint32_t mask = 1UL << (irq & 31); unsigned long flags; diff --git a/arch/arm/mach-msm/proc_comm.c b/arch/arm/mach-msm/proc_comm.c index d2870bdf..85b75502 100644 --- a/arch/arm/mach-msm/proc_comm.c +++ b/arch/arm/mach-msm/proc_comm.c @@ -1,6 +1,7 @@ /* arch/arm/mach-msm/proc_comm.c * * Copyright (C) 2007-2008 Google, Inc. + * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. * Author: Brian Swetland * * This software is licensed under the terms of the GNU General Public @@ -18,24 +19,24 @@ #include #include #include +#include #include #include #include "proc_comm.h" +#include "smd_private.h" #if defined(CONFIG_ARCH_MSM7X30) -#define MSM_TRIG_A2M_INT(n) (writel(1 << n, MSM_GCC_BASE + 0x8)) +#define MSM_TRIG_A2M_PC_INT (writel(1 << 6, MSM_GCC_BASE + 0x8)) +#elif defined(CONFIG_ARCH_MSM8X60) +#define MSM_TRIG_A2M_PC_INT (writel(1 << 5, MSM_GCC_BASE + 0x8)) +#else +#define MSM_TRIG_A2M_PC_INT (writel(1, MSM_CSR_BASE + 0x400 + (6) * 4)) #endif -#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4) - static inline void notify_other_proc_comm(void) { -#if defined(CONFIG_ARCH_MSM7X30) - MSM_TRIG_A2M_INT(6); -#else - writel(1, MSM_A2M_INT(6)); -#endif + MSM_TRIG_A2M_PC_INT; } #define APP_COMMAND 0x00 @@ -50,69 +51,84 @@ static inline void notify_other_proc_comm(void) static DEFINE_SPINLOCK(proc_comm_lock); -/* The higher level SMD support will install this to - * provide a way to check for and handle modem restart. - */ -int (*msm_check_for_modem_crash)(void); - /* Poll for a state change, checking for possible * modem crashes along the way (so we don't wait - * forever while the ARM9 is blowing up). + * forever while the ARM9 is blowing up. * * Return an error in the event of a modem crash and * restart so the msm_proc_comm() routine can restart * the operation from the beginning. 
*/ -static int proc_comm_wait_for(void __iomem *addr, unsigned value) +static int proc_comm_wait_for(unsigned addr, unsigned value) { - for (;;) { + while (1) { if (readl(addr) == value) return 0; - if (msm_check_for_modem_crash) - if (msm_check_for_modem_crash()) - return -EAGAIN; + if (smsm_check_for_modem_crash()) + return -EAGAIN; + + udelay(5); } } +void msm_proc_comm_reset_modem_now(void) +{ + unsigned base = (unsigned)MSM_SHARED_RAM_BASE; + unsigned long flags; + + spin_lock_irqsave(&proc_comm_lock, flags); + +again: + if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY)) + goto again; + + writel(PCOM_RESET_MODEM, base + APP_COMMAND); + writel(0, base + APP_DATA1); + writel(0, base + APP_DATA2); + + spin_unlock_irqrestore(&proc_comm_lock, flags); + + notify_other_proc_comm(); + + return; +} +EXPORT_SYMBOL(msm_proc_comm_reset_modem_now); + int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2) { - void __iomem *base = MSM_SHARED_RAM_BASE; + unsigned base = (unsigned)MSM_SHARED_RAM_BASE; unsigned long flags; int ret; spin_lock_irqsave(&proc_comm_lock, flags); - for (;;) { - if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY)) - continue; +again: + if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY)) + goto again; - writel(cmd, base + APP_COMMAND); - writel(data1 ? *data1 : 0, base + APP_DATA1); - writel(data2 ? *data2 : 0, base + APP_DATA2); + writel(cmd, base + APP_COMMAND); + writel(data1 ? *data1 : 0, base + APP_DATA1); + writel(data2 ? 
*data2 : 0, base + APP_DATA2); - notify_other_proc_comm(); + notify_other_proc_comm(); - if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE)) - continue; + if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE)) + goto again; - if (readl(base + APP_STATUS) != PCOM_CMD_FAIL) { - if (data1) - *data1 = readl(base + APP_DATA1); - if (data2) - *data2 = readl(base + APP_DATA2); - ret = 0; - } else { - ret = -EIO; - } - break; + if (readl(base + APP_STATUS) == PCOM_CMD_SUCCESS) { + if (data1) + *data1 = readl(base + APP_DATA1); + if (data2) + *data2 = readl(base + APP_DATA2); + ret = 0; + } else { + ret = -EIO; } writel(PCOM_CMD_IDLE, base + APP_COMMAND); spin_unlock_irqrestore(&proc_comm_lock, flags); - return ret; } - - +EXPORT_SYMBOL(msm_proc_comm); diff --git a/arch/arm/mach-msm/proc_comm.h b/arch/arm/mach-msm/proc_comm.h index c9269c7c..4d5bee01 100644 --- a/arch/arm/mach-msm/proc_comm.h +++ b/arch/arm/mach-msm/proc_comm.h @@ -1,6 +1,6 @@ /* arch/arm/mach-msm/proc_comm.h * - * Copyright (c) 2007 QUALCOMM Incorporated + * Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -179,7 +179,18 @@ enum { PCOM_CLKCTL_RPC_RAIL_DISABLE, PCOM_CLKCTL_RPC_RAIL_CONTROL, PCOM_CLKCTL_RPC_MIN_MSMC1, - PCOM_NUM_CMDS, + PCOM_CLKCTL_RPC_SRC_REQUEST, + PCOM_NPA_INIT, + PCOM_NPA_ISSUE_REQUIRED_REQUEST, +}; + +enum { + PCOM_OEM_FIRST_CMD = 0x10000000, + PCOM_OEM_TEST_CMD = PCOM_OEM_FIRST_CMD, + + /* add OEM PROC COMM commands here */ + + PCOM_OEM_LAST = PCOM_OEM_TEST_CMD, }; enum { @@ -199,7 +210,6 @@ enum { PCOM_CMD_FAIL_SMSM_NOT_INIT, PCOM_CMD_FAIL_PROC_COMM_BUSY, PCOM_CMD_FAIL_PROC_COMM_NOT_INIT, - }; /* List of VREGs that support the Pull Down Resistor setting. 
*/ @@ -294,6 +304,7 @@ enum { (((pull) & 0x3) << 15) | \ (((drvstr) & 0xF) << 17)) +void msm_proc_comm_reset_modem_now(void); int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2); #endif diff --git a/arch/arm/mach-msm/sirc.c b/arch/arm/mach-msm/sirc.c index 50ee773f..2dc3247c 100644 --- a/arch/arm/mach-msm/sirc.c +++ b/arch/arm/mach-msm/sirc.c @@ -1,6 +1,6 @@ /* linux/arch/arm/mach-msm/irq.c * - * Copyright (c) 2009 QUALCOMM Incorporated. + * Copyright (c) 2009-2010 Code Aurora Forum. All rights reserved. * Copyright (C) 2009 Google, Inc. * * This software is licensed under the terms of the GNU General Public @@ -189,9 +189,9 @@ static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc) reg++; if (reg == ARRAY_SIZE(sirc_reg_table)) { - printk(KERN_ERR "%s: incorrect irq %d called\n", - __func__, irq); - return; + printk(KERN_ERR "%s: incorrect irq %d called\n", + __func__, irq); + return; } status = readl(sirc_reg_table[reg].int_status); diff --git a/arch/arm/mach-msm/sirc.h b/arch/arm/mach-msm/sirc.h index 8e1399f0..24f3ae80 100644 --- a/arch/arm/mach-msm/sirc.h +++ b/arch/arm/mach-msm/sirc.h @@ -16,12 +16,20 @@ #ifndef _ARCH_ARM_MACH_MSM_SIRC_H #define _ARCH_ARM_MACH_MSM_SIRC_H -#ifdef CONFIG_ARCH_QSD8X50 +#ifdef CONFIG_ARCH_MSM_SCORPION void sirc_fiq_select(int irq, bool enable); -void __init msm_init_sirc(void); #else static inline void sirc_fiq_select(int irq, bool enable) {} +#endif + +#ifdef CONFIG_ARCH_QSD8X50 +void __init msm_init_sirc(void); +void msm_sirc_enter_sleep(void); +void msm_sirc_exit_sleep(void); +#else static inline void __init msm_init_sirc(void) {} +static inline void msm_sirc_enter_sleep(void) { } +static inline void msm_sirc_exit_sleep(void) { } #endif #endif diff --git a/arch/arm/mach-msm/smd.c b/arch/arm/mach-msm/smd.c index 23d67e8d..8a74234a 100644 --- a/arch/arm/mach-msm/smd.c +++ b/arch/arm/mach-msm/smd.c @@ -140,16 +140,18 @@ static void handle_modem_crash(void) ; } -extern int 
(*msm_check_for_modem_crash)(void); - uint32_t raw_smsm_get_state(enum smsm_state_item item) { return readl(smd_info.state + item * 4); } -static int check_for_modem_crash(void) +int smsm_check_for_modem_crash(void) { - if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET) { + /* if the modem's not ready yet, we have to hope for the best */ + if (!smd_info.state) + return 0; + + if (raw_smsm_get_state(SMSM_MODEM_STATE) & SMSM_RESET) { handle_modem_crash(); return -1; } @@ -1238,8 +1240,6 @@ static int __init msm_smd_probe(struct platform_device *pdev) do_smd_probe(); - msm_check_for_modem_crash = check_for_modem_crash; - msm_init_last_radio_log(THIS_MODULE); smd_initialized = 1; diff --git a/arch/arm/mach-msm/smd_private.h b/arch/arm/mach-msm/smd_private.h index 3dc0a203..91f19338 100644 --- a/arch/arm/mach-msm/smd_private.h +++ b/arch/arm/mach-msm/smd_private.h @@ -1,7 +1,7 @@ /* arch/arm/mach-msm/smd_private.h * * Copyright (C) 2007 Google, Inc. - * Copyright (c) 2007 QUALCOMM Incorporated + * Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -16,6 +16,9 @@ #ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_ #define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_ +#include +#include + struct smem_heap_info { unsigned initialized; unsigned free_offset; @@ -46,12 +49,15 @@ struct smem_proc_comm { #define VERSION_MODEM_SBL 7 #define VERSION_APPS 8 #define VERSION_MODEM 9 +#define VERSION_DSPS 10 + +#define SMD_HEAP_SIZE 512 struct smem_shared { struct smem_proc_comm proc_comm[4]; unsigned version[32]; struct smem_heap_info heap_info; - struct smem_heap_entry heap_toc[512]; + struct smem_heap_entry heap_toc[SMD_HEAP_SIZE]; }; #define SMSM_V1_SIZE (sizeof(unsigned) * 8) @@ -122,35 +128,37 @@ enum { #define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE #define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL -#define SMSM_INIT 0x00000001 -#define SMSM_OSENTERED 0x00000002 -#define SMSM_SMDWAIT 0x00000004 -#define SMSM_SMDINIT 0x00000008 -#define SMSM_RPCWAIT 0x00000010 -#define SMSM_RPCINIT 0x00000020 -#define SMSM_RESET 0x00000040 -#define SMSM_RSA 0x00000080 -#define SMSM_RUN 0x00000100 -#define SMSM_PWRC 0x00000200 -#define SMSM_TIMEWAIT 0x00000400 -#define SMSM_TIMEINIT 0x00000800 -#define SMSM_PWRC_EARLY_EXIT 0x00001000 -#define SMSM_WFPI 0x00002000 -#define SMSM_SLEEP 0x00004000 -#define SMSM_SLEEPEXIT 0x00008000 -#define SMSM_OEMSBL_RELEASE 0x00010000 -#define SMSM_APPS_REBOOT 0x00020000 -#define SMSM_SYSTEM_POWER_DOWN 0x00040000 -#define SMSM_SYSTEM_REBOOT 0x00080000 -#define SMSM_SYSTEM_DOWNLOAD 0x00100000 -#define SMSM_PWRC_SUSPEND 0x00200000 -#define SMSM_APPS_SHUTDOWN 0x00400000 -#define SMSM_SMD_LOOPBACK 0x00800000 -#define SMSM_RUN_QUIET 0x01000000 -#define SMSM_MODEM_WAIT 0x02000000 -#define SMSM_MODEM_BREAK 0x04000000 -#define SMSM_MODEM_CONTINUE 0x08000000 -#define SMSM_UNKNOWN 0x80000000 +#define SMSM_INIT 0x00000001 +#define SMSM_OSENTERED 0x00000002 +#define SMSM_SMDWAIT 
0x00000004 +#define SMSM_SMDINIT 0x00000008 +#define SMSM_RPCWAIT 0x00000010 +#define SMSM_RPCINIT 0x00000020 +#define SMSM_RESET 0x00000040 +#define SMSM_RSA 0x00000080 +#define SMSM_RUN 0x00000100 +#define SMSM_PWRC 0x00000200 +#define SMSM_TIMEWAIT 0x00000400 +#define SMSM_TIMEINIT 0x00000800 +#define SMSM_PWRC_EARLY_EXIT 0x00001000 +#define SMSM_WFPI 0x00002000 +#define SMSM_SLEEP 0x00004000 +#define SMSM_SLEEPEXIT 0x00008000 +#define SMSM_OEMSBL_RELEASE 0x00010000 +#define SMSM_APPS_REBOOT 0x00020000 +#define SMSM_SYSTEM_POWER_DOWN 0x00040000 +#define SMSM_SYSTEM_REBOOT 0x00080000 +#define SMSM_SYSTEM_DOWNLOAD 0x00100000 +#define SMSM_PWRC_SUSPEND 0x00200000 +#define SMSM_APPS_SHUTDOWN 0x00400000 +#define SMSM_SMD_LOOPBACK 0x00800000 +#define SMSM_RUN_QUIET 0x01000000 +#define SMSM_MODEM_WAIT 0x02000000 +#define SMSM_MODEM_BREAK 0x04000000 +#define SMSM_MODEM_CONTINUE 0x08000000 +#define SMSM_SYSTEM_REBOOT_USR 0x20000000 +#define SMSM_SYSTEM_PWRDWN_USR 0x40000000 +#define SMSM_UNKNOWN 0x80000000 #define SMSM_WKUP_REASON_RPC 0x00000001 #define SMSM_WKUP_REASON_INT 0x00000002 @@ -278,18 +286,17 @@ typedef enum { } smem_mem_type; -#define SMD_SS_CLOSED 0x00000000 -#define SMD_SS_OPENING 0x00000001 -#define SMD_SS_OPENED 0x00000002 -#define SMD_SS_FLUSHING 0x00000003 -#define SMD_SS_CLOSING 0x00000004 -#define SMD_SS_RESET 0x00000005 -#define SMD_SS_RESET_OPENING 0x00000006 +#define SMD_SS_CLOSED 0x00000000 +#define SMD_SS_OPENING 0x00000001 +#define SMD_SS_OPENED 0x00000002 +#define SMD_SS_FLUSHING 0x00000003 +#define SMD_SS_CLOSING 0x00000004 +#define SMD_SS_RESET 0x00000005 +#define SMD_SS_RESET_OPENING 0x00000006 -#define SMD_BUF_SIZE 8192 -#define SMD_CHANNELS 64 - -#define SMD_HEADER_SIZE 20 +#define SMD_BUF_SIZE 8192 +#define SMD_CHANNELS 64 +#define SMD_HEADER_SIZE 20 #define SMD_TYPE_MASK 0x0FF #define SMD_TYPE_APPS_MODEM 0x000 @@ -301,6 +308,8 @@ typedef enum { #define SMD_KIND_STREAM 0x100 #define SMD_KIND_PACKET 0x200 +int 
smsm_check_for_modem_crash(void); +#define msm_check_for_modem_crash smsm_check_for_modem_crash void *smem_find(unsigned id, unsigned size); void *smem_item(unsigned id, unsigned *size); uint32_t raw_smsm_get_state(enum smsm_state_item item); diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c index 9e9fb56d..f1523fd5 100755 --- a/drivers/misc/pmem.c +++ b/drivers/misc/pmem.c @@ -1,4 +1,3 @@ -#ifdef CONFIG_MSM_KGSL /* drivers/android/pmem.c * * Copyright (C) 2007 Google, Inc. @@ -15,8 +14,6 @@ * */ - - #include #include #include @@ -36,7 +33,6 @@ #include #include #include -#include #define PMEM_MAX_USER_SPACE_DEVICES (10) #define PMEM_MAX_KERNEL_SPACE_DEVICES (2) @@ -57,6 +53,8 @@ #define PMEM_DEBUG 0 #endif +#define SYSTEM_ALLOC_RETRY 10 + /* indicates that a refernce to this file has been taken via get_pmem_file, * the file should not be released until put_pmem_file is called */ #define PMEM_FLAGS_BUSY 0x1 @@ -143,6 +141,14 @@ unsigned long unstable_pmem_start; /* size of unstable PMEM physical memory */ unsigned long unstable_pmem_size; +struct alloc_list { + void *addr; /* physical addr of allocation */ + void *aaddr; /* aligned physical addr */ + unsigned int size; /* total size of allocation */ + unsigned char __iomem *vaddr; /* Virtual addr */ + struct list_head allocs; +}; + struct pmem_info { struct miscdevice dev; /* physical start address of the remaped pmem space */ @@ -207,6 +213,11 @@ struct pmem_info { unsigned short quanta; } *bitm_alloc; } bitmap; + + struct { + unsigned long used; /* Bytes currently allocated */ + struct list_head alist; /* List of allocations */ + } system_mem; } allocator; int id; @@ -313,28 +324,7 @@ static struct pmem_attr pmem_attr_## name = \ #define WO_PMEM_ATTR(name) \ static struct pmem_attr pmem_attr_## name = \ PMEM_ATTR(name, S_IWUSR, NULL, store_pmem_## name) -/*HTC_START*/ -static struct dentry *root = NULL; -u32 misc_msg_pmem_qcom = 0; -static struct dentry *vidc_debugfs_root; - -static struct dentry 
*vidc_get_debugfs_root(void) -{ - if (vidc_debugfs_root == NULL) - vidc_debugfs_root = debugfs_create_dir("misc", NULL); - return vidc_debugfs_root; -} - -static void vidc_debugfs_file_create(struct dentry *root, const char *name, - u32 *var) -{ - struct dentry *vidc_debugfs_file = - debugfs_create_u32(name, S_IRUGO | S_IWUSR, root, var); - if (!vidc_debugfs_file) - pr_info("%s(): Error creating/opening file %s\n", __func__, name); -} -/*HTC_END*/ static ssize_t show_pmem(struct kobject *kobj, struct attribute *attr, char *buf) @@ -378,6 +368,8 @@ static ssize_t show_pmem_allocator_type(int id, char *buf) return scnprintf(buf, PAGE_SIZE, "%s\n", "Buddy Bestfit"); case PMEM_ALLOCATORTYPE_BITMAP: return scnprintf(buf, PAGE_SIZE, "%s\n", "Bitmap"); + case PMEM_ALLOCATORTYPE_SYSTEM: + return scnprintf(buf, PAGE_SIZE, "%s\n", "System heap"); default: return scnprintf(buf, PAGE_SIZE, "??? Invalid allocator type (%d) for this region! " @@ -552,11 +544,22 @@ static struct attribute *pmem_bitmap_attrs[] = { NULL }; +static struct attribute *pmem_system_attrs[] = { + PMEM_COMMON_SYSFS_ATTRS, + + NULL +}; + static struct kobj_type pmem_bitmap_ktype = { .sysfs_ops = &pmem_ops, .default_attrs = pmem_bitmap_attrs, }; +static struct kobj_type pmem_system_ktype = { + .sysfs_ops = &pmem_ops, + .default_attrs = pmem_system_attrs, +}; + static int get_id(struct file *file) { return MINOR(file->f_dentry->d_inode->i_rdev); @@ -568,7 +571,7 @@ static char *get_name(struct file *file) return pmem[id].name; } -int is_pmem_file(struct file *file) +static int is_pmem_file(struct file *file) { int id; @@ -588,7 +591,7 @@ static int has_allocation(struct file *file) * means that file is guaranteed not to be NULL upon entry!! 
* check is_pmem_file first if not accessed via pmem_file_ops */ struct pmem_data *pdata = file->private_data; - return pdata && pdata->index >= 0; + return pdata && pdata->index != -1; } static int is_master_owner(struct file *file) @@ -604,7 +607,8 @@ static int is_master_owner(struct file *file) master_file = fget_light(data->master_fd, &put_needed); if (master_file && data->master_file == master_file) ret = 1; - fput_light(master_file, put_needed); + if (master_file) + fput_light(master_file, put_needed); return ret; } @@ -730,10 +734,9 @@ static int pmem_free_bitmap(int id, int bitnum) /* caller should hold the lock on arena_mutex! */ int i; char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; -/*HTC_START*/ - if (misc_msg_pmem_qcom) - pr_info("[PME][%s] pmem_free_bitmap, bitnum %d\n", pmem[id].name, bitnum); -/*HTC_END*/ + + DLOG("bitnum %d\n", bitnum); + for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) { const int curr_bit = pmem[id].allocator.bitmap.bitm_alloc[i].bit; @@ -746,6 +749,7 @@ static int pmem_free_bitmap(int id, int bitnum) curr_bit, curr_bit + curr_quanta); pmem[id].allocator.bitmap.bitmap_free += curr_quanta; pmem[id].allocator.bitmap.bitm_alloc[i].bit = -1; + pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0; return 0; } } @@ -756,6 +760,27 @@ static int pmem_free_bitmap(int id, int bitnum) return -1; } +static int pmem_free_system(int id, int index) +{ + /* caller should hold the lock on arena_mutex! */ + struct alloc_list *item; + + DLOG("index %d\n", index); + if (index != 0) + item = (struct alloc_list *)index; + else + return 0; + + if (item->vaddr != NULL) { + iounmap(item->vaddr); + kfree(__va(item->addr)); + list_del(&item->allocs); + kfree(item); + } + + return 0; +} + static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs) { int i, j; @@ -778,12 +803,12 @@ static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs) const int curr_alloc = pmem[id].allocator. 
bitmap.bitm_alloc[j].bit; if (curr_alloc != -1) { + if (alloc_start == curr_alloc) + alloc_idx = j; if (alloc_start >= curr_alloc) continue; - if (curr_alloc < next_alloc) { + if (curr_alloc < next_alloc) next_alloc = curr_alloc; - alloc_idx = j; - } } } alloc_quanta = pmem[id].allocator.bitmap. @@ -804,6 +829,14 @@ static int pmem_free_space_bitmap(int id, struct pmem_freespace *fs) return 0; } +static int pmem_free_space_system(int id, struct pmem_freespace *fs) +{ + fs->total = pmem[id].size; + fs->largest = pmem[id].size; + + return 0; +} + static void pmem_revoke(struct file *file, struct pmem_data *data); static int pmem_release(struct inode *inode, struct file *file) @@ -888,10 +921,6 @@ static int pmem_open(struct inode *inode, struct file *file) DLOG("pid %u(%s) file %p(%ld) dev %s(id: %d)\n", current->pid, get_task_comm(currtask_name, current), file, file_count(file), get_name(file), id); - /* setup file->private_data to indicate its unmapped */ - /* you can only open a pmem device one time */ - if (file->private_data != NULL) - return -EINVAL; data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); if (!data) { printk(KERN_ALERT "pmem: %s: unable to allocate memory for " @@ -1153,35 +1182,28 @@ static int pmem_allocator_bitmap(const int id, /* caller should hold the lock on arena_mutex! */ int bitnum, i; unsigned int quanta_needed; -/*HTC_START*/ - if (misc_msg_pmem_qcom) - pr_info("[PME][%s] pmem_allocator_bitmap, len %ld\n", pmem[id].name, len); + DLOG("bitmap id %d, len %ld, align %u\n", id, len, align); if (!pmem[id].allocator.bitmap.bitm_alloc) { - if (misc_msg_pmem_qcom) { - printk(KERN_ALERT "[PME][%s] bitm_alloc not present! \n", - pmem[id].name); - } -/*HTC_END*/ - bitnum = -1; goto leave; +#if PMEM_DEBUG + printk(KERN_ALERT "pmem: bitm_alloc not present! 
id: %d\n", + id); +#endif + return -1; } quanta_needed = (len + pmem[id].quantum - 1) / pmem[id].quantum; -/*HTC_START*/ - if (misc_msg_pmem_qcom) { - pr_info("[PME][%s] quantum size %u quanta needed %u free %u\n", - pmem[id].name, pmem[id].quantum, quanta_needed, - pmem[id].allocator.bitmap.bitmap_free); - } + DLOG("quantum size %u quanta needed %u free %u id %d\n", + pmem[id].quantum, quanta_needed, + pmem[id].allocator.bitmap.bitmap_free, id); if (pmem[id].allocator.bitmap.bitmap_free < quanta_needed) { - if (misc_msg_pmem_qcom) { - printk(KERN_ALERT "[PME][%s] memory allocation failure. " - "PMEM memory region exhausted." - " Unable to comply with allocation request.\n", pmem[id].name); - } -/*HTC_END*/ - bitnum = -1; goto leave; +#if PMEM_DEBUG + printk(KERN_ALERT "pmem: memory allocation failure. " + "PMEM memory region exhausted, id %d." + " Unable to comply with allocation request.\n", id); +#endif + return -1; } bitnum = reserve_quanta(quanta_needed, id, align); @@ -1201,36 +1223,35 @@ static int pmem_allocator_bitmap(const int id, int j; if (!new_bitmap_allocs) { /* failed sanity check!! */ -/*HTC_START*/ - if (misc_msg_pmem_qcom) { - pr_alert("[PME][%s] pmem: bitmap_allocs number" - " wrapped around to zero! Something " - "is VERY wrong.\n", pmem[id].name); - } - bitnum = -1; goto leave; +#if PMEM_DEBUG + pr_alert("pmem: bitmap_allocs number" + " wrapped around to zero! Something " + "is VERY wrong.\n"); +#endif + return -1; } + if (new_bitmap_allocs > pmem[id].num_entries) { /* failed sanity check!! 
*/ - if (misc_msg_pmem_qcom) { - pr_alert("[PME][%s] pmem: required bitmap_allocs" - " number exceeds maximum entries possible" - " for current quanta\n", pmem[id].name); - } - - bitnum = -1; goto leave; +#if PMEM_DEBUG + pr_alert("pmem: required bitmap_allocs" + " number exceeds maximum entries possible" + " for current quanta\n"); +#endif + return -1; } + temp = krealloc(pmem[id].allocator.bitmap.bitm_alloc, new_bitmap_allocs * sizeof(*pmem[id].allocator.bitmap.bitm_alloc), GFP_KERNEL); if (!temp) { - if (misc_msg_pmem_qcom) { - pr_alert("[PME][%s] can't realloc bitmap_allocs," - " current num bitmap allocs %d\n", - pmem[id].name, pmem[id].allocator.bitmap.bitmap_allocs); - } -/*HTC_END*/ - bitnum = -1; goto leave; +#if PMEM_DEBUG + pr_alert("pmem: can't realloc bitmap_allocs," + "id %d, current num bitmap allocs %d\n", + id, pmem[id].allocator.bitmap.bitmap_allocs); +#endif + return -1; } pmem[id].allocator.bitmap.bitmap_allocs = new_bitmap_allocs; pmem[id].allocator.bitmap.bitm_alloc = temp; @@ -1239,41 +1260,79 @@ static int pmem_allocator_bitmap(const int id, pmem[id].allocator.bitmap.bitm_alloc[j].bit = -1; pmem[id].allocator.bitmap.bitm_alloc[i].quanta = 0; } -/*HTC_START*/ - if (misc_msg_pmem_qcom) { - pr_info("[PME][%s] increased # of allocated regions to %d for \n", - pmem[id].name, pmem[id].allocator.bitmap.bitmap_allocs); - } + + DLOG("increased # of allocated regions to %d for id %d\n", + pmem[id].allocator.bitmap.bitmap_allocs, id); } - if (misc_msg_pmem_qcom) - pr_info("[PME][%s] bitnum %d, bitm_alloc index %d\n", pmem[id].name, bitnum, i); -/*HTC_END*/ + + DLOG("bitnum %d, bitm_alloc index %d\n", bitnum, i); + pmem[id].allocator.bitmap.bitmap_free -= quanta_needed; pmem[id].allocator.bitmap.bitm_alloc[i].bit = bitnum; pmem[id].allocator.bitmap.bitm_alloc[i].quanta = quanta_needed; leave: - if (-1 == bitnum) { - pr_err("[PME][%s] error: pmem_allocator_bitmap failed\n", pmem[id].name); - for (i = 0; i < pmem[id].allocator.bitmap.bitmap_allocs; i++) { 
- if (pmem[id].allocator.bitmap.bitm_alloc[i].bit != -1) { - /*HTC_START*/ - if (misc_msg_pmem_qcom) { - pr_info("[PME][%s] bitm_alloc[%d].bit: %u bitm_alloc[%d].quanta: %u\n", - pmem[id].name, - i, - pmem[id].allocator.bitmap.bitm_alloc[i].bit, - i, - pmem[id].allocator.bitmap.bitm_alloc[i].quanta - ); - } - /*HTC_END*/ - } - } - } return bitnum; } -static pgprot_t pmem_phys_mem_access_prot(struct file *file, pgprot_t vma_prot) +static int pmem_allocator_system(const int id, + const unsigned long len, + const unsigned int align) +{ + /* caller should hold the lock on arena_mutex! */ + struct alloc_list *list; + unsigned long aligned_len; + int count = SYSTEM_ALLOC_RETRY; + void *buf; + + DLOG("system id %d, len %ld, align %u\n", id, len, align); + + if ((pmem[id].allocator.system_mem.used + len) > pmem[id].size) { + DLOG("requested size would be larger than quota\n"); + return -1; + } + + /* Handle alignment */ + aligned_len = len + align; + + /* Attempt allocation */ + list = kmalloc(sizeof(struct alloc_list), GFP_KERNEL); + if (list == NULL) { + printk(KERN_ERR "pmem: failed to allocate system metadata\n"); + return -1; + } + list->vaddr = NULL; + + buf = NULL; + while ((buf == NULL) && count--) { + buf = kmalloc((aligned_len), GFP_KERNEL); + if (buf == NULL) { + DLOG("pmem: kmalloc %d temporarily failed len= %ld\n", + count, aligned_len); + } + } + if (!buf) { + printk(KERN_CRIT "pmem: kmalloc failed for id= %d len= %ld\n", + id, aligned_len); + kfree(list); + return -1; + } + list->size = aligned_len; + list->addr = (void *)__pa(buf); + list->aaddr = (void *)(((unsigned int)(list->addr) + (align - 1)) & + ~(align - 1)); + + if (!pmem[id].cached) + list->vaddr = ioremap(__pa(buf), aligned_len); + else + list->vaddr = ioremap_cached(__pa(buf), aligned_len); + + INIT_LIST_HEAD(&list->allocs); + list_add(&list->allocs, &pmem[id].allocator.system_mem.alist); + + return (int)list; +} + +static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot) { int 
id = get_id(file); #ifdef pgprot_writecombine @@ -1305,8 +1364,16 @@ static unsigned long pmem_start_addr_bitmap(int id, struct pmem_data *data) return data->index * pmem[id].quantum + pmem[id].base; } +static unsigned long pmem_start_addr_system(int id, struct pmem_data *data) +{ + return (unsigned long)(((struct alloc_list *)(data->index))->aaddr); +} + static void *pmem_start_vaddr(int id, struct pmem_data *data) { + if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) + return ((struct alloc_list *)(data->index))->vaddr; + else return pmem[id].start_addr(id, data) - pmem[id].base + pmem[id].vbase; } @@ -1344,6 +1411,18 @@ static unsigned long pmem_len_bitmap(int id, struct pmem_data *data) return ret; } +static unsigned long pmem_len_system(int id, struct pmem_data *data) +{ + unsigned long ret = 0; + + mutex_lock(&pmem[id].arena_mutex); + + ret = ((struct alloc_list *)data->index)->size; + mutex_unlock(&pmem[id].arena_mutex); + + return ret; +} + static int pmem_map_garbage(int id, struct vm_area_struct *vma, struct pmem_data *data, unsigned long offset, unsigned long len) @@ -1481,6 +1560,10 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma) unsigned long vma_size = vma->vm_end - vma->vm_start; int ret = 0, id = get_id(file); + if (!data) { + pr_err("pmem: Invalid file descriptor, no private data\n"); + return -EINVAL; + } #if PMEM_DEBUG_MSGS char currtask_name[FIELD_SIZEOF(struct task_struct, comm) + 1]; #endif @@ -1509,24 +1592,21 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma) goto error; } /* if file->private_data == unalloced, alloc*/ - if (data && data->index == -1) { + if (data->index == -1) { mutex_lock(&pmem[id].arena_mutex); index = pmem[id].allocate(id, vma->vm_end - vma->vm_start, SZ_4K); mutex_unlock(&pmem[id].arena_mutex); - data->index = index; - if (data->index < 0) { + /* either no space was available or an error occured */ + if (index == -1) { pr_err("pmem: mmap unable to allocate memory" "on 
%s\n", get_name(file)); + ret = -ENOMEM; + goto error; } - } - - /* either no space was available or an error occured */ - if (!has_allocation(file)) { - ret = -ENOMEM; - pr_err("pmem: could not find allocation for map.\n"); - goto error; + /* store the index of a successful allocation */ + data->index = index; } if (pmem[id].len(id, data) < vma_size) { @@ -1541,7 +1621,7 @@ static int pmem_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_pgoff = pmem[id].start_addr(id, data) >> PAGE_SHIFT; - vma->vm_page_prot = pmem_phys_mem_access_prot(file, vma->vm_page_prot); + vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); if (data->flags & PMEM_FLAGS_CONNECTED) { struct pmem_region_node *region_node; @@ -1770,6 +1850,21 @@ void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) goto end; vaddr = pmem_start_vaddr(id, data); + + if (pmem[id].allocator_type == PMEM_ALLOCATORTYPE_SYSTEM) { + dmac_flush_range(vaddr, + (void *)((unsigned long)vaddr + + ((struct alloc_list *)(data->index))->size)); +#ifdef CONFIG_OUTER_CACHE + phy_start = pmem_start_addr_system(id, data); + + phy_end = phy_start + + ((struct alloc_list *)(data->index))->size; + + outer_flush_range(phy_start, phy_end); +#endif + goto end; + } /* if this isn't a submmapped file, flush the whole thing */ if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { dmac_flush_range(vaddr, vaddr + pmem[id].len(id, data)); @@ -1958,6 +2053,11 @@ static int pmem_kapi_free_index_bitmap(const int32_t physaddr, int id) bit_from_paddr(id, physaddr) : -1; } +static int pmem_kapi_free_index_system(const int32_t physaddr, int id) +{ + return 0; +} + int pmem_kfree(const int32_t physaddr) { int i; @@ -2503,6 +2603,17 @@ static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) return pmem_cache_maint(file, cmd, &pmem_addr); } + case PMEM_CACHE_FLUSH: + { + struct pmem_region region; + + if (copy_from_user(®ion, (void __user *)arg, + sizeof(struct pmem_region))) 
+ return -EFAULT; + + flush_pmem_file(file, region.offset, region.len); + break; + } default: if (pmem[id].ioctl) return pmem[id].ioctl(file, cmd, arg); @@ -2891,6 +3002,35 @@ int pmem_setup(struct android_pmem_platform_data *pdata, pmem[id].size, pmem[id].quantum); break; + case PMEM_ALLOCATORTYPE_SYSTEM: + +#ifdef CONFIG_MEMORY_HOTPLUG + goto err_no_mem; +#endif + + INIT_LIST_HEAD(&pmem[id].allocator.system_mem.alist); + + pmem[id].allocator.system_mem.used = 0; + pmem[id].vbase = NULL; + + if (kobject_init_and_add(&pmem[id].kobj, + &pmem_system_ktype, NULL, + "%s", pdata->name)) + goto out_put_kobj; + + pmem[id].allocate = pmem_allocator_system; + pmem[id].free = pmem_free_system; + pmem[id].free_space = pmem_free_space_system; + pmem[id].kapi_free_index = pmem_kapi_free_index_system; + pmem[id].len = pmem_len_system; + pmem[id].start_addr = pmem_start_addr_system; + pmem[id].num_entries = 0; + pmem[id].quantum = PAGE_SIZE; + + DLOG("system allocator id %d (%s), raw size %lu\n", + id, pdata->name, pmem[id].size); + break; + default: pr_alert("Invalid allocator type (%d) for pmem driver\n", pdata->allocator_type); @@ -2923,7 +3063,8 @@ int pmem_setup(struct android_pmem_platform_data *pdata, if (pmem[id].memory_state == MEMORY_UNSTABLE_NO_MEMORY_ALLOCATED) return 0; - if (!is_kernel_memtype) { + if ((!is_kernel_memtype) && + (pmem[id].allocator_type != PMEM_ALLOCATORTYPE_SYSTEM)) { ioremap_pmem(id); if (pmem[id].vbase == 0) { pr_err("pmem: ioremap failed for device %s\n", @@ -3017,14 +3158,7 @@ static int __init pmem_init(void) pr_err("pmem(%s):kset_create_and_add fail\n", __func__); return -ENOMEM; } -/*HTC_START*/ -root = vidc_get_debugfs_root(); - if (root) { - vidc_debugfs_file_create(root, "misc_msg_pmem_qcom", - (u32 *) &misc_msg_pmem_qcom); - } -/*HTC_END*/ #ifdef CONFIG_MEMORY_HOTPLUG hotplug_memory_notifier(pmem_memory_callback, 0); #endif @@ -3038,1352 +3172,4 @@ static void __exit pmem_exit(void) module_init(pmem_init); module_exit(pmem_exit); -#else 
-/* drivers/android/pmem.c - * - * Copyright (C) 2007 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include - -#define PMEM_MAX_DEVICES 10 -#define PMEM_MAX_ORDER 128 -#define PMEM_MIN_ALLOC PAGE_SIZE - -#define PMEM_DEBUG 1 - -/* indicates that a refernce to this file has been taken via get_pmem_file, - * the file should not be released until put_pmem_file is called */ -#define PMEM_FLAGS_BUSY 0x1 -/* indicates that this is a suballocation of a larger master range */ -#define PMEM_FLAGS_CONNECTED 0x1 << 1 -/* indicates this is a master and not a sub allocation and that it is mmaped */ -#define PMEM_FLAGS_MASTERMAP 0x1 << 2 -/* submap and unsubmap flags indicate: - * 00: subregion has never been mmaped - * 10: subregion has been mmaped, reference to the mm was taken - * 11: subretion has ben released, refernece to the mm still held - * 01: subretion has been released, reference to the mm has been released - */ -#define PMEM_FLAGS_SUBMAP 0x1 << 3 -#define PMEM_FLAGS_UNSUBMAP 0x1 << 4 - - -struct pmem_data { - /* in alloc mode: an index into the bitmap - * in no_alloc mode: the size of the allocation */ - int index; - /* see flags above for descriptions */ - unsigned int flags; - /* protects this data field, if the mm_mmap sem will be held at the - * same time as this sem, the mm sem must be taken first (as this is - * the order for vma_open and vma_close ops */ - struct rw_semaphore sem; - /* info about the 
mmaping process */ - struct vm_area_struct *vma; - /* task struct of the mapping process */ - struct task_struct *task; - /* process id of teh mapping process */ - pid_t pid; - /* file descriptor of the master */ - int master_fd; - /* file struct of the master */ - struct file *master_file; - /* a list of currently available regions if this is a suballocation */ - struct list_head region_list; - /* a linked list of data so we can access them for debugging */ - struct list_head list; -#if PMEM_DEBUG - int ref; -#endif -}; - -struct pmem_bits { - unsigned allocated:1; /* 1 if allocated, 0 if free */ - unsigned order:7; /* size of the region in pmem space */ -}; - -struct pmem_region_node { - struct pmem_region region; - struct list_head list; -}; - -#define PMEM_DEBUG_MSGS 0 -#if PMEM_DEBUG_MSGS -#define DLOG(fmt,args...) \ - do { printk(KERN_INFO "[%s:%s:%d] "fmt, __FILE__, __func__, __LINE__, \ - ##args); } \ - while (0) -#else -#define DLOG(x...) do {} while (0) -#endif - -struct pmem_info { - struct miscdevice dev; - /* physical start address of the remaped pmem space */ - unsigned long base; - /* vitual start address of the remaped pmem space */ - unsigned char __iomem *vbase; - /* total size of the pmem space */ - unsigned long size; - /* number of entries in the pmem space */ - unsigned long num_entries; - /* pfn of the garbage page in memory */ - unsigned long garbage_pfn; - /* index of the garbage page in the pmem space */ - int garbage_index; - /* the bitmap for the region indicating which entries are allocated - * and which are free */ - struct pmem_bits *bitmap; - /* indicates the region should not be managed with an allocator */ - unsigned no_allocator; - /* indicates maps of this region should be cached, if a mix of - * cached and uncached is desired, set this and open the device with - * O_SYNC to get an uncached region */ - unsigned cached; - unsigned buffered; - /* in no_allocator mode the first mapper gets the whole space and sets - * this flag */ - 
unsigned allocated; - /* for debugging, creates a list of pmem file structs, the - * data_list_sem should be taken before pmem_data->sem if both are - * needed */ - struct semaphore data_list_sem; - struct list_head data_list; - /* pmem_sem protects the bitmap array - * a write lock should be held when modifying entries in bitmap - * a read lock should be held when reading data from bits or - * dereferencing a pointer into bitmap - * - * pmem_data->sem protects the pmem data of a particular file - * Many of the function that require the pmem_data->sem have a non- - * locking version for when the caller is already holding that sem. - * - * IF YOU TAKE BOTH LOCKS TAKE THEM IN THIS ORDER: - * down(pmem_data->sem) => down(bitmap_sem) - */ - struct rw_semaphore bitmap_sem; - - long (*ioctl)(struct file *, unsigned int, unsigned long); - int (*release)(struct inode *, struct file *); -}; - -static struct pmem_info pmem[PMEM_MAX_DEVICES]; -static int id_count; - -#define PMEM_IS_FREE(id, index) !(pmem[id].bitmap[index].allocated) -#define PMEM_ORDER(id, index) pmem[id].bitmap[index].order -#define PMEM_BUDDY_INDEX(id, index) (index ^ (1 << PMEM_ORDER(id, index))) -#define PMEM_NEXT_INDEX(id, index) (index + (1 << PMEM_ORDER(id, index))) -#define PMEM_OFFSET(index) (index * PMEM_MIN_ALLOC) -#define PMEM_START_ADDR(id, index) (PMEM_OFFSET(index) + pmem[id].base) -#define PMEM_LEN(id, index) ((1 << PMEM_ORDER(id, index)) * PMEM_MIN_ALLOC) -#define PMEM_END_ADDR(id, index) (PMEM_START_ADDR(id, index) + \ - PMEM_LEN(id, index)) -#define PMEM_START_VADDR(id, index) (PMEM_OFFSET(id, index) + pmem[id].vbase) -#define PMEM_END_VADDR(id, index) (PMEM_START_VADDR(id, index) + \ - PMEM_LEN(id, index)) -#define PMEM_REVOKED(data) (data->flags & PMEM_FLAGS_REVOKED) -#define PMEM_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK))) -#define PMEM_IS_SUBMAP(data) ((data->flags & PMEM_FLAGS_SUBMAP) && \ - (!(data->flags & PMEM_FLAGS_UNSUBMAP))) - -static int pmem_release(struct inode *, struct 
file *); -static int pmem_mmap(struct file *, struct vm_area_struct *); -static int pmem_open(struct inode *, struct file *); -static long pmem_ioctl(struct file *, unsigned int, unsigned long); - -struct file_operations pmem_fops = { - .release = pmem_release, - .mmap = pmem_mmap, - .open = pmem_open, - .unlocked_ioctl = pmem_ioctl, -}; - -static int get_id(struct file *file) -{ - return MINOR(file->f_dentry->d_inode->i_rdev); -} - -int is_pmem_file(struct file *file) -{ - int id; - - if (unlikely(!file || !file->f_dentry || !file->f_dentry->d_inode)) - return 0; - id = get_id(file); - if (unlikely(id >= PMEM_MAX_DEVICES)) - return 0; - if (unlikely(file->f_dentry->d_inode->i_rdev != - MKDEV(MISC_MAJOR, pmem[id].dev.minor))) - return 0; - return 1; -} - -static int has_allocation(struct file *file) -{ - struct pmem_data *data; - /* check is_pmem_file first if not accessed via pmem_file_ops */ - - if (unlikely(!file->private_data)) - return 0; - data = (struct pmem_data *)file->private_data; - if (unlikely(data->index < 0)) - return 0; - return 1; -} - -static int is_master_owner(struct file *file) -{ - struct file *master_file; - struct pmem_data *data; - int put_needed, ret = 0; - - if (!is_pmem_file(file) || !has_allocation(file)) - return 0; - data = (struct pmem_data *)file->private_data; - if (PMEM_FLAGS_MASTERMAP & data->flags) - return 1; - master_file = fget_light(data->master_fd, &put_needed); - if (master_file && data->master_file == master_file) - ret = 1; - fput_light(master_file, put_needed); - return ret; -} - -static int pmem_free(int id, int index) -{ - /* caller should hold the write lock on pmem_sem! 
*/ - int buddy, curr = index; - DLOG("index %d\n", index); - - if (pmem[id].no_allocator) { - pmem[id].allocated = 0; - return 0; - } - /* clean up the bitmap, merging any buddies */ - pmem[id].bitmap[curr].allocated = 0; - /* find a slots buddy Buddy# = Slot# ^ (1 << order) - * if the buddy is also free merge them - * repeat until the buddy is not free or end of the bitmap is reached - */ - do { - buddy = PMEM_BUDDY_INDEX(id, curr); - if (buddy < pmem[id].num_entries && - PMEM_IS_FREE(id, buddy) && - PMEM_ORDER(id, buddy) == PMEM_ORDER(id, curr)) { - PMEM_ORDER(id, buddy)++; - PMEM_ORDER(id, curr)++; - curr = min(buddy, curr); - } else { - break; - } - } while (curr < pmem[id].num_entries); - - return 0; -} - -static void pmem_revoke(struct file *file, struct pmem_data *data); - -static int pmem_release(struct inode *inode, struct file *file) -{ - struct pmem_data *data = (struct pmem_data *)file->private_data; - struct pmem_region_node *region_node; - struct list_head *elt, *elt2; - int id = get_id(file), ret = 0; - - - down(&pmem[id].data_list_sem); - /* if this file is a master, revoke all the memory in the connected - * files */ - if (PMEM_FLAGS_MASTERMAP & data->flags) { - struct pmem_data *sub_data; - list_for_each(elt, &pmem[id].data_list) { - sub_data = list_entry(elt, struct pmem_data, list); - down_read(&sub_data->sem); - if (PMEM_IS_SUBMAP(sub_data) && - file == sub_data->master_file) { - up_read(&sub_data->sem); - pmem_revoke(file, sub_data); - } else - up_read(&sub_data->sem); - } - } - list_del(&data->list); - up(&pmem[id].data_list_sem); - - - down_write(&data->sem); - - /* if its not a conencted file and it has an allocation, free it */ - if (!(PMEM_FLAGS_CONNECTED & data->flags) && has_allocation(file)) { - down_write(&pmem[id].bitmap_sem); - ret = pmem_free(id, data->index); - up_write(&pmem[id].bitmap_sem); - } - - /* if this file is a submap (mapped, connected file), downref the - * task struct */ - if (PMEM_FLAGS_SUBMAP & data->flags) - if 
(data->task) { - put_task_struct(data->task); - data->task = NULL; - } - - file->private_data = NULL; - - list_for_each_safe(elt, elt2, &data->region_list) { - region_node = list_entry(elt, struct pmem_region_node, list); - list_del(elt); - kfree(region_node); - } - BUG_ON(!list_empty(&data->region_list)); - - up_write(&data->sem); - kfree(data); - if (pmem[id].release) - ret = pmem[id].release(inode, file); - - return ret; -} - -static int pmem_open(struct inode *inode, struct file *file) -{ - struct pmem_data *data; - int id = get_id(file); - int ret = 0; - - DLOG("current %u file %p(%d)\n", current->pid, file, file_count(file)); - /* setup file->private_data to indicate its unmapped */ - /* you can only open a pmem device one time */ - if (file->private_data != NULL) - return -1; - data = kmalloc(sizeof(struct pmem_data), GFP_KERNEL); - if (!data) { - printk("pmem: unable to allocate memory for pmem metadata."); - return -1; - } - data->flags = 0; - data->index = -1; - data->task = NULL; - data->vma = NULL; - data->pid = 0; - data->master_file = NULL; -#if PMEM_DEBUG - data->ref = 0; -#endif - INIT_LIST_HEAD(&data->region_list); - init_rwsem(&data->sem); - - file->private_data = data; - INIT_LIST_HEAD(&data->list); - - down(&pmem[id].data_list_sem); - list_add(&data->list, &pmem[id].data_list); - up(&pmem[id].data_list_sem); - return ret; -} - -static unsigned long pmem_order(unsigned long len) -{ - int i; - - len = (len + PMEM_MIN_ALLOC - 1)/PMEM_MIN_ALLOC; - len--; - for (i = 0; i < sizeof(len)*8; i++) - if (len >> i == 0) - break; - return i; -} - -static int pmem_allocate(int id, unsigned long len) -{ - /* caller should hold the write lock on pmem_sem! 
*/ - /* return the corresponding pdata[] entry */ - int curr = 0; - int end = pmem[id].num_entries; - int best_fit = -1; - unsigned long order = pmem_order(len); - - if (pmem[id].no_allocator) { - DLOG("no allocator"); - if ((len > pmem[id].size) || pmem[id].allocated) - return -1; - pmem[id].allocated = 1; - return len; - } - - if (order > PMEM_MAX_ORDER) - return -1; - DLOG("order %lx\n", order); - - /* look through the bitmap: - * if you find a free slot of the correct order use it - * otherwise, use the best fit (smallest with size > order) slot - */ - while (curr < end) { - if (PMEM_IS_FREE(id, curr)) { - if (PMEM_ORDER(id, curr) == (unsigned char)order) { - /* set the not free bit and clear others */ - best_fit = curr; - break; - } - if (PMEM_ORDER(id, curr) > (unsigned char)order && - (best_fit < 0 || - PMEM_ORDER(id, curr) < PMEM_ORDER(id, best_fit))) - best_fit = curr; - } - curr = PMEM_NEXT_INDEX(id, curr); - } - - /* if best_fit < 0, there are no suitable slots, - * return an error - */ - if (best_fit < 0) { - printk("pmem: no space left to allocate!\n"); - return -1; - } - - /* now partition the best fit: - * split the slot into 2 buddies of order - 1 - * repeat until the slot is of the correct order - */ - while (PMEM_ORDER(id, best_fit) > (unsigned char)order) { - int buddy; - PMEM_ORDER(id, best_fit) -= 1; - buddy = PMEM_BUDDY_INDEX(id, best_fit); - PMEM_ORDER(id, buddy) = PMEM_ORDER(id, best_fit); - } - pmem[id].bitmap[best_fit].allocated = 1; - return best_fit; -} - -static pgprot_t phys_mem_access_prot(struct file *file, pgprot_t vma_prot) -{ - int id = get_id(file); -#ifdef pgprot_noncached - if (pmem[id].cached == 0 || file->f_flags & O_SYNC) - return pgprot_noncached(vma_prot); -#endif -#ifdef pgprot_ext_buffered - else if (pmem[id].buffered) - return pgprot_ext_buffered(vma_prot); -#endif - return vma_prot; -} - -static unsigned long pmem_start_addr(int id, struct pmem_data *data) -{ - if (pmem[id].no_allocator) - return PMEM_START_ADDR(id, 
0); - else - return PMEM_START_ADDR(id, data->index); - -} - -static void *pmem_start_vaddr(int id, struct pmem_data *data) -{ - return pmem_start_addr(id, data) - pmem[id].base + pmem[id].vbase; -} - -static unsigned long pmem_len(int id, struct pmem_data *data) -{ - if (pmem[id].no_allocator) - return data->index; - else - return PMEM_LEN(id, data->index); -} - -static int pmem_map_garbage(int id, struct vm_area_struct *vma, - struct pmem_data *data, unsigned long offset, - unsigned long len) -{ - int i, garbage_pages = len >> PAGE_SHIFT; - - vma->vm_flags |= VM_IO | VM_RESERVED | VM_PFNMAP | VM_SHARED | VM_WRITE; - for (i = 0; i < garbage_pages; i++) { - if (vm_insert_pfn(vma, vma->vm_start + offset + (i * PAGE_SIZE), - pmem[id].garbage_pfn)) - return -EAGAIN; - } - return 0; -} - -static int pmem_unmap_pfn_range(int id, struct vm_area_struct *vma, - struct pmem_data *data, unsigned long offset, - unsigned long len) -{ - int garbage_pages; - DLOG("unmap offset %lx len %lx\n", offset, len); - - BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); - - garbage_pages = len >> PAGE_SHIFT; - zap_page_range(vma, vma->vm_start + offset, len, NULL); - pmem_map_garbage(id, vma, data, offset, len); - return 0; -} - -static int pmem_map_pfn_range(int id, struct vm_area_struct *vma, - struct pmem_data *data, unsigned long offset, - unsigned long len) -{ - DLOG("map offset %lx len %lx\n", offset, len); - BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_start)); - BUG_ON(!PMEM_IS_PAGE_ALIGNED(vma->vm_end)); - BUG_ON(!PMEM_IS_PAGE_ALIGNED(len)); - BUG_ON(!PMEM_IS_PAGE_ALIGNED(offset)); - - if (io_remap_pfn_range(vma, vma->vm_start + offset, - (pmem_start_addr(id, data) + offset) >> PAGE_SHIFT, - len, vma->vm_page_prot)) { - return -EAGAIN; - } - return 0; -} - -static int pmem_remap_pfn_range(int id, struct vm_area_struct *vma, - struct pmem_data *data, unsigned long offset, - unsigned long len) -{ - /* hold the mm semp for the vma you are modifying when you call this */ - BUG_ON(!vma); - 
zap_page_range(vma, vma->vm_start + offset, len, NULL); - return pmem_map_pfn_range(id, vma, data, offset, len); -} - -static void pmem_vma_open(struct vm_area_struct *vma) -{ - struct file *file = vma->vm_file; - struct pmem_data *data = file->private_data; - int id = get_id(file); - /* this should never be called as we don't support copying pmem - * ranges via fork */ - BUG_ON(!has_allocation(file)); - down_write(&data->sem); - /* remap the garbage pages, forkers don't get access to the data */ - pmem_unmap_pfn_range(id, vma, data, 0, vma->vm_start - vma->vm_end); - up_write(&data->sem); -} - -static void pmem_vma_close(struct vm_area_struct *vma) -{ - struct file *file = vma->vm_file; - struct pmem_data *data = file->private_data; - - DLOG("current %u ppid %u file %p count %d\n", current->pid, - current->parent->pid, file, file_count(file)); - if (unlikely(!is_pmem_file(file) || !has_allocation(file))) { - printk(KERN_WARNING "pmem: something is very wrong, you are " - "closing a vm backing an allocation that doesn't " - "exist!\n"); - return; - } - down_write(&data->sem); - if (data->vma == vma) { - data->vma = NULL; - if ((data->flags & PMEM_FLAGS_CONNECTED) && - (data->flags & PMEM_FLAGS_SUBMAP)) - data->flags |= PMEM_FLAGS_UNSUBMAP; - } - /* the kernel is going to free this vma now anyway */ - up_write(&data->sem); -} - -static struct vm_operations_struct vm_ops = { - .open = pmem_vma_open, - .close = pmem_vma_close, -}; - -static int pmem_mmap(struct file *file, struct vm_area_struct *vma) -{ - struct pmem_data *data; - int index; - unsigned long vma_size = vma->vm_end - vma->vm_start; - int ret = 0, id = get_id(file); - - if (vma->vm_pgoff || !PMEM_IS_PAGE_ALIGNED(vma_size)) { -#if PMEM_DEBUG - printk(KERN_ERR "pmem: mmaps must be at offset zero, aligned" - " and a multiple of pages_size.\n"); -#endif - return -EINVAL; - } - - data = (struct pmem_data *)file->private_data; - down_write(&data->sem); - /* check this file isn't already mmaped, for submaps 
check this file - * has never been mmaped */ - if ((data->flags & PMEM_FLAGS_MASTERMAP) || - (data->flags & PMEM_FLAGS_SUBMAP) || - (data->flags & PMEM_FLAGS_UNSUBMAP)) { -#if PMEM_DEBUG - printk(KERN_ERR "pmem: you can only mmap a pmem file once, " - "this file is already mmaped. %x\n", data->flags); -#endif - ret = -EINVAL; - goto error; - } - /* if file->private_data == unalloced, alloc*/ - if (data && data->index == -1) { - down_write(&pmem[id].bitmap_sem); - index = pmem_allocate(id, vma->vm_end - vma->vm_start); - up_write(&pmem[id].bitmap_sem); - data->index = index; - } - /* either no space was available or an error occured */ - if (!has_allocation(file)) { - ret = -EINVAL; - printk("pmem: could not find allocation for map.\n"); - goto error; - } - - if (pmem_len(id, data) < vma_size) { -#if PMEM_DEBUG - printk(KERN_WARNING "pmem: mmap size [%lu] does not match" - "size of backing region [%lu].\n", vma_size, - pmem_len(id, data)); -#endif - ret = -EINVAL; - goto error; - } - - vma->vm_pgoff = pmem_start_addr(id, data) >> PAGE_SHIFT; - vma->vm_page_prot = phys_mem_access_prot(file, vma->vm_page_prot); - - if (data->flags & PMEM_FLAGS_CONNECTED) { - struct pmem_region_node *region_node; - struct list_head *elt; - if (pmem_map_garbage(id, vma, data, 0, vma_size)) { - printk("pmem: mmap failed in kernel!\n"); - ret = -EAGAIN; - goto error; - } - list_for_each(elt, &data->region_list) { - region_node = list_entry(elt, struct pmem_region_node, - list); - DLOG("remapping file: %p %lx %lx\n", file, - region_node->region.offset, - region_node->region.len); - if (pmem_remap_pfn_range(id, vma, data, - region_node->region.offset, - region_node->region.len)) { - ret = -EAGAIN; - goto error; - } - } - data->flags |= PMEM_FLAGS_SUBMAP; - get_task_struct(current->group_leader); - data->task = current->group_leader; - data->vma = vma; -#if PMEM_DEBUG - data->pid = current->pid; -#endif - DLOG("submmapped file %p vma %p pid %u\n", file, vma, - current->pid); - } else { - if 
(pmem_map_pfn_range(id, vma, data, 0, vma_size)) { - printk(KERN_INFO "pmem: mmap failed in kernel!\n"); - ret = -EAGAIN; - goto error; - } - data->flags |= PMEM_FLAGS_MASTERMAP; - data->pid = current->pid; - } - vma->vm_ops = &vm_ops; -error: - up_write(&data->sem); - return ret; -} - -/* the following are the api for accessing pmem regions by other drivers - * from inside the kernel */ -int get_pmem_user_addr(struct file *file, unsigned long *start, - unsigned long *len) -{ - struct pmem_data *data; - if (!is_pmem_file(file) || !has_allocation(file)) { -#if PMEM_DEBUG - printk(KERN_INFO "pmem: requested pmem data from invalid" - "file.\n"); -#endif - return -1; - } - data = (struct pmem_data *)file->private_data; - down_read(&data->sem); - if (data->vma) { - *start = data->vma->vm_start; - *len = data->vma->vm_end - data->vma->vm_start; - } else { - *start = 0; - *len = 0; - } - up_read(&data->sem); - return 0; -} - -int get_pmem_addr(struct file *file, unsigned long *start, - unsigned long *vstart, unsigned long *len) -{ - struct pmem_data *data; - int id; - - if (!is_pmem_file(file) || !has_allocation(file)) { - return -1; - } - - data = (struct pmem_data *)file->private_data; - if (data->index == -1) { -#if PMEM_DEBUG - printk(KERN_INFO "pmem: requested pmem data from file with no " - "allocation.\n"); - return -1; -#endif - } - id = get_id(file); - - down_read(&data->sem); - *start = pmem_start_addr(id, data); - *len = pmem_len(id, data); - *vstart = (unsigned long)pmem_start_vaddr(id, data); - up_read(&data->sem); -#if PMEM_DEBUG - down_write(&data->sem); - data->ref++; - up_write(&data->sem); -#endif - return 0; -} - -int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, - unsigned long *len, struct file **filp) -{ - struct file *file; - - file = fget(fd); - if (unlikely(file == NULL)) { - printk(KERN_INFO "pmem: requested data from file descriptor " - "that doesn't exist."); - return -1; - } - - if (get_pmem_addr(file, start, vstart, len)) 
- goto end; - - if (filp) - *filp = file; - return 0; -end: - fput(file); - return -1; -} - -void put_pmem_file(struct file *file) -{ - struct pmem_data *data; - int id; - - if (!is_pmem_file(file)) - return; - id = get_id(file); - data = (struct pmem_data *)file->private_data; -#if PMEM_DEBUG - down_write(&data->sem); - if (data->ref == 0) { - printk("pmem: pmem_put > pmem_get %s (pid %d)\n", - pmem[id].dev.name, data->pid); - BUG(); - } - data->ref--; - up_write(&data->sem); -#endif - fput(file); -} - -void flush_pmem_file(struct file *file, unsigned long offset, unsigned long len) -{ - struct pmem_data *data; - int id; - void *vaddr; - struct pmem_region_node *region_node; - struct list_head *elt; - void *flush_start, *flush_end; - - if (!is_pmem_file(file) || !has_allocation(file)) { - return; - } - - id = get_id(file); - data = (struct pmem_data *)file->private_data; - if (!pmem[id].cached || file->f_flags & O_SYNC) - return; - - down_read(&data->sem); - vaddr = pmem_start_vaddr(id, data); - /* if this isn't a submmapped file, flush the whole thing */ - if (unlikely(!(data->flags & PMEM_FLAGS_CONNECTED))) { - dmac_flush_range(vaddr, vaddr + pmem_len(id, data)); - goto end; - } - /* otherwise, flush the region of the file we are drawing */ - list_for_each(elt, &data->region_list) { - region_node = list_entry(elt, struct pmem_region_node, list); - if ((offset >= region_node->region.offset) && - ((offset + len) <= (region_node->region.offset + - region_node->region.len))) { - flush_start = vaddr + region_node->region.offset; - flush_end = flush_start + region_node->region.len; - dmac_flush_range(flush_start, flush_end); - break; - } - } -end: - up_read(&data->sem); -} - -static int pmem_connect(unsigned long connect, struct file *file) -{ - struct pmem_data *data = (struct pmem_data *)file->private_data; - struct pmem_data *src_data; - struct file *src_file; - int ret = 0, put_needed; - - down_write(&data->sem); - /* retrieve the src file and check it is a pmem 
file with an alloc */ - src_file = fget_light(connect, &put_needed); - DLOG("connect %p to %p\n", file, src_file); - if (!src_file) { - printk("pmem: src file not found!\n"); - ret = -EINVAL; - goto err_no_file; - } - if (unlikely(!is_pmem_file(src_file) || !has_allocation(src_file))) { - printk(KERN_INFO "pmem: src file is not a pmem file or has no " - "alloc!\n"); - ret = -EINVAL; - goto err_bad_file; - } - src_data = (struct pmem_data *)src_file->private_data; - - if (has_allocation(file) && (data->index != src_data->index)) { - printk("pmem: file is already mapped but doesn't match this" - " src_file!\n"); - ret = -EINVAL; - goto err_bad_file; - } - data->index = src_data->index; - data->flags |= PMEM_FLAGS_CONNECTED; - data->master_fd = connect; - data->master_file = src_file; - -err_bad_file: - fput_light(src_file, put_needed); -err_no_file: - up_write(&data->sem); - return ret; -} - -static void pmem_unlock_data_and_mm(struct pmem_data *data, - struct mm_struct *mm) -{ - up_write(&data->sem); - if (mm != NULL) { - up_write(&mm->mmap_sem); - mmput(mm); - } -} - -static int pmem_lock_data_and_mm(struct file *file, struct pmem_data *data, - struct mm_struct **locked_mm) -{ - int ret = 0; - struct mm_struct *mm = NULL; - *locked_mm = NULL; -lock_mm: - down_read(&data->sem); - if (PMEM_IS_SUBMAP(data)) { - mm = get_task_mm(data->task); - if (!mm) { -#if PMEM_DEBUG - printk("pmem: can't remap task is gone!\n"); -#endif - up_read(&data->sem); - return -1; - } - } - up_read(&data->sem); - - if (mm) - down_write(&mm->mmap_sem); - - down_write(&data->sem); - /* check that the file didn't get mmaped before we could take the - * data sem, this should be safe b/c you can only submap each file - * once */ - if (PMEM_IS_SUBMAP(data) && !mm) { - pmem_unlock_data_and_mm(data, mm); - up_write(&data->sem); - goto lock_mm; - } - /* now check that vma.mm is still there, it could have been - * deleted by vma_close before we could get the data->sem */ - if ((data->flags & 
PMEM_FLAGS_UNSUBMAP) && (mm != NULL)) { - /* might as well release this */ - if (data->flags & PMEM_FLAGS_SUBMAP) { - put_task_struct(data->task); - data->task = NULL; - /* lower the submap flag to show the mm is gone */ - data->flags &= ~(PMEM_FLAGS_SUBMAP); - } - pmem_unlock_data_and_mm(data, mm); - return -1; - } - *locked_mm = mm; - return ret; -} - -int pmem_remap(struct pmem_region *region, struct file *file, - unsigned operation) -{ - int ret; - struct pmem_region_node *region_node; - struct mm_struct *mm = NULL; - struct list_head *elt, *elt2; - int id = get_id(file); - struct pmem_data *data = (struct pmem_data *)file->private_data; - - /* pmem region must be aligned on a page boundry */ - if (unlikely(!PMEM_IS_PAGE_ALIGNED(region->offset) || - !PMEM_IS_PAGE_ALIGNED(region->len))) { -#if PMEM_DEBUG - printk("pmem: request for unaligned pmem suballocation " - "%lx %lx\n", region->offset, region->len); -#endif - return -EINVAL; - } - - /* if userspace requests a region of len 0, there's nothing to do */ - if (region->len == 0) - return 0; - - /* lock the mm and data */ - ret = pmem_lock_data_and_mm(file, data, &mm); - if (ret) - return 0; - - /* only the owner of the master file can remap the client fds - * that back in it */ - if (!is_master_owner(file)) { -#if PMEM_DEBUG - printk("pmem: remap requested from non-master process\n"); -#endif - ret = -EINVAL; - goto err; - } - - /* check that the requested range is within the src allocation */ - if (unlikely((region->offset > pmem_len(id, data)) || - (region->len > pmem_len(id, data)) || - (region->offset + region->len > pmem_len(id, data)))) { -#if PMEM_DEBUG - printk(KERN_INFO "pmem: suballoc doesn't fit in src_file!\n"); -#endif - ret = -EINVAL; - goto err; - } - - if (operation == PMEM_MAP) { - region_node = kmalloc(sizeof(struct pmem_region_node), - GFP_KERNEL); - if (!region_node) { - ret = -ENOMEM; -#if PMEM_DEBUG - printk(KERN_INFO "No space to allocate metadata!"); -#endif - goto err; - } - 
region_node->region = *region; - list_add(&region_node->list, &data->region_list); - } else if (operation == PMEM_UNMAP) { - int found = 0; - list_for_each_safe(elt, elt2, &data->region_list) { - region_node = list_entry(elt, struct pmem_region_node, - list); - if (region->len == 0 || - (region_node->region.offset == region->offset && - region_node->region.len == region->len)) { - list_del(elt); - kfree(region_node); - found = 1; - } - } - if (!found) { -#if PMEM_DEBUG - printk("pmem: Unmap region does not map any mapped " - "region!"); -#endif - ret = -EINVAL; - goto err; - } - } - - if (data->vma && PMEM_IS_SUBMAP(data)) { - if (operation == PMEM_MAP) - ret = pmem_remap_pfn_range(id, data->vma, data, - region->offset, region->len); - else if (operation == PMEM_UNMAP) - ret = pmem_unmap_pfn_range(id, data->vma, data, - region->offset, region->len); - } - -err: - pmem_unlock_data_and_mm(data, mm); - return ret; -} - -static void pmem_revoke(struct file *file, struct pmem_data *data) -{ - struct pmem_region_node *region_node; - struct list_head *elt, *elt2; - struct mm_struct *mm = NULL; - int id = get_id(file); - int ret = 0; - - data->master_file = NULL; - ret = pmem_lock_data_and_mm(file, data, &mm); - /* if lock_data_and_mm fails either the task that mapped the fd, or - * the vma that mapped it have already gone away, nothing more - * needs to be done */ - if (ret) - return; - /* unmap everything */ - /* delete the regions and region list nothing is mapped any more */ - if (data->vma) - list_for_each_safe(elt, elt2, &data->region_list) { - region_node = list_entry(elt, struct pmem_region_node, - list); - pmem_unmap_pfn_range(id, data->vma, data, - region_node->region.offset, - region_node->region.len); - list_del(elt); - kfree(region_node); - } - /* delete the master file */ - pmem_unlock_data_and_mm(data, mm); -} - -static void pmem_get_size(struct pmem_region *region, struct file *file) -{ - struct pmem_data *data = (struct pmem_data *)file->private_data; - int
id = get_id(file); - - if (!has_allocation(file)) { - region->offset = 0; - region->len = 0; - return; - } else { - region->offset = pmem_start_addr(id, data); - region->len = pmem_len(id, data); - } - DLOG("offset %lx len %lx\n", region->offset, region->len); -} - - -static long pmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) -{ - struct pmem_data *data; - int id = get_id(file); - - switch (cmd) { - case PMEM_GET_PHYS: - { - struct pmem_region region; - DLOG("get_phys\n"); - if (!has_allocation(file)) { - region.offset = 0; - region.len = 0; - } else { - data = (struct pmem_data *)file->private_data; - region.offset = pmem_start_addr(id, data); - region.len = pmem_len(id, data); - } - printk(KERN_INFO "pmem: request for physical address of pmem region " - "from process %d.\n", current->pid); - if (copy_to_user((void __user *)arg, &region, - sizeof(struct pmem_region))) - return -EFAULT; - break; - } - case PMEM_MAP: - { - struct pmem_region region; - if (copy_from_user(&region, (void __user *)arg, - sizeof(struct pmem_region))) - return -EFAULT; - data = (struct pmem_data *)file->private_data; - return pmem_remap(&region, file, PMEM_MAP); - } - break; - case PMEM_UNMAP: - { - struct pmem_region region; - if (copy_from_user(&region, (void __user *)arg, - sizeof(struct pmem_region))) - return -EFAULT; - data = (struct pmem_data *)file->private_data; - return pmem_remap(&region, file, PMEM_UNMAP); - break; - } - case PMEM_GET_SIZE: - { - struct pmem_region region; - DLOG("get_size\n"); - pmem_get_size(&region, file); - if (copy_to_user((void __user *)arg, &region, - sizeof(struct pmem_region))) - return -EFAULT; - break; - } - case PMEM_GET_TOTAL_SIZE: - { - struct pmem_region region; - DLOG("get total size\n"); - region.offset = 0; - get_id(file); - region.len = pmem[id].size; - if (copy_to_user((void __user *)arg, &region, - sizeof(struct pmem_region))) - return -EFAULT; - break; - } - case PMEM_ALLOCATE: - { - if (has_allocation(file)) - return -EINVAL; - data =
(struct pmem_data *)file->private_data; - data->index = pmem_allocate(id, arg); - break; - } - case PMEM_CONNECT: - DLOG("connect\n"); - return pmem_connect(arg, file); - break; - case PMEM_CACHE_FLUSH: - { - struct pmem_region region; - DLOG("flush\n"); - if (copy_from_user(&region, (void __user *)arg, - sizeof(struct pmem_region))) - return -EFAULT; - flush_pmem_file(file, region.offset, region.len); - break; - } - default: - if (pmem[id].ioctl) - return pmem[id].ioctl(file, cmd, arg); - return -EINVAL; - } - return 0; -} - -#if PMEM_DEBUG -static ssize_t debug_open(struct inode *inode, struct file *file) -{ - file->private_data = inode->i_private; - return 0; -} - -static ssize_t debug_read(struct file *file, char __user *buf, size_t count, - loff_t *ppos) -{ - struct list_head *elt, *elt2; - struct pmem_data *data; - struct pmem_region_node *region_node; - int id = (int)file->private_data; - const int debug_bufmax = 4096; - static char buffer[4096]; - int n = 0; - - DLOG("debug open\n"); - n = scnprintf(buffer, debug_bufmax, - "pid #: mapped regions (offset, len) (offset,len)...\n"); - - down(&pmem[id].data_list_sem); - list_for_each(elt, &pmem[id].data_list) { - data = list_entry(elt, struct pmem_data, list); - down_read(&data->sem); - n += scnprintf(buffer + n, debug_bufmax - n, "pid %u:", - data->pid); - list_for_each(elt2, &data->region_list) { - region_node = list_entry(elt2, struct pmem_region_node, - list); - n += scnprintf(buffer + n, debug_bufmax - n, - "(%lx,%lx) ", - region_node->region.offset, - region_node->region.len); - } - n += scnprintf(buffer + n, debug_bufmax - n, "\n"); - up_read(&data->sem); - } - up(&pmem[id].data_list_sem); - - n++; - buffer[n] = 0; - return simple_read_from_buffer(buf, count, ppos, buffer, n); -} - -static struct file_operations debug_fops = { - .read = debug_read, - .open = debug_open, -}; -#endif - -#if 0 -static struct miscdevice pmem_dev = { - .name = "pmem", - .fops = &pmem_fops, -}; -#endif - -int pmem_setup(struct
android_pmem_platform_data *pdata, - long (*ioctl)(struct file *, unsigned int, unsigned long), - int (*release)(struct inode *, struct file *)) -{ - int err = 0; - int i, index = 0; - int id = id_count; - id_count++; - - pmem[id].no_allocator = pdata->no_allocator; - pmem[id].cached = pdata->cached; - pmem[id].buffered = pdata->buffered; - pmem[id].base = pdata->start; - pmem[id].size = pdata->size; - pmem[id].ioctl = ioctl; - pmem[id].release = release; - init_rwsem(&pmem[id].bitmap_sem); - init_MUTEX(&pmem[id].data_list_sem); - INIT_LIST_HEAD(&pmem[id].data_list); - pmem[id].dev.name = pdata->name; - pmem[id].dev.minor = id; - pmem[id].dev.fops = &pmem_fops; - printk(KERN_INFO "%s: %d init\n", pdata->name, pdata->cached); - - err = misc_register(&pmem[id].dev); - if (err) { - printk(KERN_ALERT "Unable to register pmem driver!\n"); - goto err_cant_register_device; - } - pmem[id].num_entries = pmem[id].size / PMEM_MIN_ALLOC; - - pmem[id].bitmap = kmalloc(pmem[id].num_entries * - sizeof(struct pmem_bits), GFP_KERNEL); - if (!pmem[id].bitmap) - goto err_no_mem_for_metadata; - - memset(pmem[id].bitmap, 0, sizeof(struct pmem_bits) * - pmem[id].num_entries); - - for (i = sizeof(pmem[id].num_entries) * 8 - 1; i >= 0; i--) { - if ((pmem[id].num_entries) & 1<<i) { - PMEM_ORDER(id, index) = i; - index = PMEM_NEXT_INDEX(id, index); - } - } - - if (pmem[id].cached) - pmem[id].vbase = ioremap_cached(pmem[id].base, - pmem[id].size); -#ifdef ioremap_ext_buffered - else if (pmem[id].buffered) - pmem[id].vbase = ioremap_ext_buffered(pmem[id].base, - pmem[id].size); -#endif - else - pmem[id].vbase = ioremap(pmem[id].base, pmem[id].size); - - if (pmem[id].vbase == 0) - goto error_cant_remap; - - pmem[id].garbage_pfn = page_to_pfn(alloc_page(GFP_KERNEL)); - if (pmem[id].no_allocator) - pmem[id].allocated = 0; - -#if PMEM_DEBUG - debugfs_create_file(pdata->name, S_IFREG | S_IRUGO, NULL, (void *)id, - &debug_fops); -#endif - return 0; -error_cant_remap: - kfree(pmem[id].bitmap); -err_no_mem_for_metadata: - misc_deregister(&pmem[id].dev); -err_cant_register_device: - return -1; -} - -static int pmem_probe(struct platform_device *pdev) -{ - struct android_pmem_platform_data *pdata; - - if (!pdev || !pdev->dev.platform_data) { - printk(KERN_ALERT "Unable to probe pmem!\n"); - return -1; - } - pdata = pdev->dev.platform_data; - return pmem_setup(pdata, NULL, NULL); -} - - -static int pmem_remove(struct platform_device *pdev) -{ - int id = pdev->id; - __free_page(pfn_to_page(pmem[id].garbage_pfn)); - misc_deregister(&pmem[id].dev); - return 0; -} - -static struct
platform_driver pmem_driver = { - .probe = pmem_probe, - .remove = pmem_remove, - .driver = { .name = "android_pmem" } -}; - - -static int __init pmem_init(void) -{ - return platform_driver_register(&pmem_driver); -} - -static void __exit pmem_exit(void) -{ - platform_driver_unregister(&pmem_driver); -} - -module_init(pmem_init); -module_exit(pmem_exit); - -#endif diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index 1e5676f1..079918eb 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -29,6 +29,7 @@ #include #include #include +#include #include #include @@ -81,7 +82,6 @@ enum { BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, BINDER_DEBUG_PRIORITY_CAP = 1U << 14, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, - BINDER_DEBUG_IOCTL = 1U << 16, }; static uint32_t binder_debug_mask = BINDER_DEBUG_USER_ERROR | BINDER_DEBUG_FAILED_TRANSACTION | BINDER_DEBUG_DEAD_TRANSACTION; @@ -2615,9 +2615,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; - binder_debug(BINDER_DEBUG_IOCTL, - "binder_ioctl begin: %d:%d %x %lx\n", - proc->pid, current->pid, cmd, arg); + /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) @@ -2736,9 +2734,6 @@ err: wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) printk(KERN_INFO "binder: %d:%d ioctl %x %lx returned %d\n", proc->pid, current->pid, cmd, arg, ret); - binder_debug(BINDER_DEBUG_IOCTL, - "binder_ioctl end: %d:%d %x %lx\n", - proc->pid, current->pid, cmd, arg); return ret; } diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index 15b2e970..1a0c1391 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -23,6 +23,7 @@ 
#include #include #include +#include #include #include "logger.h" @@ -431,7 +432,10 @@ static int logger_release(struct inode *ignored, struct file *file) { if (file->f_mode & FMODE_READ) { struct logger_reader *reader = file->private_data; + struct logger_log *log = reader->log; + mutex_lock(&log->mutex); list_del(&reader->list); + mutex_unlock(&log->mutex); kfree(reader); } diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 7c65cce1..42cd93ea 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -52,19 +52,8 @@ static size_t lowmem_minfree[6] = { }; static int lowmem_minfree_size = 4; -static size_t lowmem_minfile[6] = { - 1536, - 2048, - 4096, - 5120, - 5632, - 6144 -}; -static int lowmem_minfile_size = 6; - static struct task_struct *lowmem_deathpending; - -static uint32_t lowmem_check_filepages = 0; +static DEFINE_SPINLOCK(lowmem_deathpending_lock); #define lowmem_print(level, x...) 
\ do { \ @@ -79,13 +68,25 @@ static struct notifier_block task_nb = { .notifier_call = task_notify_func, }; + +static void task_free_fn(struct work_struct *work) +{ + unsigned long flags; + + task_free_unregister(&task_nb); + spin_lock_irqsave(&lowmem_deathpending_lock, flags); + lowmem_deathpending = NULL; + spin_unlock_irqrestore(&lowmem_deathpending_lock, flags); +} +static DECLARE_WORK(task_free_work, task_free_fn); + static int task_notify_func(struct notifier_block *self, unsigned long val, void *data) { struct task_struct *task = data; + if (task == lowmem_deathpending) { - lowmem_deathpending = NULL; - task_free_unregister(&task_nb); + schedule_work(&task_free_work); } return NOTIFY_OK; } @@ -103,8 +104,7 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) int array_size = ARRAY_SIZE(lowmem_adj); int other_free = global_page_state(NR_FREE_PAGES); int other_file = global_page_state(NR_FILE_PAGES); - int lru_file = global_page_state(NR_ACTIVE_FILE) + - global_page_state(NR_INACTIVE_FILE); + unsigned long flags; /* * If we already have a death outstanding, then @@ -121,14 +121,9 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) if (lowmem_minfree_size < array_size) array_size = lowmem_minfree_size; for (i = 0; i < array_size; i++) { - if (other_free < lowmem_minfree[i]) { - if(other_file < lowmem_minfree[i] || - (lowmem_check_filepages && - (lru_file < lowmem_minfile[i]))) { - - min_adj = lowmem_adj[i]; - break; - } + if (other_file < lowmem_minfree[i]) { + min_adj = lowmem_adj[i]; + break; } } if (nr_to_scan > 0) @@ -181,14 +176,20 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", p->pid, p->comm, oom_adj, tasksize); } + if (selected) { - lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", - selected->pid, selected->comm, - selected_oom_adj, selected_tasksize); - lowmem_deathpending = selected; - task_free_register(&task_nb); - force_sig(SIGKILL, selected); - 
rem -= selected_tasksize; + spin_lock_irqsave(&lowmem_deathpending_lock, flags); + if (!lowmem_deathpending) { + lowmem_print(1, + "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected_oom_adj, selected_tasksize); + lowmem_deathpending = selected; + task_free_register(&task_nb); + force_sig(SIGKILL, selected); + rem -= selected_tasksize; + } + spin_unlock_irqrestore(&lowmem_deathpending_lock, flags); } lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", nr_to_scan, gfp_mask, rem); @@ -219,11 +220,6 @@ module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR); module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); -module_param_named(check_filepages , lowmem_check_filepages, uint, - S_IRUGO | S_IWUSR); -module_param_array_named(minfile, lowmem_minfile, uint, &lowmem_minfile_size, - S_IRUGO | S_IWUSR); - module_init(lowmem_init); module_exit(lowmem_exit); diff --git a/drivers/staging/android/timed_gpio.c b/drivers/staging/android/timed_gpio.c index a646107d..a64481c3 100644 --- a/drivers/staging/android/timed_gpio.c +++ b/drivers/staging/android/timed_gpio.c @@ -16,6 +16,7 @@ #include #include +#include #include #include #include diff --git a/include/linux/android_alarm.h b/include/linux/android_alarm.h index f8f14e79..cbfeafce 100644 --- a/include/linux/android_alarm.h +++ b/include/linux/android_alarm.h @@ -74,6 +74,7 @@ ktime_t alarm_get_elapsed_realtime(void); /* set rtc while preserving elapsed realtime */ int alarm_set_rtc(const struct timespec ts); +void alarm_update_timedelta(struct timespec tv, struct timespec ts); #endif diff --git a/include/linux/android_pmem.h b/include/linux/android_pmem.h index 6e37c0f8..f5548820 100644 --- a/include/linux/android_pmem.h +++ b/include/linux/android_pmem.h @@ -8,12 +8,12 @@ * may be copied, distributed, and modified under those terms. 
* * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of + * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * */ -#ifdef CONFIG_MSM_KGSL + #ifndef _ANDROID_PMEM_H_ #define _ANDROID_PMEM_H_ @@ -45,7 +45,7 @@ */ #define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int) /* Returns the total size of the pmem region it is sent to as a pmem_region - * struct (with offset set to 0). + * struct (with offset set to 0). */ #define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) /* Revokes gpu registers and resets the gpu. Pass a pointer to the @@ -54,6 +54,7 @@ #define HW3D_REVOKE_GPU _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) #define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) #define HW3D_GRANT_GPU _IOW(PMEM_IOCTL_MAGIC, 9, unsigned int) +#define HW3D_WAIT_FOR_INTERRUPT _IOW(PMEM_IOCTL_MAGIC, 10, unsigned int) #define PMEM_CLEAN_INV_CACHES _IOW(PMEM_IOCTL_MAGIC, 11, unsigned int) #define PMEM_CLEAN_CACHES _IOW(PMEM_IOCTL_MAGIC, 12, unsigned int) @@ -85,6 +86,8 @@ struct pmem_allocation { #ifdef __KERNEL__ int get_pmem_file(unsigned int fd, unsigned long *start, unsigned long *vstart, unsigned long *end, struct file **filp); +int get_pmem_addr(struct file *file, unsigned long *start, + unsigned long *vstart, unsigned long *len); int get_pmem_fd(int fd, unsigned long *start, unsigned long *end); int get_pmem_user_addr(struct file *file, unsigned long *start, unsigned long *end); @@ -101,6 +104,7 @@ enum pmem_allocator_type { * defined */ PMEM_ALLOCATORTYPE_BITMAP = 0, /* forced to be zero here */ + PMEM_ALLOCATORTYPE_SYSTEM, PMEM_ALLOCATORTYPE_ALLORNOTHING, PMEM_ALLOCATORTYPE_BUDDYBESTFIT, @@ -163,104 +167,7 @@ int pmem_setup(struct android_pmem_platform_data *pdata, int pmem_remap(struct pmem_region *region, struct file *file, unsigned operation); 
-int is_pmem_file(struct file *file); - #endif /* __KERNEL__ */ #endif //_ANDROID_PPP_H_ -#else -/* include/linux/android_pmem.h - * - * Copyright (C) 2007 Google, Inc. - * - * This software is licensed under the terms of the GNU General Public - * License version 2, as published by the Free Software Foundation, and - * may be copied, distributed, and modified under those terms. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the - * GNU General Public License for more details. - * - */ - -#ifndef _ANDROID_PMEM_H_ -#define _ANDROID_PMEM_H_ - -#define PMEM_IOCTL_MAGIC 'p' -#define PMEM_GET_PHYS _IOW(PMEM_IOCTL_MAGIC, 1, unsigned int) -#define PMEM_MAP _IOW(PMEM_IOCTL_MAGIC, 2, unsigned int) -#define PMEM_GET_SIZE _IOW(PMEM_IOCTL_MAGIC, 3, unsigned int) -#define PMEM_UNMAP _IOW(PMEM_IOCTL_MAGIC, 4, unsigned int) -/* This ioctl will allocate pmem space, backing the file, it will fail - * if the file already has an allocation, pass it the len as the argument - * to the ioctl */ -#define PMEM_ALLOCATE _IOW(PMEM_IOCTL_MAGIC, 5, unsigned int) -/* This will connect a one pmem file to another, pass the file that is already - * backed in memory as the argument to the ioctl - */ -#define PMEM_CONNECT _IOW(PMEM_IOCTL_MAGIC, 6, unsigned int) -/* Returns the total size of the pmem region it is sent to as a pmem_region - * struct (with offset set to 0). 
- */ -#define PMEM_GET_TOTAL_SIZE _IOW(PMEM_IOCTL_MAGIC, 7, unsigned int) -#define PMEM_CACHE_FLUSH _IOW(PMEM_IOCTL_MAGIC, 8, unsigned int) - -struct android_pmem_platform_data -{ - const char* name; - /* starting physical address of memory region */ - unsigned long start; - /* size of memory region */ - unsigned long size; - /* set to indicate the region should not be managed with an allocator */ - unsigned no_allocator; - /* set to indicate maps of this region should be cached, if a mix of - * cached and uncached is desired, set this and open the device with - * O_SYNC to get an uncached region */ - unsigned cached; - /* The MSM7k has bits to enable a write buffer in the bus controller*/ - unsigned buffered; -}; - -struct pmem_region { - unsigned long offset; - unsigned long len; -}; - -#ifdef CONFIG_ANDROID_PMEM -int is_pmem_file(struct file *file); -int get_pmem_file(int fd, unsigned long *start, unsigned long *vstart, - unsigned long *end, struct file **filp); -int get_pmem_user_addr(struct file *file, unsigned long *start, - unsigned long *end); -void put_pmem_file(struct file* file); -void flush_pmem_file(struct file *file, unsigned long start, unsigned long len); -int pmem_setup(struct android_pmem_platform_data *pdata, - long (*ioctl)(struct file *, unsigned int, unsigned long), - int (*release)(struct inode *, struct file *)); -int pmem_remap(struct pmem_region *region, struct file *file, - unsigned operation); - -#else -static inline int is_pmem_file(struct file *file) { return 0; } -static inline int get_pmem_file(int fd, unsigned long *start, - unsigned long *vstart, unsigned long *end, - struct file **filp) { return -ENOSYS; } -static inline int get_pmem_user_addr(struct file *file, unsigned long *start, - unsigned long *end) { return -ENOSYS; } -static inline void put_pmem_file(struct file* file) { return; } -static inline void flush_pmem_file(struct file *file, unsigned long start, - unsigned long len) { return; } -static inline int 
pmem_setup(struct android_pmem_platform_data *pdata, - long (*ioctl)(struct file *, unsigned int, unsigned long), - int (*release)(struct inode *, struct file *)) { return -ENOSYS; } - -static inline int pmem_remap(struct pmem_region *region, struct file *file, - unsigned operation) { return -ENOSYS; } -#endif - -#endif //_ANDROID_PPP_H_ - -#endif \ No newline at end of file diff --git a/include/linux/gfp.h b/include/linux/gfp.h index 557bdad3..76c0893d 100644 --- a/include/linux/gfp.h +++ b/include/linux/gfp.h @@ -15,7 +15,7 @@ struct vm_area_struct; * Zone modifiers (see linux/mmzone.h - low three bits) * * Do not put any conditional on these. If necessary modify the definitions - * without the underscores and use the consistently. The definitions here may + * without the underscores and use them consistently. The definitions here may * be used in bit comparisons. */ #define __GFP_DMA ((__force gfp_t)0x01u) @@ -30,7 +30,8 @@ struct vm_area_struct; * _might_ fail. This depends upon the particular VM implementation. * * __GFP_NOFAIL: The VM implementation _must_ retry infinitely: the caller - * cannot handle allocation failures. + * cannot handle allocation failures. This modifier is deprecated and no new + * users should be added. * * __GFP_NORETRY: The VM implementation must not retry indefinitely. 
* @@ -83,6 +84,7 @@ struct vm_area_struct; #define GFP_HIGHUSER_MOVABLE (__GFP_WAIT | __GFP_IO | __GFP_FS | \ __GFP_HARDWALL | __GFP_HIGHMEM | \ __GFP_MOVABLE) +#define GFP_IOFS (__GFP_IO | __GFP_FS) #ifdef CONFIG_NUMA #define GFP_THISNODE (__GFP_THISNODE | __GFP_NOWARN | __GFP_NORETRY) @@ -99,7 +101,7 @@ struct vm_area_struct; __GFP_NORETRY|__GFP_NOMEMALLOC) /* Control slab gfp mask during early boot */ -#define GFP_BOOT_MASK __GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS) +#define GFP_BOOT_MASK (__GFP_BITS_MASK & ~(__GFP_WAIT|__GFP_IO|__GFP_FS)) /* Control allocation constraints */ #define GFP_CONSTRAINT_MASK (__GFP_HARDWALL|__GFP_THISNODE) @@ -150,12 +152,12 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) * GFP_ZONE_TABLE is a word size bitstring that is used for looking up the * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long * and there are 16 of them to cover all possible combinations of - * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM + * __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM. * * The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA. * But GFP_MOVABLE is not only a zone specifier but also an allocation * policy. Therefore __GFP_MOVABLE plus another zone selector is valid. - * Only 1bit of the lowest 3 bit (DMA,DMA32,HIGHMEM) can be set to "1". + * Only 1 bit of the lowest 3 bits (DMA,DMA32,HIGHMEM) can be set to "1". 
* * bit result * ================= @@ -185,7 +187,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) #define GFP_ZONE_TABLE ( \ (ZONE_NORMAL << 0 * ZONES_SHIFT) \ - | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \ + | (OPT_ZONE_DMA << __GFP_DMA * ZONES_SHIFT) \ | (OPT_ZONE_HIGHMEM << __GFP_HIGHMEM * ZONES_SHIFT) \ | (OPT_ZONE_DMA32 << __GFP_DMA32 * ZONES_SHIFT) \ | (ZONE_NORMAL << __GFP_MOVABLE * ZONES_SHIFT) \ @@ -195,7 +197,7 @@ static inline int allocflags_to_migratetype(gfp_t gfp_flags) ) /* - * GFP_ZONE_BAD is a bitmap for all combination of __GFP_DMA, __GFP_DMA32 + * GFP_ZONE_BAD is a bitmap for all combinations of __GFP_DMA, __GFP_DMA32 * __GFP_HIGHMEM and __GFP_MOVABLE that are not permitted. One flag per * entry starting with bit 0. Bit is set if the combination is not * allowed. @@ -318,10 +320,10 @@ void *alloc_pages_exact(size_t size, gfp_t gfp_mask); void free_pages_exact(void *virt, size_t size); #define __get_free_page(gfp_mask) \ - __get_free_pages((gfp_mask),0) + __get_free_pages((gfp_mask), 0) #define __get_dma_pages(gfp_mask, order) \ - __get_free_pages((gfp_mask) | GFP_DMA,(order)) + __get_free_pages((gfp_mask) | GFP_DMA, (order)) extern void __free_pages(struct page *page, unsigned int order); extern void free_pages(unsigned long addr, unsigned int order); diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h index 86f6b5c5..ceada785 100644 --- a/include/linux/msm_kgsl.h +++ b/include/linux/msm_kgsl.h @@ -1,4 +1,3 @@ -#ifdef CONFIG_MSM_KGSL /* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved. * * Redistribution and use in source and binary forms, with or without @@ -464,355 +463,3 @@ int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start, #endif #endif #endif /* _MSM_KGSL_H */ - - -#else - -/* - * (C) Copyright Advanced Micro Devices, Inc. 2002, 2007 - * Copyright (c) 2008-2009 QUALCOMM USA, INC. 
- * - * All source code in this file is licensed under the following license - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * version 2 as published by the Free Software Foundation. - * - * This program is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. - * See the GNU General Public License for more details. - * - * You should have received a copy of the GNU General Public License - * along with this program; if not, you can find it at http://www.fsf.org - */ -#ifndef _MSM_KGSL_H -#define _MSM_KGSL_H - -/*context flags */ -#define KGSL_CONTEXT_SAVE_GMEM 1 -#define KGSL_CONTEXT_NO_GMEM_ALLOC 2 - -/* generic flag values */ -#define KGSL_FLAGS_NORMALMODE 0x00000000 -#define KGSL_FLAGS_SAFEMODE 0x00000001 -#define KGSL_FLAGS_INITIALIZED0 0x00000002 -#define KGSL_FLAGS_INITIALIZED 0x00000004 -#define KGSL_FLAGS_STARTED 0x00000008 -#define KGSL_FLAGS_ACTIVE 0x00000010 -#define KGSL_FLAGS_RESERVED0 0x00000020 -#define KGSL_FLAGS_RESERVED1 0x00000040 -#define KGSL_FLAGS_RESERVED2 0x00000080 - -/* device id */ -enum kgsl_deviceid { - KGSL_DEVICE_ANY = 0x00000000, - KGSL_DEVICE_YAMATO = 0x00000001, - KGSL_DEVICE_G12 = 0x00000002, - KGSL_DEVICE_MAX = 0x00000002 -}; - -struct kgsl_devinfo { - - unsigned int device_id; - /* chip revision id - * coreid:8 majorrev:8 minorrev:8 patch:8 - */ - unsigned int chip_id; - unsigned int mmu_enabled; - unsigned int gmem_gpubaseaddr; - /* if gmem_hostbaseaddr is NULL, we would know its not mapped into - * mmio space */ - unsigned int gmem_hostbaseaddr; - unsigned int gmem_sizebytes; -}; - -/* this structure defines the region of memory that can be mmap()ed from this - driver. 
The timestamp fields are volatile because they are written by the - GPU -*/ -struct kgsl_devmemstore { - volatile unsigned int soptimestamp; - unsigned int sbz; - volatile unsigned int eoptimestamp; - unsigned int sbz2; - volatile unsigned int ts_cmp_enable; - unsigned int sbz3; - volatile unsigned int ref_wait_ts; - unsigned int sbz4; -}; - -#define KGSL_DEVICE_MEMSTORE_OFFSET(field) \ - offsetof(struct kgsl_devmemstore, field) - - -/* timestamp id*/ -enum kgsl_timestamp_type { - KGSL_TIMESTAMP_CONSUMED = 0x00000001, /* start-of-pipeline timestamp */ - KGSL_TIMESTAMP_RETIRED = 0x00000002, /* end-of-pipeline timestamp*/ - KGSL_TIMESTAMP_MAX = 0x00000002, -}; - -/* property types - used with kgsl_device_getproperty */ -enum kgsl_property_type { - KGSL_PROP_DEVICE_INFO = 0x00000001, - KGSL_PROP_DEVICE_SHADOW = 0x00000002, - KGSL_PROP_DEVICE_POWER = 0x00000003, - KGSL_PROP_SHMEM = 0x00000004, - KGSL_PROP_SHMEM_APERTURES = 0x00000005, - KGSL_PROP_MMU_ENABLE = 0x00000006, - KGSL_PROP_INTERRUPT_WAITS = 0x00000007, -}; - -struct kgsl_shadowprop { - unsigned int gpuaddr; - unsigned int size; - unsigned int flags; /* contains KGSL_FLAGS_ values */ -}; - -#ifdef CONFIG_ARCH_MSM7X30 -struct kgsl_platform_data { - unsigned int high_axi_2d; - unsigned int high_axi_3d; - unsigned int max_grp2d_freq; - unsigned int min_grp2d_freq; - int (*set_grp2d_async)(void); - unsigned int max_grp3d_freq; - unsigned int min_grp3d_freq; - int (*set_grp3d_async)(void); -}; -#endif -/* ioctls */ -#define KGSL_IOC_TYPE 0x09 - -/* get misc info about the GPU - type should be a value from enum kgsl_property_type - value points to a structure that varies based on type - sizebytes is sizeof() that structure - for KGSL_PROP_DEVICE_INFO, use struct kgsl_devinfo - this structure contaings hardware versioning info. - for KGSL_PROP_DEVICE_SHADOW, use struct kgsl_shadowprop - this is used to find mmap() offset and sizes for mapping - struct kgsl_memstore into userspace. 
-*/ -struct kgsl_device_getproperty { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int type; - void *value; - unsigned int sizebytes; -}; - -#define IOCTL_KGSL_DEVICE_GETPROPERTY \ - _IOWR(KGSL_IOC_TYPE, 0x2, struct kgsl_device_getproperty) - - -/* read a GPU register. - offsetwords it the 32 bit word offset from the beginning of the - GPU register space. - */ -struct kgsl_device_regread { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int offsetwords; - unsigned int value; /* output param */ -}; - -#define IOCTL_KGSL_DEVICE_REGREAD \ - _IOWR(KGSL_IOC_TYPE, 0x3, struct kgsl_device_regread) - - -/* block until the GPU has executed past a given timestamp - * timeout is in milliseconds. - */ -struct kgsl_device_waittimestamp { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int timestamp; - unsigned int timeout; -}; - -#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \ - _IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp) - - -/* issue indirect commands to the GPU. - * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE - * ibaddr and sizedwords must specify a subset of a buffer created - * with IOCTL_KGSL_SHAREDMEM_FROM_PMEM - * flags may be a mask of KGSL_CONTEXT_ values - * timestamp is a returned counter value which can be passed to - * other ioctls to determine when the commands have been executed by - * the GPU. 
- */ -struct kgsl_ringbuffer_issueibcmds { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int drawctxt_id; - unsigned int ibaddr; - unsigned int sizedwords; - unsigned int timestamp; /*output param */ - unsigned int flags; -}; - -#define IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS \ - _IOWR(KGSL_IOC_TYPE, 0x10, struct kgsl_ringbuffer_issueibcmds) - -/* read the most recently executed timestamp value - * type should be a value from enum kgsl_timestamp_type - */ -struct kgsl_cmdstream_readtimestamp { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int type; - unsigned int timestamp; /*output param */ -}; - -#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP \ - _IOR(KGSL_IOC_TYPE, 0x11, struct kgsl_cmdstream_readtimestamp) - -/* free memory when the GPU reaches a given timestamp. - * gpuaddr specify a memory region created by a - * IOCTL_KGSL_SHAREDMEM_FROM_PMEM call - * type should be a value from enum kgsl_timestamp_type - */ -struct kgsl_cmdstream_freememontimestamp { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int gpuaddr; - unsigned int type; - unsigned int timestamp; -}; - -#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP \ - _IOR(KGSL_IOC_TYPE, 0x12, struct kgsl_cmdstream_freememontimestamp) - -/* create a draw context, which is used to preserve GPU state. 
- * The flags field may contain a mask KGSL_CONTEXT_* values - */ -struct kgsl_drawctxt_create { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int flags; - unsigned int drawctxt_id; /*output param */ -}; - -#define IOCTL_KGSL_DRAWCTXT_CREATE \ - _IOWR(KGSL_IOC_TYPE, 0x13, struct kgsl_drawctxt_create) - -/* destroy a draw context */ -struct kgsl_drawctxt_destroy { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int drawctxt_id; -}; - -#define IOCTL_KGSL_DRAWCTXT_DESTROY \ - _IOW(KGSL_IOC_TYPE, 0x14, struct kgsl_drawctxt_destroy) - -/* add a block of pmem or fb into the GPU address space */ -struct kgsl_sharedmem_from_pmem { - int pmem_fd; - unsigned int gpuaddr; /*output param */ - unsigned int len; - unsigned int offset; -}; - -#define IOCTL_KGSL_SHAREDMEM_FROM_PMEM \ - _IOWR(KGSL_IOC_TYPE, 0x20, struct kgsl_sharedmem_from_pmem) - -/* remove memory from the GPU's address space */ -struct kgsl_sharedmem_free { - unsigned int gpuaddr; -}; - -#define IOCTL_KGSL_SHAREDMEM_FREE \ - _IOW(KGSL_IOC_TYPE, 0x21, struct kgsl_sharedmem_free) - -struct kgsl_gmem_desc { - unsigned int x; - unsigned int y; - unsigned int width; - unsigned int height; - unsigned int pitch; -}; - -struct kgsl_buffer_desc { - void *hostptr; - unsigned int gpuaddr; - int size; - unsigned int format; - unsigned int pitch; - unsigned int enabled; -}; - -struct kgsl_bind_gmem_shadow { - unsigned int drawctxt_id; - struct kgsl_gmem_desc gmem_desc; - unsigned int shadow_x; - unsigned int shadow_y; - struct kgsl_buffer_desc shadow_buffer; - unsigned int buffer_id; -}; - -#define IOCTL_KGSL_DRAWCTXT_BIND_GMEM_SHADOW \ - _IOW(KGSL_IOC_TYPE, 0x22, struct kgsl_bind_gmem_shadow) - -/* add a block of memory into the GPU address space */ -struct kgsl_sharedmem_from_vmalloc { - unsigned int gpuaddr; /*output param */ - unsigned int hostptr; - /* If set from user space then will attempt to - * allocate even if low watermark is crossed */ - int 
force_no_low_watermark; -}; - -#define IOCTL_KGSL_SHAREDMEM_FROM_VMALLOC \ - _IOWR(KGSL_IOC_TYPE, 0x23, struct kgsl_sharedmem_from_vmalloc) - -#define IOCTL_KGSL_SHAREDMEM_FLUSH_CACHE \ - _IOW(KGSL_IOC_TYPE, 0x24, struct kgsl_sharedmem_free) - -struct kgsl_drawctxt_set_bin_base_offset { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - unsigned int drawctxt_id; - unsigned int offset; -}; - -#define IOCTL_KGSL_DRAWCTXT_SET_BIN_BASE_OFFSET \ - _IOW(KGSL_IOC_TYPE, 0x25, struct kgsl_drawctxt_set_bin_base_offset) - -enum kgsl_cmdwindow_type { - KGSL_CMDWINDOW_MIN = 0x00000000, - KGSL_CMDWINDOW_2D = 0x00000000, - KGSL_CMDWINDOW_3D = 0x00000001, /* legacy */ - KGSL_CMDWINDOW_MMU = 0x00000002, - KGSL_CMDWINDOW_ARBITER = 0x000000FF, - KGSL_CMDWINDOW_MAX = 0x000000FF, -}; - -/* write to the command window */ -struct kgsl_cmdwindow_write { -#ifdef CONFIG_ARCH_MSM7X30 - unsigned int device_id; -#endif - enum kgsl_cmdwindow_type target; - unsigned int addr; - unsigned int data; -}; - -#define IOCTL_KGSL_CMDWINDOW_WRITE \ - _IOW(KGSL_IOC_TYPE, 0x2e, struct kgsl_cmdwindow_write) - -#endif /* _MSM_KGSL_H */ -#endif diff --git a/mm/ashmem.c b/mm/ashmem.c index 16058090..a16f3f7c 100644 --- a/mm/ashmem.c +++ b/mm/ashmem.c @@ -29,6 +29,7 @@ #include #include #include +#include #define ASHMEM_NAME_PREFIX "dev/ashmem/" #define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) @@ -45,6 +46,8 @@ struct ashmem_area { struct list_head unpinned_list; /* list of all ashmem areas */ struct file *file; /* the shmem-based backing file */ size_t size; /* size of the mapping, in bytes */ + unsigned long vm_start; /* Start address of vm_area + * which maps this ashmem */ unsigned long prot_mask; /* allowed prot bits, as vm_flags */ }; @@ -178,7 +181,7 @@ static int ashmem_open(struct inode *inode, struct file *file) struct ashmem_area *asma; int ret; - ret = nonseekable_open(inode, file); + ret = generic_file_open(inode, file); if (unlikely(ret)) return ret; @@ -211,6 +214,75 
@@ static int ashmem_release(struct inode *ignored, struct file *file) return 0; } +static ssize_t ashmem_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* If size is not set, or set to 0, always return EOF. */ + if (asma->size == 0) { + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = asma->file->f_op->read(asma->file, buf, len, pos); + if (ret < 0) { + goto out; + } + + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) +{ + struct ashmem_area *asma = file->private_data; + loff_t ret; + + mutex_lock(&ashmem_mutex); + + if (asma->size == 0) { + ret = -EINVAL; + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = asma->file->f_op->llseek(asma->file, offset, origin); + if (ret < 0) { + goto out; + } + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static inline unsigned long +calc_vm_may_flags(unsigned long prot) +{ + return _calc_vm_trans(prot, PROT_READ, VM_MAYREAD ) | + _calc_vm_trans(prot, PROT_WRITE, VM_MAYWRITE) | + _calc_vm_trans(prot, PROT_EXEC, VM_MAYEXEC); +} + static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) { struct ashmem_area *asma = file->private_data; @@ -225,10 +297,12 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) } /* requested protection bits must match our allowed protection mask */ - if (unlikely((vma->vm_flags & ~asma->prot_mask) & PROT_MASK)) { + if (unlikely((vma->vm_flags & ~calc_vm_prot_bits(asma->prot_mask)) & + calc_vm_prot_bits(PROT_MASK))) { ret = -EPERM; goto out; } + vma->vm_flags &= ~calc_vm_may_flags(~asma->prot_mask); if 
(!asma->file) { char *name = ASHMEM_NAME_DEF; @@ -255,6 +329,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_file = asma->file; } vma->vm_flags |= VM_CAN_NONLINEAR; + asma->vm_start = vma->vm_start; out: mutex_unlock(&ashmem_mutex); @@ -286,11 +361,7 @@ static int ashmem_shrink(struct shrinker *s, int nr_to_scan, gfp_t gfp_mask) if (!nr_to_scan) return lru_count; - /* If our mutex is held, we are recursing into ourselves, so bail out */ - if (!mutex_trylock(&ashmem_mutex)) { - return -1; - } - + mutex_lock(&ashmem_mutex); list_for_each_entry_safe(range, next, &ashmem_lru_list, lru) { struct inode *inode = range->asma->file->f_dentry->d_inode; loff_t start = range->pgstart * PAGE_SIZE; @@ -559,6 +630,69 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, return ret; } +#ifdef CONFIG_OUTER_CACHE +static unsigned int virtaddr_to_physaddr(unsigned int virtaddr) +{ + unsigned int physaddr = 0; + pgd_t *pgd_ptr = NULL; + pmd_t *pmd_ptr = NULL; + pte_t *pte_ptr = NULL, pte; + + spin_lock(&current->mm->page_table_lock); + pgd_ptr = pgd_offset(current->mm, virtaddr); + if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) { + pr_err("Failed to convert virtaddr %x to pgd_ptr\n", + virtaddr); + goto done; + } + + pmd_ptr = pmd_offset(pgd_ptr, virtaddr); + if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) { + pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n", + (void *)pgd_ptr); + goto done; + } + + pte_ptr = pte_offset_map(pmd_ptr, virtaddr); + if (!pte_ptr) { + pr_err("Failed to convert pmd_ptr %p to pte_ptr\n", + (void *)pmd_ptr); + goto done; + } + pte = *pte_ptr; + physaddr = pte_pfn(pte); + pte_unmap(pte_ptr); +done: + spin_unlock(&current->mm->page_table_lock); + physaddr <<= PAGE_SHIFT; + return physaddr; +} +#endif + +static int ashmem_cache_op(struct ashmem_area *asma, + void (*cache_func)(unsigned long vstart, unsigned long length, + unsigned long pstart)) +{ +#ifdef CONFIG_OUTER_CACHE + unsigned long vaddr; +#endif + 
mutex_lock(&ashmem_mutex); +#ifndef CONFIG_OUTER_CACHE + cache_func(asma->vm_start, asma->size, 0); +#else + for (vaddr = asma->vm_start; vaddr < asma->vm_start + asma->size; + vaddr += PAGE_SIZE) { + unsigned long physaddr; + physaddr = virtaddr_to_physaddr(vaddr); + if (!physaddr) { + mutex_unlock(&ashmem_mutex); + return -EINVAL; + } + cache_func(vaddr, PAGE_SIZE, physaddr); + } +#endif + mutex_unlock(&ashmem_mutex); + return 0; +} + static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ashmem_area *asma = file->private_data; @@ -599,6 +733,15 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ashmem_shrink(&ashmem_shrinker, ret, GFP_KERNEL); } break; + case ASHMEM_CACHE_FLUSH_RANGE: + ret = ashmem_cache_op(asma, &clean_and_invalidate_caches); + break; + case ASHMEM_CACHE_CLEAN_RANGE: + ret = ashmem_cache_op(asma, &clean_caches); + break; + case ASHMEM_CACHE_INV_RANGE: + ret = ashmem_cache_op(asma, &invalidate_caches); + break; } return ret; @@ -661,6 +804,8 @@ static struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, .release = ashmem_release, + .read = ashmem_read, + .llseek = ashmem_llseek, .mmap = ashmem_mmap, .unlocked_ioctl = ashmem_ioctl, .compat_ioctl = ashmem_ioctl,