From 05369deec98ccb9fc7996d6b2b92f0476fcc83af Mon Sep 17 00:00:00 2001 From: securecrt Date: Tue, 31 Jul 2012 13:59:12 +0800 Subject: [PATCH 01/22] include: checkout msm_mdp from ics_chocolate *needs to match userspace* --- include/linux/msm_mdp.h | 442 ++++++++++++++++++++++++++++++++++++++-- 1 file changed, 430 insertions(+), 12 deletions(-) diff --git a/include/linux/msm_mdp.h b/include/linux/msm_mdp.h index a933facc..dcedc554 100644 --- a/include/linux/msm_mdp.h +++ b/include/linux/msm_mdp.h @@ -1,6 +1,7 @@ /* include/linux/msm_mdp.h * * Copyright (C) 2007 Google Incorporated + * Copyright (c) 2012 Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -15,25 +16,90 @@ #define _MSM_MDP_H_ #include <linux/types.h> +#include <linux/fb.h> #define MSMFB_IOCTL_MAGIC 'm' #define MSMFB_GRP_DISP _IOW(MSMFB_IOCTL_MAGIC, 1, unsigned int) #define MSMFB_BLIT _IOW(MSMFB_IOCTL_MAGIC, 2, unsigned int) +#define MSMFB_SUSPEND_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 128, unsigned int) +#define MSMFB_RESUME_SW_REFRESHER _IOW(MSMFB_IOCTL_MAGIC, 129, unsigned int) +#define MSMFB_CURSOR _IOW(MSMFB_IOCTL_MAGIC, 130, struct fb_cursor) +#define MSMFB_SET_LUT _IOW(MSMFB_IOCTL_MAGIC, 131, struct fb_cmap) +#define MSMFB_HISTOGRAM _IOWR(MSMFB_IOCTL_MAGIC, 132, struct mdp_histogram_data) +/* new ioctls for set/get ccs matrix */ +#define MSMFB_GET_CCS_MATRIX _IOWR(MSMFB_IOCTL_MAGIC, 133, struct mdp_ccs) +#define MSMFB_SET_CCS_MATRIX _IOW(MSMFB_IOCTL_MAGIC, 134, struct mdp_ccs) +#define MSMFB_OVERLAY_SET _IOWR(MSMFB_IOCTL_MAGIC, 135, \
+ struct mdp_overlay) +#define MSMFB_OVERLAY_UNSET _IOW(MSMFB_IOCTL_MAGIC, 136, unsigned int) +#define MSMFB_OVERLAY_PLAY _IOW(MSMFB_IOCTL_MAGIC, 137, \
+ struct msmfb_overlay_data) +#define MSMFB_GET_PAGE_PROTECTION _IOR(MSMFB_IOCTL_MAGIC, 138, \
+ struct mdp_page_protection) +#define MSMFB_SET_PAGE_PROTECTION _IOW(MSMFB_IOCTL_MAGIC, 139, \
+ struct mdp_page_protection) +#define MSMFB_OVERLAY_GET _IOR(MSMFB_IOCTL_MAGIC, 140, \
+ struct mdp_overlay) +#define MSMFB_OVERLAY_PLAY_ENABLE _IOW(MSMFB_IOCTL_MAGIC, 141, unsigned int) +#define MSMFB_OVERLAY_BLT _IOWR(MSMFB_IOCTL_MAGIC, 142, \
+ struct msmfb_overlay_blt) +#define MSMFB_OVERLAY_BLT_OFFSET _IOW(MSMFB_IOCTL_MAGIC, 143, unsigned int) +#define MSMFB_HISTOGRAM_START _IOR(MSMFB_IOCTL_MAGIC, 144, \
+ struct mdp_histogram_start_req) +#define MSMFB_HISTOGRAM_STOP _IOR(MSMFB_IOCTL_MAGIC, 145, unsigned int) +#define MSMFB_NOTIFY_UPDATE _IOW(MSMFB_IOCTL_MAGIC, 146, unsigned int) + +#define MSMFB_OVERLAY_3D _IOWR(MSMFB_IOCTL_MAGIC, 147, \
+ struct msmfb_overlay_3d) + +#define MSMFB_MIXER_INFO _IOWR(MSMFB_IOCTL_MAGIC, 148, \
+ struct msmfb_mixer_info_req) +#define MSMFB_OVERLAY_PLAY_WAIT _IOWR(MSMFB_IOCTL_MAGIC, 149, \
+ struct msmfb_overlay_data) +#define MSMFB_WRITEBACK_INIT _IO(MSMFB_IOCTL_MAGIC, 150) +#define MSMFB_WRITEBACK_START _IO(MSMFB_IOCTL_MAGIC, 151) +#define MSMFB_WRITEBACK_STOP _IO(MSMFB_IOCTL_MAGIC, 152) +#define MSMFB_WRITEBACK_QUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 153, \
+ struct msmfb_data) +#define MSMFB_WRITEBACK_DEQUEUE_BUFFER _IOW(MSMFB_IOCTL_MAGIC, 154, \
+ struct msmfb_data) +#define MSMFB_WRITEBACK_TERMINATE _IO(MSMFB_IOCTL_MAGIC, 155) +#define MSMFB_MDP_PP _IOWR(MSMFB_IOCTL_MAGIC, 156, struct msmfb_mdp_pp) + +#define FB_TYPE_3D_PANEL 0x10101010 +#define MDP_IMGTYPE2_START 0x10000 +#define MSMFB_DRIVER_VERSION 0xF9E8D701 enum { - MDP_RGB_565, /* RGB 565 planar */ + NOTIFY_UPDATE_START, + NOTIFY_UPDATE_STOP, +}; + +enum { 
+ MDP_RGB_565, /* RGB 565 planar */ MDP_XRGB_8888, /* RGB 888 padded */ - MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ + MDP_Y_CBCR_H2V2, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ + MDP_Y_CBCR_H2V2_ADRENO, MDP_ARGB_8888, /* ARGB 888 */ - MDP_RGB_888, /* RGB 888 planar */ - MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ + MDP_RGB_888, /* RGB 888 planar */ + MDP_Y_CRCB_H2V2, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ MDP_YCRYCB_H2V1, /* YCrYCb interleave */ - MDP_Y_CRCB_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ - MDP_Y_CBCR_H2V1, /* Y and CrCb, pseduo planar w/ Cr is in MSB */ + MDP_Y_CRCB_H2V1, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ + MDP_Y_CBCR_H2V1, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ MDP_RGBA_8888, /* ARGB 888 */ MDP_BGRA_8888, /* ABGR 888 */ MDP_RGBX_8888, /* RGBX 888 */ - MDP_IMGTYPE_LIMIT /* Non valid image type after this enum */ + MDP_Y_CRCB_H2V2_TILE, /* Y and CrCb, pseudo planar tile */ + MDP_Y_CBCR_H2V2_TILE, /* Y and CbCr, pseudo planar tile */ + MDP_Y_CR_CB_H2V2, /* Y, Cr and Cb, planar */ + MDP_Y_CR_CB_GH2V2, /* Y, Cr and Cb, planar aligned to Android YV12 */ + MDP_Y_CB_CR_H2V2, /* Y, Cb and Cr, planar */ + MDP_Y_CRCB_H1V1, /* Y and CrCb, pseudo planar w/ Cr is in MSB */ + MDP_Y_CBCR_H1V1, /* Y and CbCr, pseudo planar w/ Cb is in MSB */ + MDP_IMGTYPE_LIMIT, + MDP_BGR_565 = MDP_IMGTYPE2_START, /* BGR 565 planar */ + MDP_FB_FORMAT, /* framebuffer format */ + MDP_IMGTYPE_LIMIT2 /* Non valid image type after this enum */ }; enum { @@ -41,24 +107,57 @@ enum { FB_IMG, }; -/* flag values */ +enum { + HSIC_HUE = 0, + HSIC_SAT, + HSIC_INT, + HSIC_CON, + NUM_HSIC_PARAM, +}; + +/* mdp_blit_req flag values */ #define MDP_ROT_NOP 0 #define MDP_FLIP_LR 0x1 #define MDP_FLIP_UD 0x2 #define MDP_ROT_90 0x4 #define MDP_ROT_180 (MDP_FLIP_UD|MDP_FLIP_LR) #define MDP_ROT_270 (MDP_ROT_90|MDP_FLIP_UD|MDP_FLIP_LR) -#define MDP_ROT_MASK 0x7 #define MDP_DITHER 0x8 #define MDP_BLUR 0x10 #define MDP_BLEND_FG_PREMULT 0x20000 +#define MDP_DEINTERLACE 0x80000000 +#define MDP_SHARPENING 0x40000000 +#define MDP_NO_DMA_BARRIER_START 0x20000000 +#define MDP_NO_DMA_BARRIER_END 0x10000000 +#define MDP_NO_BLIT 0x08000000 +#define MDP_BLIT_WITH_DMA_BARRIERS 0x000 +#define MDP_BLIT_WITH_NO_DMA_BARRIERS \
+ (MDP_NO_DMA_BARRIER_START | MDP_NO_DMA_BARRIER_END) +#define MDP_BLIT_SRC_GEM 0x04000000 +#define MDP_BLIT_DST_GEM 0x02000000 +#define MDP_BLIT_NON_CACHED 0x01000000 +#define MDP_OV_PIPE_SHARE 0x00800000 +#define MDP_DEINTERLACE_ODD 0x00400000 +#define MDP_OV_PLAY_NOWAIT 0x00200000 +#define MDP_SOURCE_ROTATED_90 0x00100000 +#define MDP_DPP_HSIC 0x00080000 +#define MDP_BACKEND_COMPOSITION 0x00040000 +#define MDP_BORDERFILL_SUPPORTED 0x00010000 +#define MDP_SECURE_OVERLAY_SESSION 0x00008000 +#define MDP_MEMORY_ID_TYPE_FB 0x00001000 #define MDP_TRANSP_NOP 0xffffffff #define MDP_ALPHA_NOP 0xff -/* drewis: added for android 4.0 */ -#define MDP_BLIT_NON_CACHED 0x01000000 -/* drewis: end */ +#define MDP_FB_PAGE_PROTECTION_NONCACHED (0) +#define MDP_FB_PAGE_PROTECTION_WRITECOMBINE (1) +#define MDP_FB_PAGE_PROTECTION_WRITETHROUGHCACHE (2) +#define MDP_FB_PAGE_PROTECTION_WRITEBACKCACHE (3) +#define MDP_FB_PAGE_PROTECTION_WRITEBACKWACACHE (4) +/* Sentinel: Don't use! */ +#define MDP_FB_PAGE_PROTECTION_INVALID (5) +/* Count of the number of MDP_FB_PAGE_PROTECTION_... values. 
*/ +#define MDP_NUM_FB_PAGE_PROTECTION_VALUES (5) struct mdp_rect { uint32_t x; @@ -73,8 +172,41 @@ struct mdp_img { uint32_t format; uint32_t offset; int memory_id; /* the file descriptor */ + uint32_t priv; }; +/* + * {3x3} + {3} ccs matrix + */ + +#define MDP_CCS_RGB2YUV 0 +#define MDP_CCS_YUV2RGB 1 + +#define MDP_CCS_SIZE 9 +#define MDP_BV_SIZE 3 + +struct mdp_ccs { + int direction; /* MDP_CCS_RGB2YUV or YUV2RGB */ + uint16_t ccs[MDP_CCS_SIZE]; /* 3x3 color coefficients */ + uint16_t bv[MDP_BV_SIZE]; /* 1x3 bias vector */ +}; + +struct mdp_csc { + int id; + uint32_t csc_mv[9]; + uint32_t csc_pre_bv[3]; + uint32_t csc_post_bv[3]; + uint32_t csc_pre_lv[6]; + uint32_t csc_post_lv[6]; +}; + +/* The version of the mdp_blit_req structure so that + * user applications can selectively decide which functionality + * to include + */ + +#define MDP_BLIT_REQ_VERSION 2 + struct mdp_blit_req { struct mdp_img src; struct mdp_img dst; @@ -83,6 +215,7 @@ struct mdp_blit_req { uint32_t alpha; uint32_t transp_mask; uint32_t flags; + int sharpening_strength; /* -127 <--> 127, default 64 */ }; struct mdp_blit_req_list { @@ -90,4 +223,289 @@ struct mdp_blit_req_list { struct mdp_blit_req req[]; }; +#define MSMFB_DATA_VERSION 2 + +struct msmfb_data { + uint32_t offset; + int memory_id; + int id; + uint32_t flags; + uint32_t priv; + uint32_t iova; +}; + +#define MSMFB_NEW_REQUEST -1 + +struct msmfb_overlay_data { + uint32_t id; + struct msmfb_data data; + uint32_t version_key; + struct msmfb_data plane1_data; + struct msmfb_data plane2_data; +}; + +struct msmfb_img { + uint32_t width; + uint32_t height; + uint32_t format; +}; + +#define MSMFB_WRITEBACK_DEQUEUE_BLOCKING 0x1 +struct msmfb_writeback_data { + struct msmfb_data buf_info; + struct msmfb_img img; +}; + +struct dpp_ctrl { + /* + * 'sharp_strength' has inputs = -128 <-> 127 + * Increasingly positive values correlate with increasingly sharper + * picture. Increasingly negative values correlate with increasingly + * smoothed picture. + */ + int8_t sharp_strength; + int8_t hsic_params[NUM_HSIC_PARAM]; +}; + +struct mdp_overlay { + struct msmfb_img src; + struct mdp_rect src_rect; + struct mdp_rect dst_rect; + uint32_t z_order; /* stage number */ + uint32_t is_fg; /* control alpha & transp */ + uint32_t alpha; + uint32_t transp_mask; + uint32_t flags; + uint32_t id; + uint32_t user_data[8]; + struct dpp_ctrl dpp; +}; + +struct msmfb_overlay_3d { + uint32_t is_3d; + uint32_t width; + uint32_t height; +}; + + +struct msmfb_overlay_blt { + uint32_t enable; + uint32_t offset; + uint32_t width; + uint32_t height; + uint32_t bpp; +}; + +struct mdp_histogram { + uint32_t frame_cnt; + uint32_t bin_cnt; + uint32_t *r; + uint32_t *g; + uint32_t *b; +}; + + +/* + + mdp_block_type defines the identifiers for each of the pipes in MDP 4.3 + + MDP_BLOCK_RESERVED is provided for backward compatibility and is + deprecated. It corresponds to DMA_P. So MDP_BLOCK_DMA_P should be used + instead. 
+ +*/ + +enum { + MDP_BLOCK_RESERVED = 0, + MDP_BLOCK_OVERLAY_0, + MDP_BLOCK_OVERLAY_1, + MDP_BLOCK_VG_1, + MDP_BLOCK_VG_2, + MDP_BLOCK_RGB_1, + MDP_BLOCK_RGB_2, + MDP_BLOCK_DMA_P, + MDP_BLOCK_DMA_S, + MDP_BLOCK_DMA_E, + MDP_BLOCK_MAX, +}; + +/* +mdp_histogram_start_req is used to provide the parameters for a +histogram start request +*/ + +struct mdp_histogram_start_req { + uint32_t block; + uint8_t frame_cnt; + uint8_t bit_mask; + uint8_t num_bins; +}; + + +/* + + mdp_histogram_data is used to return the histogram data, once + the histogram is done/stopped/cancelled + + */ + + +struct mdp_histogram_data { + uint32_t block; + uint8_t bin_cnt; + uint32_t *c0; + uint32_t *c1; + uint32_t *c2; + uint32_t *extra_info; +}; + +struct mdp_pcc_coeff { + uint32_t c, r, g, b, rr, gg, bb, rg, gb, rb, rgb_0, rgb_1; +}; + +struct mdp_pcc_cfg_data { + uint32_t block; + uint32_t ops; + struct mdp_pcc_coeff r, g, b; +}; + +#define MDP_CSC_FLAG_ENABLE 0x1 +#define MDP_CSC_FLAG_YUV_IN 0x2 +#define MDP_CSC_FLAG_YUV_OUT 0x4 + +struct mdp_csc_cfg { + /* flags for enabling CSC and toggling RGB/YUV input/output */ + uint32_t flags; + uint32_t csc_mv[9]; + uint32_t csc_pre_bv[3]; + uint32_t csc_post_bv[3]; + uint32_t csc_pre_lv[6]; + uint32_t csc_post_lv[6]; +}; + +struct mdp_csc_cfg_data { + uint32_t block; + struct mdp_csc_cfg csc_data; +}; + +enum { + mdp_lut_igc, + mdp_lut_pgc, + mdp_lut_hist, + mdp_lut_max, +}; + + +struct mdp_igc_lut_data { + uint32_t block; + uint32_t len, ops; + uint32_t *c0_c1_data; + uint32_t *c2_data; +}; + +struct mdp_ar_gc_lut_data { + uint32_t x_start; + uint32_t slope; + uint32_t offset; +}; + +struct mdp_pgc_lut_data { + uint32_t block; + uint32_t flags; + uint8_t num_r_stages; + uint8_t num_g_stages; + uint8_t num_b_stages; + struct mdp_ar_gc_lut_data *r_data; + struct mdp_ar_gc_lut_data *g_data; + struct mdp_ar_gc_lut_data *b_data; +}; + + +struct mdp_hist_lut_data { + uint32_t block; + uint32_t ops; + uint32_t len; + uint32_t *data; +}; + + +struct mdp_lut_cfg_data { + uint32_t lut_type; + union { + struct mdp_igc_lut_data igc_lut_data; + struct mdp_pgc_lut_data pgc_lut_data; + struct mdp_hist_lut_data hist_lut_data; + } data; +}; + +struct mdp_qseed_cfg_data { + uint32_t block; + uint32_t table_num; + uint32_t ops; + uint32_t len; + uint32_t *data; +}; + + +enum { + mdp_op_pcc_cfg, + mdp_op_csc_cfg, + mdp_op_lut_cfg, + mdp_op_qseed_cfg, + mdp_op_max, +}; + +struct msmfb_mdp_pp { + uint32_t op; + union { + struct mdp_pcc_cfg_data pcc_cfg_data; + struct mdp_csc_cfg_data csc_cfg_data; + struct mdp_lut_cfg_data lut_cfg_data; + struct mdp_qseed_cfg_data qseed_cfg_data; + } data; +}; + + +struct mdp_page_protection { + uint32_t page_protection; +}; + + +struct mdp_mixer_info { + int pndx; + int pnum; + int ptype; + int mixer_num; + int z_order; +}; + +#define MAX_PIPE_PER_MIXER 4 + +struct msmfb_mixer_info_req { + int mixer_num; + int cnt; + struct mdp_mixer_info info[MAX_PIPE_PER_MIXER]; +}; + +enum { + DISPLAY_SUBSYSTEM_ID, + ROTATOR_SUBSYSTEM_ID, +}; + +#ifdef __KERNEL__ + +/* get the framebuffer physical address information */ +int get_fb_phys_info(unsigned long *start, unsigned long *len, int fb_num, + int subsys_id); +struct fb_info *msm_fb_get_writeback_fb(void); +int msm_fb_writeback_init(struct fb_info *info); +int msm_fb_writeback_start(struct fb_info *info); +int msm_fb_writeback_queue_buffer(struct fb_info *info, + struct msmfb_data *data); +int msm_fb_writeback_dequeue_buffer(struct fb_info *info, + struct msmfb_data *data); +int msm_fb_writeback_stop(struct fb_info *info); +int 
msm_fb_writeback_terminate(struct fb_info *info); +#endif + +#endif /* _MSM_MDP_H_ */ From 1e188cc5e18bd168de398dd9c3dff19c3dc20464 Mon Sep 17 00:00:00 2001 From: securecrt Date: Thu, 2 Aug 2012 13:21:31 +0800 Subject: [PATCH 02/22] net: netfilter: enable bandwidth control to be able to set mobile data limit --- arch/arm/configs/htcleo_defconfig | 21 +++++++++++++++++---- net/netfilter/xt_TPROXY.c | 2 ++ net/netfilter/xt_socket.c | 2 ++ 3 files changed, 21 insertions(+), 4 deletions(-) diff --git a/arch/arm/configs/htcleo_defconfig b/arch/arm/configs/htcleo_defconfig index d330e5ef..96d43e38 100644 --- a/arch/arm/configs/htcleo_defconfig +++ b/arch/arm/configs/htcleo_defconfig @@ -32,7 +32,7 @@ CONFIG_EXPERIMENTAL=y CONFIG_BROKEN_ON_SMP=y CONFIG_LOCK_KERNEL=y CONFIG_INIT_ENV_ARG_LIMIT=32 -CONFIG_LOCALVERSION="_tytung_HWA_r2.5" +CONFIG_LOCALVERSION="_tytung_HWA_r3.3_JB" # CONFIG_LOCALVERSION_AUTO is not set CONFIG_HAVE_KERNEL_GZIP=y CONFIG_HAVE_KERNEL_BZIP2=y @@ -608,7 +608,7 @@ CONFIG_NETFILTER_XT_TARGET_CONNMARK=y # CONFIG_NETFILTER_XT_TARGET_DSCP is not set # CONFIG_NETFILTER_XT_TARGET_HL is not set CONFIG_NETFILTER_XT_TARGET_MARK=y -# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set +CONFIG_NETFILTER_XT_TARGET_NFLOG=y CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y # CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set # CONFIG_NETFILTER_XT_TARGET_RATEEST is not set @@ -709,8 +709,21 @@ CONFIG_IP_NF_ARP_MANGLE=y CONFIG_NF_DEFRAG_IPV6=y CONFIG_NF_CONNTRACK_IPV6=y # CONFIG_IP6_NF_QUEUE is not set -# CONFIG_IP6_NF_IPTABLES is not set -# CONFIG_BRIDGE_NF_EBTABLES is not set +CONFIG_IP6_NF_IPTABLES=y +# CONFIG_IP6_NF_MATCH_AH is not set +# CONFIG_IP6_NF_MATCH_EUI64 is not set +# CONFIG_IP6_NF_MATCH_FRAG is not set +# CONFIG_IP6_NF_MATCH_OPTS is not set +# CONFIG_IP6_NF_MATCH_HL is not set +# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set +# CONFIG_IP6_NF_MATCH_MH is not set +# CONFIG_IP6_NF_MATCH_RT is not set +# CONFIG_IP6_NF_TARGET_HL is not set +CONFIG_IP6_NF_TARGET_LOG=y +CONFIG_IP6_NF_FILTER=y +CONFIG_IP6_NF_TARGET_REJECT=y +CONFIG_IP6_NF_MANGLE=y +CONFIG_IP6_NF_RAW=y # CONFIG_IP_DCCP is not set # CONFIG_IP_SCTP is not set # CONFIG_RDS is not set diff --git a/net/netfilter/xt_TPROXY.c b/net/netfilter/xt_TPROXY.c index 4fa12857..10640fdd 100644 --- a/net/netfilter/xt_TPROXY.c +++ b/net/netfilter/xt_TPROXY.c @@ -22,6 +22,7 @@ #include +/* #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #define XT_TPROXY_HAVE_IPV6 1 #include @@ -29,6 +30,7 @@ #include #include #endif +*/ #include #include diff --git a/net/netfilter/xt_socket.c b/net/netfilter/xt_socket.c index 9b38fd15..0f10dfc6 100644 --- a/net/netfilter/xt_socket.c +++ b/net/netfilter/xt_socket.c @@ -22,11 +22,13 @@ #include #include +/* #if defined(CONFIG_IP6_NF_IPTABLES) || defined(CONFIG_IP6_NF_IPTABLES_MODULE) #define XT_SOCKET_HAVE_IPV6 1 #include #include #endif +*/ #include From 55ece772cde41f9925aaaaa64664cb723e12d87e Mon Sep 17 00:00:00 2001 From: securecrt Date: Thu, 2 Aug 2012 17:04:07 +0800 Subject: [PATCH 03/22] msm: kgsl: Add per context timestamp Add new ioctls for per context timestamps. Timestamp functions (read/write/wait) will now be context specific rather than only using the global timestamp. Per context timestamps are a requirement for priority based queueing. 
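As a usage sketch only (not part of the diff below): a client that created its
context with KGSL_CONTEXT_PER_CONTEXT_TS could wait on that context's own
timestamp roughly as follows. The struct fields mirror what
kgsl_ioctl_device_waittimestamp_ctxtid() reads in this patch; the exact ioctl
encoding and struct definition live in include/linux/msm_kgsl.h (updated by
this patch but not reproduced in full in this excerpt), and the device node
path is an assumption.

	int fd = open("/dev/kgsl-3d0", O_RDWR);	/* assumed 3D device node */
	struct kgsl_device_waittimestamp_ctxtid wait = {
		.context_id = ctxt_id,	/* id from IOCTL_KGSL_DRAWCTXT_CREATE */
		.timestamp = ts,	/* ts from IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS */
		.timeout = 1000,	/* milliseconds */
	};
	/* waits on this context's timestamp, not the global ringbuffer one */
	if (ioctl(fd, IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, &wait) != 0)
		perror("IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID");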
--- drivers/gpu/msm/adreno.c | 128 +++++++++----- drivers/gpu/msm/adreno_a2xx.c | 24 +-- drivers/gpu/msm/adreno_a3xx.c | 14 +- drivers/gpu/msm/adreno_drawctxt.c | 12 +- drivers/gpu/msm/adreno_drawctxt.h | 5 + drivers/gpu/msm/adreno_postmortem.c | 18 +- drivers/gpu/msm/adreno_ringbuffer.c | 128 ++++++++++---- drivers/gpu/msm/adreno_ringbuffer.h | 3 +- drivers/gpu/msm/kgsl.c | 254 ++++++++++++++++++++++------ drivers/gpu/msm/kgsl.h | 9 + drivers/gpu/msm/kgsl_device.h | 19 ++- drivers/gpu/msm/z180.c | 14 +- include/linux/msm_kgsl.h | 59 ++++++- 13 files changed, 515 insertions(+), 172 deletions(-) mode change 100644 => 100755 drivers/gpu/msm/z180.c diff --git a/drivers/gpu/msm/adreno.c b/drivers/gpu/msm/adreno.c index 4020fefd..4e434e4c 100755 --- a/drivers/gpu/msm/adreno.c +++ b/drivers/gpu/msm/adreno.c @@ -565,13 +565,13 @@ adreno_recover_hang(struct kgsl_device *device) struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; unsigned int timestamp; unsigned int num_rb_contents; - unsigned int bad_context; unsigned int reftimestamp; unsigned int enable_ts; unsigned int soptimestamp; unsigned int eoptimestamp; - struct adreno_context *drawctxt; + unsigned int context_id; struct kgsl_context *context; + struct adreno_context *adreno_context; int next = 0; KGSL_DRV_ERR(device, "Starting recovery from 3D GPU hang....\n"); @@ -587,22 +587,35 @@ adreno_recover_hang(struct kgsl_device *device) ret = adreno_ringbuffer_extract(rb, rb_buffer, &num_rb_contents); if (ret) goto done; - timestamp = rb->timestamp; - KGSL_DRV_ERR(device, "Last issued timestamp: %x\n", timestamp); - kgsl_sharedmem_readl(&device->memstore, &bad_context, - KGSL_DEVICE_MEMSTORE_OFFSET(current_context)); + kgsl_sharedmem_readl(&device->memstore, &context_id, + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + current_context)); + context = idr_find(&device->context_idr, context_id); + if (context == NULL) { + KGSL_DRV_ERR(device, "Last context unknown id:%d\n", + context_id); + context_id = KGSL_MEMSTORE_GLOBAL; + } + + timestamp = rb->timestamp[KGSL_MEMSTORE_GLOBAL]; + KGSL_DRV_ERR(device, "Last issued global timestamp: %x\n", timestamp); + kgsl_sharedmem_readl(&device->memstore, &reftimestamp, - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)); + KGSL_MEMSTORE_OFFSET(context_id, + ref_wait_ts)); kgsl_sharedmem_readl(&device->memstore, &enable_ts, - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)); + KGSL_MEMSTORE_OFFSET(context_id, + ts_cmp_enable)); kgsl_sharedmem_readl(&device->memstore, &soptimestamp, - KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp)); + KGSL_MEMSTORE_OFFSET(context_id, + soptimestamp)); kgsl_sharedmem_readl(&device->memstore, &eoptimestamp, - KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)); + KGSL_MEMSTORE_OFFSET(context_id, + eoptimestamp)); /* Make sure memory is synchronized before restarting the GPU */ mb(); KGSL_CTXT_ERR(device, - "Context that caused a GPU hang: %x\n", bad_context); + "Context id that caused a GPU hang: %d\n", context_id); /* restart device */ ret = adreno_stop(device); if (ret) @@ -613,20 +626,20 @@ adreno_recover_hang(struct kgsl_device *device) KGSL_DRV_ERR(device, "Device has been restarted after hang\n"); /* Restore timestamp states */ kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp), + KGSL_MEMSTORE_OFFSET(context_id, soptimestamp), soptimestamp); kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), + KGSL_MEMSTORE_OFFSET(context_id, eoptimestamp), eoptimestamp); kgsl_sharedmem_writel(&device->memstore, - 
KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp), + KGSL_MEMSTORE_OFFSET(context_id, soptimestamp), soptimestamp); if (num_rb_contents) { kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), + KGSL_MEMSTORE_OFFSET(context_id, ref_wait_ts), reftimestamp); kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), + KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable), enable_ts); } /* Make sure all writes are posted before the GPU reads them */ @@ -634,12 +647,12 @@ adreno_recover_hang(struct kgsl_device *device) /* Mark the invalid context so no more commands are accepted from * that context */ - drawctxt = (struct adreno_context *) bad_context; + adreno_context = context->devctxt; KGSL_CTXT_ERR(device, - "Context that caused a GPU hang: %x\n", bad_context); + "Context that caused a GPU hang: %d\n", adreno_context->id); - drawctxt->flags |= CTXT_FLAGS_GPU_HANG; + adreno_context->flags |= CTXT_FLAGS_GPU_HANG; /* * Set the reset status of all contexts to @@ -649,7 +662,7 @@ adreno_recover_hang(struct kgsl_device *device) while ((context = idr_get_next(&device->context_idr, &next))) { if (KGSL_CTX_STAT_GUILTY_CONTEXT_RESET_EXT != context->reset_status) { - if (context->devctxt != drawctxt) + if (context->id != context_id) context->reset_status = KGSL_CTX_STAT_INNOCENT_CONTEXT_RESET_EXT; else @@ -661,7 +674,7 @@ adreno_recover_hang(struct kgsl_device *device) /* Restore valid commands in ringbuffer */ adreno_ringbuffer_restore(rb, rb_buffer, num_rb_contents); - rb->timestamp = timestamp; + rb->timestamp[KGSL_MEMSTORE_GLOBAL] = timestamp; done: vfree(rb_buffer); return ret; @@ -755,7 +768,8 @@ static int adreno_getproperty(struct kgsl_device *device, shadowprop.size = device->memstore.size; /* GSL needs this to be set, even if it appears to be meaningless */ - shadowprop.flags = KGSL_FLAGS_INITIALIZED; + shadowprop.flags = KGSL_FLAGS_INITIALIZED | + KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS; } if (copy_to_user(value, &shadowprop, sizeof(shadowprop))) { @@ -1011,38 +1025,58 @@ void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords, __raw_writel(value, reg); } +static unsigned int _get_context_id(struct kgsl_context *k_ctxt) +{ + unsigned int context_id = KGSL_MEMSTORE_GLOBAL; + + if (k_ctxt != NULL) { + struct adreno_context *a_ctxt = k_ctxt->devctxt; + /* + * if the context was not created with per context timestamp + * support, we must use the global timestamp since issueibcmds + * will be returning that one. 
+ */ + if (a_ctxt->flags & CTXT_FLAGS_PER_CONTEXT_TS) + context_id = a_ctxt->id; + } + + return context_id; +} + static int kgsl_check_interrupt_timestamp(struct kgsl_device *device, - unsigned int timestamp) + struct kgsl_context *context, unsigned int timestamp) { int status; unsigned int ref_ts, enableflag; + unsigned int context_id = _get_context_id(context); - status = kgsl_check_timestamp(device, timestamp); + status = kgsl_check_timestamp(device, context, timestamp); if (!status) { mutex_lock(&device->mutex); kgsl_sharedmem_readl(&device->memstore, &enableflag, - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)); + KGSL_MEMSTORE_OFFSET(context_id, ts_cmp_enable)); mb(); if (enableflag) { kgsl_sharedmem_readl(&device->memstore, &ref_ts, - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)); + KGSL_MEMSTORE_OFFSET(context_id, + ref_wait_ts)); mb(); if (timestamp_cmp(ref_ts, timestamp) >= 0) { kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), - timestamp); + KGSL_MEMSTORE_OFFSET(context_id, + ref_wait_ts), timestamp); wmb(); } } else { unsigned int cmds[2]; kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), - timestamp); + KGSL_MEMSTORE_OFFSET(context_id, + ref_wait_ts), timestamp); enableflag = 1; kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), - enableflag); + KGSL_MEMSTORE_OFFSET(context_id, + ts_cmp_enable), enableflag); wmb(); /* submit a dummy packet so that even if all * commands upto timestamp get executed we will still @@ -1076,6 +1110,7 @@ static int kgsl_check_interrupt_timestamp(struct kgsl_device *device, /* MUST be called with the device mutex held */ static int adreno_waittimestamp(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp, unsigned int msecs) { @@ -1087,15 +1122,19 @@ static int adreno_waittimestamp(struct kgsl_device *device, int retries; unsigned int msecs_first; unsigned int msecs_part; + unsigned int ts_issued; + unsigned int context_id = _get_context_id(context); + + ts_issued = adreno_dev->ringbuffer.timestamp[context_id]; /* Don't wait forever, set a max value for now */ if (msecs == -1) msecs = adreno_dev->wait_timeout; - if (timestamp_cmp(timestamp, adreno_dev->ringbuffer.timestamp) > 0) { - KGSL_DRV_ERR(device, "Cannot wait for invalid ts: %x, " - "rb->timestamp: %x\n", - timestamp, adreno_dev->ringbuffer.timestamp); + if (timestamp_cmp(timestamp, ts_issued) > 0) { + KGSL_DRV_ERR(device, "Cannot wait for invalid ts <%d:0x%x>, " + "last issued ts <%d:0x%x>\n", + context_id, timestamp, context_id, ts_issued); status = -EINVAL; goto done; } @@ -1107,7 +1146,7 @@ static int adreno_waittimestamp(struct kgsl_device *device, msecs_first = (msecs <= 100) ? ((msecs + 4) / 5) : 100; msecs_part = (msecs - msecs_first + 3) / 4; for (retries = 0; retries < 5; retries++) { - if (kgsl_check_timestamp(device, timestamp)) { + if (kgsl_check_timestamp(device, context, timestamp)) { /* if the timestamp happens while we're not * waiting, there's a chance that an interrupt * will not be generated and thus the timestamp @@ -1130,7 +1169,7 @@ static int adreno_waittimestamp(struct kgsl_device *device, status = kgsl_wait_event_interruptible_timeout( device->wait_queue, kgsl_check_interrupt_timestamp(device, - timestamp), + context, timestamp), msecs_to_jiffies(retries ? 
msecs_part : msecs_first), io); mutex_lock(&device->mutex); @@ -1147,9 +1186,10 @@ static int adreno_waittimestamp(struct kgsl_device *device, } status = -ETIMEDOUT; KGSL_DRV_ERR(device, - "Device hang detected while waiting for timestamp: %x," - "last submitted(rb->timestamp): %x, wptr: %x\n", - timestamp, adreno_dev->ringbuffer.timestamp, + "Device hang detected while waiting for timestamp: " + "<%d:0x%x>, last submitted timestamp: <%d:0x%x>, " + "wptr: 0x%x\n", + context_id, timestamp, context_id, ts_issued, adreno_dev->ringbuffer.wptr); if (!adreno_dump_and_recover(device)) { /* wait for idle after recovery as the @@ -1163,15 +1203,17 @@ done: } static unsigned int adreno_readtimestamp(struct kgsl_device *device, - enum kgsl_timestamp_type type) + struct kgsl_context *context, enum kgsl_timestamp_type type) { unsigned int timestamp = 0; + unsigned int context_id = _get_context_id(context); if (type == KGSL_TIMESTAMP_CONSUMED) adreno_regread(device, REG_CP_TIMESTAMP, ×tamp); else if (type == KGSL_TIMESTAMP_RETIRED) kgsl_sharedmem_readl(&device->memstore, ×tamp, - KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)); + KGSL_MEMSTORE_OFFSET(context_id, + eoptimestamp)); rmb(); return timestamp; diff --git a/drivers/gpu/msm/adreno_a2xx.c b/drivers/gpu/msm/adreno_a2xx.c index cc611779..dc43062e 100755 --- a/drivers/gpu/msm/adreno_a2xx.c +++ b/drivers/gpu/msm/adreno_a2xx.c @@ -1427,8 +1427,8 @@ static void a2xx_drawctxt_restore(struct adreno_device *adreno_dev, cmds[1] = KGSL_CONTEXT_TO_MEM_IDENTIFIER; cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[3] = device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(current_context); - cmds[4] = (unsigned int) context; + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context); + cmds[4] = context->id; adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5); kgsl_mmu_setstate(device, context->pagetable); @@ -1551,11 +1551,18 @@ static void a2xx_cp_intrcallback(struct kgsl_device *device) if (status & CP_INT_CNTL__RB_INT_MASK) { /* signal intr completion event */ - unsigned int enableflag = 0; + unsigned int context_id; + kgsl_sharedmem_readl(&device->memstore, + &context_id, + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + current_context)); + if (context_id < KGSL_MEMSTORE_MAX) { kgsl_sharedmem_writel(&rb->device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), - enableflag); + KGSL_MEMSTORE_OFFSET(context_id, + ts_cmp_enable), 0); + device->last_expired_ctxt_id = context_id; wmb(); + } KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n"); } @@ -1780,7 +1787,6 @@ static void a2xx_gmeminit(struct adreno_device *adreno_dev) static void a2xx_start(struct adreno_device *adreno_dev) { struct kgsl_device *device = &adreno_dev->dev; - int init_reftimestamp = 0x7fffffff; /* * We need to make sure all blocks are powered up and clocked @@ -1833,12 +1839,6 @@ static void a2xx_start(struct adreno_device *adreno_dev) else adreno_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0x80); - kgsl_sharedmem_set(&device->memstore, 0, 0, device->memstore.size); - - kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts), - init_reftimestamp); - adreno_regwrite(device, REG_RBBM_DEBUG, 0x00080000); /* Make sure interrupts are disabled */ diff --git a/drivers/gpu/msm/adreno_a3xx.c b/drivers/gpu/msm/adreno_a3xx.c index cbc7bed4..507ad02e 100755 --- a/drivers/gpu/msm/adreno_a3xx.c +++ b/drivers/gpu/msm/adreno_a3xx.c @@ -2222,8 +2222,8 @@ static void a3xx_drawctxt_restore(struct adreno_device *adreno_dev, cmds[1] = 
KGSL_CONTEXT_TO_MEM_IDENTIFIER; cmds[2] = cp_type3_packet(CP_MEM_WRITE, 2); cmds[3] = device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(current_context); - cmds[4] = (unsigned int)context; + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, current_context); + cmds[4] = context->id; adreno_ringbuffer_issuecmds(device, KGSL_CMD_FLAGS_NONE, cmds, 5); kgsl_mmu_setstate(device, context->pagetable); @@ -2366,9 +2366,17 @@ static void a3xx_cp_callback(struct adreno_device *adreno_dev, int irq) struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer; if (irq == A3XX_INT_CP_RB_INT) { + unsigned int context_id; + kgsl_sharedmem_readl(&adreno_dev->dev.memstore, + &context_id, + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + current_context)); + if (context_id < KGSL_MEMSTORE_MAX) { kgsl_sharedmem_writel(&rb->device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable), 0); + KGSL_MEMSTORE_OFFSET(context_id, + ts_cmp_enable), 0); wmb(); + } KGSL_CMD_WARN(rb->device, "ringbuffer rb interrupt\n"); } diff --git a/drivers/gpu/msm/adreno_drawctxt.c b/drivers/gpu/msm/adreno_drawctxt.c index 87f9efe4..fc4789ad 100755 --- a/drivers/gpu/msm/adreno_drawctxt.c +++ b/drivers/gpu/msm/adreno_drawctxt.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -17,6 +17,8 @@ #include "kgsl_sharedmem.h" #include "adreno.h" +#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF + /* quad for copying GMEM to context shadow */ #define QUAD_LEN 12 #define QUAD_RESTORE_LEN 14 @@ -154,6 +156,7 @@ int adreno_drawctxt_create(struct kgsl_device *device, drawctxt->pagetable = pagetable; drawctxt->bin_base_offset = 0; + drawctxt->id = context->id; if (flags & KGSL_CONTEXT_PREAMBLE) drawctxt->flags |= CTXT_FLAGS_PREAMBLE; @@ -161,10 +164,17 @@ int adreno_drawctxt_create(struct kgsl_device *device, if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC) drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC; + if (flags & KGSL_CONTEXT_PER_CONTEXT_TS) + drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS; + ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt); if (ret) goto err; + kgsl_sharedmem_writel(&device->memstore, + KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts), + KGSL_INIT_REFTIMESTAMP); + context->devctxt = drawctxt; return 0; err: diff --git a/drivers/gpu/msm/adreno_drawctxt.h b/drivers/gpu/msm/adreno_drawctxt.h index 50ee3450..61198ebd 100755 --- a/drivers/gpu/msm/adreno_drawctxt.h +++ b/drivers/gpu/msm/adreno_drawctxt.h @@ -40,6 +40,10 @@ #define CTXT_FLAGS_GPU_HANG 0x00008000 /* Specifies there is no need to save GMEM */ #define CTXT_FLAGS_NOGMEMALLOC 0x00010000 +/* Trash state for context */ +#define CTXT_FLAGS_TRASHSTATE 0x00020000 +/* per context timestamps enabled */ +#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000 struct kgsl_device; struct adreno_device; @@ -72,6 +76,7 @@ struct gmem_shadow_t { }; struct adreno_context { + unsigned int id; uint32_t flags; struct kgsl_pagetable *pagetable; struct kgsl_memdesc gpustate; diff --git a/drivers/gpu/msm/adreno_postmortem.c b/drivers/gpu/msm/adreno_postmortem.c index 7e073fd9..427741f1 100755 --- a/drivers/gpu/msm/adreno_postmortem.c +++ b/drivers/gpu/msm/adreno_postmortem.c @@ -14,6 +14,7 @@ #include #include "kgsl.h" +#include "kgsl_sharedmem.h" #include "adreno.h" #include "adreno_pm4types.h" @@ -464,7 +465,9 @@ static int adreno_dump(struct kgsl_device *device) 
const uint32_t *rb_vaddr; int num_item = 0; int read_idx, write_idx; - unsigned int ts_processed; + unsigned int ts_processed = 0xdeaddead; + struct kgsl_context *context; + unsigned int context_id; static struct ib_list ib_list; @@ -660,9 +663,18 @@ static int adreno_dump(struct kgsl_device *device) KGSL_LOG_DUMP(device, "MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2); - ts_processed = device->ftbl->readtimestamp(device, + kgsl_sharedmem_readl(&device->memstore, + (unsigned int *) &context_id, + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + current_context)); + context = idr_find(&device->context_idr, context_id); + if (context) { + ts_processed = device->ftbl->readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED); - KGSL_LOG_DUMP(device, "TIMESTM RTRD: %08X\n", ts_processed); + KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n", + context->id, ts_processed); + } else + KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id); num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer, cp_rb_rptr); diff --git a/drivers/gpu/msm/adreno_ringbuffer.c b/drivers/gpu/msm/adreno_ringbuffer.c index 71f239cc..da80576f 100755 --- a/drivers/gpu/msm/adreno_ringbuffer.c +++ b/drivers/gpu/msm/adreno_ringbuffer.c @@ -236,7 +236,7 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram) return 0; if (init_ram) { - rb->timestamp = 0; + rb->timestamp[KGSL_MEMSTORE_GLOBAL] = 0; GSL_RB_INIT_TIMESTAMP(rb); } @@ -321,18 +321,13 @@ int adreno_ringbuffer_start(struct adreno_ringbuffer *rb, unsigned int init_ram) } /* setup scratch/timestamp */ - adreno_regwrite(device, REG_SCRATCH_ADDR, - device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(soptimestamp)); + adreno_regwrite(device, REG_SCRATCH_ADDR, device->memstore.gpuaddr + + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + soptimestamp)); adreno_regwrite(device, REG_SCRATCH_UMSK, GSL_RB_MEMPTRS_SCRATCH_MASK); - /* update the eoptimestamp field with the last retired timestamp */ - kgsl_sharedmem_writel(&device->memstore, - KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp), - rb->timestamp); - /* load the CP ucode */ status = adreno_ringbuffer_load_pm4_ucode(device); @@ -431,15 +426,28 @@ void adreno_ringbuffer_close(struct adreno_ringbuffer *rb) static uint32_t adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, + struct adreno_context *context, unsigned int flags, unsigned int *cmds, int sizedwords) { struct adreno_device *adreno_dev = ADRENO_DEVICE(rb->device); unsigned int *ringcmds; unsigned int timestamp; - unsigned int total_sizedwords = sizedwords + 6; + unsigned int total_sizedwords = sizedwords; unsigned int i; unsigned int rcmd_gpu; + unsigned int context_id = KGSL_MEMSTORE_GLOBAL; + unsigned int gpuaddr = rb->device->memstore.gpuaddr; + + if (context != NULL) { + /* + * if the context was not created with per context timestamp + * support, we must use the global timestamp since issueibcmds + * will be returning that one. 
+ */ + if (context->flags & CTXT_FLAGS_PER_CONTEXT_TS) + context_id = context->id; + } /* reserve space to temporarily turn off protected mode * error checking if needed @@ -451,6 +459,13 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, if (adreno_is_a3xx(adreno_dev)) total_sizedwords += 7; + total_sizedwords += 2; /* scratchpad ts for recovery */ + if (context) { + total_sizedwords += 3; /* sop timestamp */ + total_sizedwords += 4; /* eop timestamp */ + } + total_sizedwords += 4; /* global timestamp for recovery*/ + ringcmds = adreno_ringbuffer_allocspace(rb, total_sizedwords); rcmd_gpu = rb->buffer_desc.gpuaddr + sizeof(uint)*(rb->wptr-total_sizedwords); @@ -478,12 +493,20 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, GSL_RB_WRITE(ringcmds, rcmd_gpu, 1); } - rb->timestamp++; - timestamp = rb->timestamp; + /* always increment the global timestamp. once. */ + rb->timestamp[KGSL_MEMSTORE_GLOBAL]++; + if (context) { + if (context_id == KGSL_MEMSTORE_GLOBAL) + rb->timestamp[context_id] = + rb->timestamp[KGSL_MEMSTORE_GLOBAL]; + else + rb->timestamp[context_id]++; + } + timestamp = rb->timestamp[context_id]; - /* start-of-pipeline and end-of-pipeline timestamps */ + /* scratchpad ts for recovery */ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type0_packet(REG_CP_TIMESTAMP, 1)); - GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp); + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]); if (adreno_is_a3xx(adreno_dev)) { /* @@ -499,22 +522,41 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, GSL_RB_WRITE(ringcmds, rcmd_gpu, 0x00); } + if (context) { + /* start-of-pipeline timestamp */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, + cp_type3_packet(CP_MEM_WRITE, 2)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr + + KGSL_MEMSTORE_OFFSET(context->id, soptimestamp))); + GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp); + + /* end-of-pipeline timestamp */ + GSL_RB_WRITE(ringcmds, rcmd_gpu, + cp_type3_packet(CP_EVENT_WRITE, 3)); + GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr + + KGSL_MEMSTORE_OFFSET(context->id, eoptimestamp))); + GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp); + } + GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_EVENT_WRITE, 3)); GSL_RB_WRITE(ringcmds, rcmd_gpu, CACHE_FLUSH_TS); - GSL_RB_WRITE(ringcmds, rcmd_gpu, - (rb->device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp))); - GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr + + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + eoptimestamp))); + GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp[KGSL_MEMSTORE_GLOBAL]); if (!(flags & KGSL_CMD_FLAGS_NO_TS_CMP)) { /* Conditional execution based on memory values */ GSL_RB_WRITE(ringcmds, rcmd_gpu, cp_type3_packet(CP_COND_EXEC, 4)); - GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(ts_cmp_enable)) >> 2); - GSL_RB_WRITE(ringcmds, rcmd_gpu, (rb->device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(ref_wait_ts)) >> 2); - GSL_RB_WRITE(ringcmds, rcmd_gpu, rb->timestamp); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr + + KGSL_MEMSTORE_OFFSET( + context_id, ts_cmp_enable)) >> 2); + GSL_RB_WRITE(ringcmds, rcmd_gpu, (gpuaddr + + KGSL_MEMSTORE_OFFSET( + context_id, ref_wait_ts)) >> 2); + GSL_RB_WRITE(ringcmds, rcmd_gpu, timestamp); /* # of conditional command DWORDs */ GSL_RB_WRITE(ringcmds, rcmd_gpu, 2); GSL_RB_WRITE(ringcmds, rcmd_gpu, @@ -533,7 +575,6 @@ adreno_ringbuffer_addcmds(struct adreno_ringbuffer *rb, 
adreno_ringbuffer_submit(rb); - /* return timestamp of issued coREG_ands */ return timestamp; } @@ -548,7 +589,7 @@ adreno_ringbuffer_issuecmds(struct kgsl_device *device, if (device->state & KGSL_STATE_HUNG) return; - adreno_ringbuffer_addcmds(rb, flags, cmds, sizedwords); + adreno_ringbuffer_addcmds(rb, NULL, flags, cmds, sizedwords); } static bool _parse_ibs(struct kgsl_device_private *dev_priv, uint gpuaddr, @@ -769,8 +810,8 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, if (drawctxt->flags & CTXT_FLAGS_GPU_HANG) { KGSL_CTXT_WARN(device, "Context %p caused a gpu hang.." - " will not accept commands for this context\n", - drawctxt); + " will not accept commands for context %d\n", + drawctxt, drawctxt->id); return -EDEADLK; } @@ -822,6 +863,7 @@ adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv, adreno_drawctxt_switch(adreno_dev, drawctxt, flags); *timestamp = adreno_ringbuffer_addcmds(&adreno_dev->ringbuffer, + drawctxt, KGSL_CMD_FLAGS_NOT_KERNEL_CMD, &link[0], (cmds - link)); @@ -855,11 +897,25 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, unsigned int val2; unsigned int val3; unsigned int copy_rb_contents = 0; - unsigned int cur_context; + struct kgsl_context *context; + unsigned int context_id; GSL_RB_GET_READPTR(rb, &rb->rptr); - retired_timestamp = device->ftbl->readtimestamp(device, + /* current_context is the context that is presently active in the + * GPU, i.e the context in which the hang is caused */ + kgsl_sharedmem_readl(&device->memstore, &context_id, + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + current_context)); + KGSL_DRV_ERR(device, "Last context id: %d\n", context_id); + context = idr_find(&device->context_idr, context_id); + if (context == NULL) { + KGSL_DRV_ERR(device, + "GPU recovery from hang not possible because last" + " context id is invalid.\n"); + return -EINVAL; + } + retired_timestamp = device->ftbl->readtimestamp(device, context, KGSL_TIMESTAMP_RETIRED); KGSL_DRV_ERR(device, "GPU successfully executed till ts: %x\n", retired_timestamp); @@ -894,7 +950,8 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, (val1 == cp_type3_packet(CP_EVENT_WRITE, 3) && val2 == CACHE_FLUSH_TS && val3 == (rb->device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(eoptimestamp)))) { + KGSL_MEMSTORE_OFFSET(context_id, + eoptimestamp)))) { rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, rb->buffer_desc.size); KGSL_DRV_ERR(device, @@ -940,10 +997,6 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, return -EINVAL; } - /* current_context is the context that is presently active in the - * GPU, i.e the context in which the hang is caused */ - kgsl_sharedmem_readl(&device->memstore, &cur_context, - KGSL_DEVICE_MEMSTORE_OFFSET(current_context)); while ((rb_rptr / sizeof(unsigned int)) != rb->wptr) { kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr); rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, @@ -958,7 +1011,8 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, rb->buffer_desc.size); BUG_ON(val1 != (device->memstore.gpuaddr + - KGSL_DEVICE_MEMSTORE_OFFSET(current_context))); + KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL, + current_context))); kgsl_sharedmem_readl(&rb->buffer_desc, &value, rb_rptr); rb_rptr = adreno_ringbuffer_inc_wrapped(rb_rptr, rb->buffer_desc.size); @@ -970,7 +1024,7 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, * and leave. 
*/ - if ((copy_rb_contents == 0) && (value == cur_context)) { + if ((copy_rb_contents == 0) && (value == context_id)) { KGSL_DRV_ERR(device, "GPU recovery could not " "find the previous context\n"); return -EINVAL; @@ -986,7 +1040,7 @@ int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb, /* if context switches to a context that did not cause * hang then start saving the rb contents as those * commands can be executed */ - if (value != cur_context) { + if (value != context_id) { copy_rb_contents = 1; temp_rb_buffer[temp_idx++] = cp_nop_packet(1); temp_rb_buffer[temp_idx++] = diff --git a/drivers/gpu/msm/adreno_ringbuffer.h b/drivers/gpu/msm/adreno_ringbuffer.h index d0110b9f..7c93b3b5 100755 --- a/drivers/gpu/msm/adreno_ringbuffer.h +++ b/drivers/gpu/msm/adreno_ringbuffer.h @@ -54,7 +54,8 @@ struct adreno_ringbuffer { unsigned int wptr; /* write pointer offset in dwords from baseaddr */ unsigned int rptr; /* read pointer offset in dwords from baseaddr */ - uint32_t timestamp; + + unsigned int timestamp[KGSL_MEMSTORE_MAX]; }; diff --git a/drivers/gpu/msm/kgsl.c b/drivers/gpu/msm/kgsl.c index e7c4ff8b..491944ba 100755 --- a/drivers/gpu/msm/kgsl.c +++ b/drivers/gpu/msm/kgsl.c @@ -58,22 +58,30 @@ static struct ion_client *kgsl_ion_client; * @returns - 0 on success or error code on failure */ -static int kgsl_add_event(struct kgsl_device *device, u32 ts, - void (*cb)(struct kgsl_device *, void *, u32), void *priv, +static int kgsl_add_event(struct kgsl_device *device, u32 id, u32 ts, + void (*cb)(struct kgsl_device *, void *, u32, u32), void *priv, struct kgsl_device_private *owner) { struct kgsl_event *event; struct list_head *n; - unsigned int cur = device->ftbl->readtimestamp(device, - KGSL_TIMESTAMP_RETIRED); + unsigned int cur_ts; + struct kgsl_context *context = NULL; if (cb == NULL) return -EINVAL; + if (id != KGSL_MEMSTORE_GLOBAL) { + context = idr_find(&device->context_idr, id); + if (context == NULL) + return -EINVAL; + } + cur_ts = device->ftbl->readtimestamp(device, context, + KGSL_TIMESTAMP_RETIRED); + /* Check to see if the requested timestamp has already fired */ - if (timestamp_cmp(cur, ts) >= 0) { - cb(device, priv, cur); + if (timestamp_cmp(cur_ts, ts) >= 0) { + cb(device, priv, id, cur_ts); return 0; } @@ -81,17 +89,24 @@ static int kgsl_add_event(struct kgsl_device *device, u32 ts, if (event == NULL) return -ENOMEM; + event->context = context; event->timestamp = ts; event->priv = priv; event->func = cb; event->owner = owner; - /* Add the event in order to the list */ + /* + * Add the event in order to the list. Order is by context id + * first and then by timestamp for that context. + */ for (n = device->events.next ; n != &device->events; n = n->next) { struct kgsl_event *e = list_entry(n, struct kgsl_event, list); + if (e->context != context) + continue; + if (timestamp_cmp(e->timestamp, ts) > 0) { list_add(&event->list, n->prev); break; @@ -115,12 +130,16 @@ static void kgsl_cancel_events(struct kgsl_device *device, struct kgsl_device_private *owner) { struct kgsl_event *event, *event_tmp; - unsigned int cur = device->ftbl->readtimestamp(device, - KGSL_TIMESTAMP_RETIRED); + unsigned int id, cur; list_for_each_entry_safe(event, event_tmp, &device->events, list) { if (event->owner != owner) continue; + + cur = device->ftbl->readtimestamp(device, event->context, + KGSL_TIMESTAMP_RETIRED); + + id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL; /* * "cancel" the events by calling their callback. 
* Currently, events are used for lock and memory @@ -128,7 +147,7 @@ static void kgsl_cancel_events(struct kgsl_device *device, * thing to do is release or free. */ if (event->func) - event->func(device, event->priv, cur); + event->func(device, event->priv, id, cur); list_del(&event->list); kfree(event); @@ -265,8 +284,8 @@ kgsl_create_context(struct kgsl_device_private *dev_priv) return NULL; } - ret = idr_get_new(&dev_priv->device->context_idr, - context, &id); + ret = idr_get_new_above(&dev_priv->device->context_idr, + context, 1, &id); if (ret != -EAGAIN) break; @@ -277,6 +296,16 @@ kgsl_create_context(struct kgsl_device_private *dev_priv) return NULL; } + /* MAX - 1, there is one memdesc in memstore for device info */ + if (id >= KGSL_MEMSTORE_MAX) { + KGSL_DRV_ERR(dev_priv->device, "cannot have more than %d " + "ctxts due to memstore limitation\n", + KGSL_MEMSTORE_MAX); + idr_remove(&dev_priv->device->context_idr, id); + kfree(context); + return NULL; + } + context->id = id; context->dev_priv = dev_priv; @@ -307,25 +336,28 @@ static void kgsl_timestamp_expired(struct work_struct *work) ts_expired_ws); struct kgsl_event *event, *event_tmp; uint32_t ts_processed; + unsigned int id; mutex_lock(&device->mutex); - /* get current EOP timestamp */ - ts_processed = device->ftbl->readtimestamp(device, - KGSL_TIMESTAMP_RETIRED); - /* Process expired events */ list_for_each_entry_safe(event, event_tmp, &device->events, list) { + ts_processed = device->ftbl->readtimestamp(device, + event->context, KGSL_TIMESTAMP_RETIRED); if (timestamp_cmp(ts_processed, event->timestamp) < 0) - break; + continue; + + id = event->context ? event->context->id : KGSL_MEMSTORE_GLOBAL; if (event->func) - event->func(device, event->priv, ts_processed); + event->func(device, event->priv, id, ts_processed); list_del(&event->list); kfree(event); } + device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID; + mutex_unlock(&device->mutex); } @@ -400,11 +432,15 @@ int kgsl_unregister_ts_notifier(struct kgsl_device *device, } EXPORT_SYMBOL(kgsl_unregister_ts_notifier); -int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp) +int kgsl_check_timestamp(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp) { unsigned int ts_processed; + unsigned int global; - ts_processed = device->ftbl->readtimestamp(device, + ts_processed = device->ftbl->readtimestamp(device, context, + KGSL_TIMESTAMP_RETIRED); + global = device->ftbl->readtimestamp(device, NULL, KGSL_TIMESTAMP_RETIRED); return (timestamp_cmp(ts_processed, timestamp) >= 0); @@ -745,6 +781,9 @@ static int kgsl_open(struct inode *inodep, struct file *filep) kgsl_check_suspended(device); if (device->open_count == 0) { + kgsl_sharedmem_set(&device->memstore, 0, 0, + device->memstore.size); + result = device->ftbl->start(device, true); if (result) { @@ -885,21 +924,35 @@ static long kgsl_ioctl_device_getproperty(struct kgsl_device_private *dev_priv, return result; } -static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private - *dev_priv, unsigned int cmd, - void *data) +static long kgsl_ioctl_device_setproperty(struct kgsl_device_private *dev_priv, + unsigned int cmd, void *data) { int result = 0; - struct kgsl_device_waittimestamp *param = data; + /* The getproperty struct is reused for setproperty too */ + struct kgsl_device_getproperty *param = data; - /* Set the active count so that suspend doesn't do the - wrong thing */ + if (dev_priv->device->ftbl->setproperty) + result = dev_priv->device->ftbl->setproperty( + 
dev_priv->device, param->type, + param->value, param->sizebytes); + + return result; +} + +static long _device_waittimestamp(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, + unsigned int timestamp, + unsigned int timeout) +{ + int result = 0; + + /* Set the active count so that suspend doesn't do the wrong thing */ dev_priv->device->active_cnt++; result = dev_priv->device->ftbl->waittimestamp(dev_priv->device, - param->timestamp, - param->timeout); + context, timestamp, timeout); + /* Fire off any pending suspend operations that are in flight */ @@ -910,6 +963,34 @@ static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private return result; } +static long kgsl_ioctl_device_waittimestamp(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + struct kgsl_device_waittimestamp *param = data; + + return _device_waittimestamp(dev_priv, KGSL_MEMSTORE_GLOBAL, + param->timestamp, param->timeout); +} + +static long kgsl_ioctl_device_waittimestamp_ctxtid(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + struct kgsl_device_waittimestamp_ctxtid *param = data; + struct kgsl_context *context; + + context = kgsl_find_context(dev_priv, param->context_id); + if (context == NULL) { + KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n", + param->context_id); + return -EINVAL; + } + + return _device_waittimestamp(dev_priv, context, + param->timestamp, param->timeout); +} + static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { @@ -926,7 +1007,7 @@ static long kgsl_ioctl_rb_issueibcmds(struct kgsl_device_private *dev_priv, if (context == NULL) { result = -EINVAL; KGSL_DRV_ERR(dev_priv->device, - "invalid drawctxt drawctxt_id %d\n", + "invalid context_id %d\n", param->drawctxt_id); goto done; } @@ -997,21 +1078,46 @@ done: return result; } +static long _cmdstream_readtimestamp(struct kgsl_device_private *dev_priv, + struct kgsl_context *context, unsigned int type, + unsigned int *timestamp) +{ + *timestamp = dev_priv->device->ftbl->readtimestamp(dev_priv->device, + context, type); + + return 0; +} + static long kgsl_ioctl_cmdstream_readtimestamp(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { struct kgsl_cmdstream_readtimestamp *param = data; - param->timestamp = - dev_priv->device->ftbl->readtimestamp(dev_priv->device, - param->type); + return _cmdstream_readtimestamp(dev_priv, NULL, + param->type, ¶m->timestamp); +} - return 0; +static long kgsl_ioctl_cmdstream_readtimestamp_ctxtid(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + struct kgsl_cmdstream_readtimestamp_ctxtid *param = data; + struct kgsl_context *context; + + context = kgsl_find_context(dev_priv, param->context_id); + if (context == NULL) { + KGSL_DRV_ERR(dev_priv->device, "invalid context_id %d\n", + param->context_id); + return -EINVAL; + } + + return _cmdstream_readtimestamp(dev_priv, context, + param->type, ¶m->timestamp); } static void kgsl_freemem_event_cb(struct kgsl_device *device, - void *priv, u32 timestamp) + void *priv, u32 id, u32 timestamp) { struct kgsl_mem_entry *entry = priv; spin_lock(&entry->priv->mem_lock); @@ -1020,30 +1126,65 @@ static void kgsl_freemem_event_cb(struct kgsl_device *device, kgsl_mem_entry_detach_process(entry); } -static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private - *dev_priv, unsigned int cmd, - void *data) +static long _cmdstream_freememontimestamp(struct kgsl_device_private *dev_priv, + 
unsigned int gpuaddr, struct kgsl_context *context, + unsigned int timestamp, unsigned int type) { int result = 0; - struct kgsl_cmdstream_freememontimestamp *param = data; struct kgsl_mem_entry *entry = NULL; + struct kgsl_device *device = dev_priv->device; + unsigned int cur; + unsigned int context_id = context ? context->id : KGSL_MEMSTORE_GLOBAL; spin_lock(&dev_priv->process_priv->mem_lock); - entry = kgsl_sharedmem_find(dev_priv->process_priv, param->gpuaddr); + entry = kgsl_sharedmem_find(dev_priv->process_priv, gpuaddr); spin_unlock(&dev_priv->process_priv->mem_lock); if (entry) { - result = kgsl_add_event(dev_priv->device, param->timestamp, - kgsl_freemem_event_cb, entry, dev_priv); + cur = device->ftbl->readtimestamp(device, context, + KGSL_TIMESTAMP_RETIRED); + + result = kgsl_add_event(dev_priv->device, context_id, + timestamp, kgsl_freemem_event_cb, + entry, dev_priv); } else { KGSL_DRV_ERR(dev_priv->device, - "invalid gpuaddr %08x\n", param->gpuaddr); + "invalid gpuaddr %08x\n", gpuaddr); result = -EINVAL; } return result; } +static long kgsl_ioctl_cmdstream_freememontimestamp(struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + struct kgsl_cmdstream_freememontimestamp *param = data; + + return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr, + NULL, param->timestamp, param->type); +} + +static long kgsl_ioctl_cmdstream_freememontimestamp_ctxtid( + struct kgsl_device_private + *dev_priv, unsigned int cmd, + void *data) +{ + struct kgsl_cmdstream_freememontimestamp_ctxtid *param = data; + struct kgsl_context *context; + + context = kgsl_find_context(dev_priv, param->context_id); + if (context == NULL) { + KGSL_DRV_ERR(dev_priv->device, + "invalid drawctxt context_id %d\n", param->context_id); + return -EINVAL; + } + + return _cmdstream_freememontimestamp(dev_priv, param->gpuaddr, + context, param->timestamp, param->type); +} + static long kgsl_ioctl_drawctxt_create(struct kgsl_device_private *dev_priv, unsigned int cmd, void *data) { @@ -1760,13 +1901,14 @@ struct kgsl_genlock_event_priv { * kgsl_genlock_event_cb - Event callback for a genlock timestamp event * @device - The KGSL device that expired the timestamp * @priv - private data for the event + * @context_id - the context id that goes with the timestamp * @timestamp - the timestamp that triggered the event * * Release a genlock lock following the expiration of a timestamp */ static void kgsl_genlock_event_cb(struct kgsl_device *device, - void *priv, u32 timestamp) + void *priv, u32 context_id, u32 timestamp) { struct kgsl_genlock_event_priv *ev = priv; int ret; @@ -1794,7 +1936,7 @@ static void kgsl_genlock_event_cb(struct kgsl_device *device, */ static int kgsl_add_genlock_event(struct kgsl_device *device, - u32 timestamp, void __user *data, int len, + u32 context_id, u32 timestamp, void __user *data, int len, struct kgsl_device_private *owner) { struct kgsl_genlock_event_priv *event; @@ -1820,8 +1962,8 @@ static int kgsl_add_genlock_event(struct kgsl_device *device, return ret; } - ret = kgsl_add_event(device, timestamp, kgsl_genlock_event_cb, event, - owner); + ret = kgsl_add_event(device, context_id, timestamp, + kgsl_genlock_event_cb, event, owner); if (ret) kfree(event); @@ -1829,7 +1971,7 @@ static int kgsl_add_genlock_event(struct kgsl_device *device, } #else static long kgsl_add_genlock_event(struct kgsl_device *device, - u32 timestamp, void __user *data, int len, + u32 context_id, u32 timestamp, void __user *data, int len, struct kgsl_device_private *owner) { return -EINVAL; @@ 
-1853,8 +1995,8 @@ static long kgsl_ioctl_timestamp_event(struct kgsl_device_private *dev_priv, switch (param->type) { case KGSL_TIMESTAMP_EVENT_GENLOCK: ret = kgsl_add_genlock_event(dev_priv->device, - param->timestamp, param->priv, param->len, - dev_priv); + param->context_id, param->timestamp, param->priv, + param->len, dev_priv); break; default: ret = -EINVAL; @@ -1878,12 +2020,18 @@ static const struct { kgsl_ioctl_device_getproperty, 1), KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP, kgsl_ioctl_device_waittimestamp, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID, + kgsl_ioctl_device_waittimestamp_ctxtid, 1), KGSL_IOCTL_FUNC(IOCTL_KGSL_RINGBUFFER_ISSUEIBCMDS, kgsl_ioctl_rb_issueibcmds, 1), KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP, kgsl_ioctl_cmdstream_readtimestamp, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID, + kgsl_ioctl_cmdstream_readtimestamp_ctxtid, 1), KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP, kgsl_ioctl_cmdstream_freememontimestamp, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID, + kgsl_ioctl_cmdstream_freememontimestamp_ctxtid, 1), KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_CREATE, kgsl_ioctl_drawctxt_create, 1), KGSL_IOCTL_FUNC(IOCTL_KGSL_DRAWCTXT_DESTROY, @@ -1906,6 +2054,8 @@ static const struct { kgsl_ioctl_cff_user_event, 0), KGSL_IOCTL_FUNC(IOCTL_KGSL_TIMESTAMP_EVENT, kgsl_ioctl_timestamp_event, 1), + KGSL_IOCTL_FUNC(IOCTL_KGSL_SETPROPERTY, + kgsl_ioctl_device_setproperty, 1), }; static long kgsl_ioctl(struct file *filep, unsigned int cmd, unsigned long arg) @@ -2211,13 +2361,13 @@ kgsl_register_device(struct kgsl_device *device) INIT_LIST_HEAD(&device->events); + device->last_expired_ctxt_id = KGSL_CONTEXT_INVALID; + ret = kgsl_mmu_init(device); if (ret != 0) goto err_dest_work_q; - ret = kgsl_allocate_contiguous(&device->memstore, - sizeof(struct kgsl_devmemstore)); - + ret = kgsl_allocate_contiguous(&device->memstore, KGSL_MEMSTORE_SIZE); if (ret != 0) goto err_close_mmu; diff --git a/drivers/gpu/msm/kgsl.h b/drivers/gpu/msm/kgsl.h index 3f9ff843..aff17338 100755 --- a/drivers/gpu/msm/kgsl.h +++ b/drivers/gpu/msm/kgsl.h @@ -25,6 +25,14 @@ #define KGSL_NAME "kgsl" +/* The number of memstore arrays limits the number of contexts allowed. + * If more contexts are needed, update multiple for MEMSTORE_SIZE + */ +#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 2)) +#define KGSL_MEMSTORE_GLOBAL (0) +#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \ + sizeof(struct kgsl_devmemstore) - 1) + /* Timestamp window used to detect rollovers */ #define KGSL_TIMESTAMP_WINDOW 0x80000000 @@ -150,6 +158,7 @@ struct kgsl_mem_entry { void *priv_data; struct list_head list; uint32_t free_timestamp; + unsigned int context_id; /* back pointer to private structure under whose context this * allocation is made */ struct kgsl_process_private *priv; diff --git a/drivers/gpu/msm/kgsl_device.h b/drivers/gpu/msm/kgsl_device.h index ff78ae3f..45e41d91 100755 --- a/drivers/gpu/msm/kgsl_device.h +++ b/drivers/gpu/msm/kgsl_device.h @@ -1,5 +1,4 @@ -/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved. - * Copyright (C) 2011 Sony Ericsson Mobile Communications AB. +/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved. 
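To make the new limit concrete, assuming 4 KB pages and a kgsl_devmemstore of ten 32-bit words (40 bytes; the struct layout is an assumption, it is not restated in this patch):

	KGSL_MEMSTORE_SIZE = 2 * PAGE_SIZE = 8192 bytes
	8192 / sizeof(struct kgsl_devmemstore) = 8192 / 40 = 204 slots
	KGSL_MEMSTORE_MAX = 204 - 1 = 203

Slot 0 backs KGSL_MEMSTORE_GLOBAL, leaving about two hundred contexts with private timestamp slots; "update multiple" in the comment above means growing the PAGE_SIZE multiplier in KGSL_MEMSTORE_SIZE to raise that ceiling.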
* * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -47,6 +46,7 @@ #define KGSL_STATE_SUSPEND 0x00000010 #define KGSL_STATE_HUNG 0x00000020 #define KGSL_STATE_DUMP_AND_RECOVER 0x00000040 +#define KGSL_STATE_SLUMBER 0x00000080 #define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000 @@ -76,9 +76,10 @@ struct kgsl_functable { enum kgsl_property_type type, void *value, unsigned int sizebytes); int (*waittimestamp) (struct kgsl_device *device, - unsigned int timestamp, unsigned int msecs); + struct kgsl_context *context, unsigned int timestamp, + unsigned int msecs); unsigned int (*readtimestamp) (struct kgsl_device *device, - enum kgsl_timestamp_type type); + struct kgsl_context *context, enum kgsl_timestamp_type type); int (*issueibcmds) (struct kgsl_device_private *dev_priv, struct kgsl_context *context, struct kgsl_ibdesc *ibdesc, unsigned int sizedwords, uint32_t *timestamp, @@ -101,6 +102,9 @@ struct kgsl_functable { struct kgsl_context *context); long (*ioctl) (struct kgsl_device_private *dev_priv, unsigned int cmd, void *data); + int (*setproperty) (struct kgsl_device *device, + enum kgsl_property_type type, void *value, + unsigned int sizebytes); }; struct kgsl_memregion { @@ -120,8 +124,9 @@ struct kgsl_mh { }; struct kgsl_event { + struct kgsl_context *context; uint32_t timestamp; - void (*func)(struct kgsl_device *, void *, u32); + void (*func)(struct kgsl_device *, void *, u32, u32); void *priv; struct list_head list; struct kgsl_device_private *owner; @@ -153,6 +158,7 @@ struct kgsl_device { uint32_t state; uint32_t requested_state; + unsigned int last_expired_ctxt_id; unsigned int active_cnt; struct completion suspend_gate; @@ -304,7 +310,8 @@ kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id) return (ctxt && ctxt->dev_priv == dev_priv) ? 
ctxt : NULL; } -int kgsl_check_timestamp(struct kgsl_device *device, unsigned int timestamp); +int kgsl_check_timestamp(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp); int kgsl_register_ts_notifier(struct kgsl_device *device, struct notifier_block *nb); diff --git a/drivers/gpu/msm/z180.c b/drivers/gpu/msm/z180.c old mode 100644 new mode 100755 index e7a1d521..688f23d8 --- a/drivers/gpu/msm/z180.c +++ b/drivers/gpu/msm/z180.c @@ -100,6 +100,7 @@ enum z180_cmdwindow_type { static int z180_start(struct kgsl_device *device, unsigned int init_ram); static int z180_stop(struct kgsl_device *device); static int z180_wait(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp, unsigned int msecs); static void z180_regread(struct kgsl_device *device, @@ -382,8 +383,8 @@ static int z180_idle(struct kgsl_device *device, unsigned int timeout) if (timestamp_cmp(z180_dev->current_timestamp, z180_dev->timestamp) > 0) - status = z180_wait(device, z180_dev->current_timestamp, - timeout); + status = z180_wait(device, NULL, + z180_dev->current_timestamp, timeout); if (status) KGSL_DRV_ERR(device, "z180_waittimestamp() timed out\n"); @@ -793,14 +794,16 @@ static void z180_cmdwindow_write(struct kgsl_device *device, } static unsigned int z180_readtimestamp(struct kgsl_device *device, - enum kgsl_timestamp_type type) + struct kgsl_context *context, enum kgsl_timestamp_type type) { struct z180_device *z180_dev = Z180_DEVICE(device); + (void)context; /* get current EOP timestamp */ return z180_dev->timestamp; } static int z180_waittimestamp(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp, unsigned int msecs) { @@ -811,13 +814,14 @@ static int z180_waittimestamp(struct kgsl_device *device, msecs = 10 * MSEC_PER_SEC; mutex_unlock(&device->mutex); - status = z180_wait(device, timestamp, msecs); + status = z180_wait(device, context, timestamp, msecs); mutex_lock(&device->mutex); return status; } static int z180_wait(struct kgsl_device *device, + struct kgsl_context *context, unsigned int timestamp, unsigned int msecs) { @@ -826,7 +830,7 @@ static int z180_wait(struct kgsl_device *device, timeout = wait_io_event_interruptible_timeout( device->wait_queue, - kgsl_check_timestamp(device, timestamp), + kgsl_check_timestamp(device, context, timestamp), msecs_to_jiffies(msecs)); if (timeout > 0) diff --git a/include/linux/msm_kgsl.h b/include/linux/msm_kgsl.h index 36357e08..92b41a5d 100755 --- a/include/linux/msm_kgsl.h +++ b/include/linux/msm_kgsl.h @@ -35,14 +35,18 @@ #define _MSM_KGSL_H #define KGSL_VERSION_MAJOR 3 -#define KGSL_VERSION_MINOR 8 +#define KGSL_VERSION_MINOR 10 /*context flags */ -#define KGSL_CONTEXT_SAVE_GMEM 1 -#define KGSL_CONTEXT_NO_GMEM_ALLOC 2 -#define KGSL_CONTEXT_SUBMIT_IB_LIST 4 -#define KGSL_CONTEXT_CTX_SWITCH 8 -#define KGSL_CONTEXT_PREAMBLE 16 +#define KGSL_CONTEXT_SAVE_GMEM 0x00000001 +#define KGSL_CONTEXT_NO_GMEM_ALLOC 0x00000002 +#define KGSL_CONTEXT_SUBMIT_IB_LIST 0x00000004 +#define KGSL_CONTEXT_CTX_SWITCH 0x00000008 +#define KGSL_CONTEXT_PREAMBLE 0x00000010 +#define KGSL_CONTEXT_TRASH_STATE 0x00000020 +#define KGSL_CONTEXT_PER_CONTEXT_TS 0x00000040 + +#define KGSL_CONTEXT_INVALID 0xffffffff /* Memory allocayion flags */ #define KGSL_MEMFLAGS_GPUREADONLY 0x01000000 @@ -58,6 +62,7 @@ #define KGSL_FLAGS_RESERVED1 0x00000040 #define KGSL_FLAGS_RESERVED2 0x00000080 #define KGSL_FLAGS_SOFT_RESET 0x00000100 +#define KGSL_FLAGS_PER_CONTEXT_TIMESTAMPS 0x00000200 /* Clock flags to show which clocks 
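A userspace sketch of opting in at context creation, assuming the conventional two-field kgsl_drawctxt_create (flags in, drawctxt_id out), which this hunk does not restate:

	struct kgsl_drawctxt_create ctxt = {
		.flags = KGSL_CONTEXT_SAVE_GMEM | KGSL_CONTEXT_PER_CONTEXT_TS,
	};

	if (ioctl(fd, IOCTL_KGSL_DRAWCTXT_CREATE, &ctxt) == 0)
		ctx_id = ctxt.drawctxt_id; /* context now owns a memstore slot */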
should be controled by a given platform */ #define KGSL_CLK_SRC 0x00000001 @@ -132,9 +137,9 @@ struct kgsl_devmemstore { unsigned int sbz5; }; -#define KGSL_DEVICE_MEMSTORE_OFFSET(field) \ - offsetof(struct kgsl_devmemstore, field) - +#define KGSL_MEMSTORE_OFFSET(ctxt_id, field) \ + ((ctxt_id)*sizeof(struct kgsl_devmemstore) + \ + offsetof(struct kgsl_devmemstore, field)) /* timestamp id*/ enum kgsl_timestamp_type { @@ -268,6 +273,14 @@ struct kgsl_device_waittimestamp { #define IOCTL_KGSL_DEVICE_WAITTIMESTAMP \ _IOW(KGSL_IOC_TYPE, 0x6, struct kgsl_device_waittimestamp) +struct kgsl_device_waittimestamp_ctxtid { + unsigned int context_id; + unsigned int timestamp; + unsigned int timeout; +}; + +#define IOCTL_KGSL_DEVICE_WAITTIMESTAMP_CTXTID \ + _IOW(KGSL_IOC_TYPE, 0x7, struct kgsl_device_waittimestamp_ctxtid) /* issue indirect commands to the GPU. * drawctxt_id must have been created with IOCTL_KGSL_DRAWCTXT_CREATE @@ -361,6 +374,26 @@ struct kgsl_map_user_mem { #define IOCTL_KGSL_MAP_USER_MEM \ _IOWR(KGSL_IOC_TYPE, 0x15, struct kgsl_map_user_mem) +struct kgsl_cmdstream_readtimestamp_ctxtid { + unsigned int context_id; + unsigned int type; + unsigned int timestamp; /*output param */ +}; + +#define IOCTL_KGSL_CMDSTREAM_READTIMESTAMP_CTXTID \ + _IOWR(KGSL_IOC_TYPE, 0x16, struct kgsl_cmdstream_readtimestamp_ctxtid) + +struct kgsl_cmdstream_freememontimestamp_ctxtid { + unsigned int context_id; + unsigned int gpuaddr; + unsigned int type; + unsigned int timestamp; +}; + +#define IOCTL_KGSL_CMDSTREAM_FREEMEMONTIMESTAMP_CTXTID \ + _IOW(KGSL_IOC_TYPE, 0x17, \ + struct kgsl_cmdstream_freememontimestamp_ctxtid) + /* add a block of pmem or fb into the GPU address space */ struct kgsl_sharedmem_from_pmem { int pmem_fd; @@ -504,6 +537,14 @@ struct kgsl_timestamp_event_genlock { int handle; /* Handle of the genlock lock to release */ }; +/* + * Set a property within the kernel. Uses the same structure as + * IOCTL_KGSL_GETPROPERTY + */ + +#define IOCTL_KGSL_SETPROPERTY \ + _IOW(KGSL_IOC_TYPE, 0x32, struct kgsl_device_getproperty) + #ifdef __KERNEL__ #ifdef CONFIG_MSM_KGSL_DRM int kgsl_gem_obj_addr(int drm_fd, int handle, unsigned long *start, From 65b62e5ee41c10c0e780718682613c86a8e650ad Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Tue, 7 Aug 2012 00:45:11 +0800 Subject: [PATCH 04/22] base: genlock: Remove genlock_release_lock and associated ioctl base: genlock: allow synchronization with a single gralloc handle base: genlock: remove BIT macro usage --- drivers/base/genlock.c | 305 +++++++++++++++++++++++++++++++--------- include/linux/genlock.h | 11 +- 2 files changed, 251 insertions(+), 65 deletions(-) mode change 100644 => 100755 drivers/base/genlock.c mode change 100644 => 100755 include/linux/genlock.h diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c old mode 100644 new mode 100755 index afe8eb1c..a89ee7ee --- a/drivers/base/genlock.c +++ b/drivers/base/genlock.c @@ -1,4 +1,4 @@ -/* Copyright (c) 2011, Code Aurora Forum. All rights reserved. +/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved. * * This program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License version 2 and @@ -22,7 +22,6 @@ #include #include #include -#include /* for in_interrupt() */ /* Lock states - can either be unlocked, held as an exclusive write lock or a * shared read lock @@ -32,6 +31,9 @@ #define _RDLOCK GENLOCK_RDLOCK #define _WRLOCK GENLOCK_WRLOCK +#define GENLOCK_LOG_ERR(fmt, args...) 
\ +pr_err("genlock: %s: " fmt, __func__, ##args) + struct genlock { struct list_head active; /* List of handles holding lock */ spinlock_t lock; /* Spinlock to protect the lock internals */ @@ -49,12 +51,29 @@ struct genlock_handle { taken */ }; +/* + * Create a spinlock to protect against a race condition when a lock gets + * released while another process tries to attach it + */ + +static DEFINE_SPINLOCK(genlock_file_lock); + static void genlock_destroy(struct kref *kref) { - struct genlock *lock = container_of(kref, struct genlock, - refcount); + struct genlock *lock = container_of(kref, struct genlock, + refcount); - kfree(lock); + /* + * Clear the private data for the file descriptor in case the fd is + * still active after the lock gets released + */ + + spin_lock(&genlock_file_lock); + if (lock->file) + lock->file->private_data = NULL; + spin_unlock(&genlock_file_lock); + + kfree(lock); } /* @@ -64,6 +83,15 @@ static void genlock_destroy(struct kref *kref) static int genlock_release(struct inode *inodep, struct file *file) { + struct genlock *lock = file->private_data; + /* + * Clear the reference back to this file structure to avoid + * somehow reusing the lock after the file has been destroyed + */ + + if (lock) + lock->file = NULL; + return 0; } @@ -82,12 +110,21 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle) { struct genlock *lock; - if (handle->lock != NULL) + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); return ERR_PTR(-EINVAL); + } + + if (handle->lock != NULL) { + GENLOCK_LOG_ERR("Handle already has a lock attached\n"); + return ERR_PTR(-EINVAL); + } lock = kzalloc(sizeof(*lock), GFP_KERNEL); - if (lock == NULL) + if (lock == NULL) { + GENLOCK_LOG_ERR("Unable to allocate memory for a lock\n"); return ERR_PTR(-ENOMEM); + } INIT_LIST_HEAD(&lock->active); init_waitqueue_head(&lock->queue); @@ -120,8 +157,10 @@ static int genlock_get_fd(struct genlock *lock) { int ret; - if (!lock->file) + if (!lock->file) { + GENLOCK_LOG_ERR("No file attached to the lock\n"); return -EINVAL; + } ret = get_unused_fd_flags(0); if (ret < 0) @@ -143,19 +182,37 @@ struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd) struct file *file; struct genlock *lock; - if (handle->lock != NULL) + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); return ERR_PTR(-EINVAL); + } + + if (handle->lock != NULL) { + GENLOCK_LOG_ERR("Handle already has a lock attached\n"); + return ERR_PTR(-EINVAL); + } file = fget(fd); - if (file == NULL) + if (file == NULL) { + GENLOCK_LOG_ERR("Bad file descriptor\n"); return ERR_PTR(-EBADF); + } + /* + * take a spinlock to avoid a race condition if the lock is + * released and then attached + */ + + spin_lock(&genlock_file_lock); lock = file->private_data; + spin_unlock(&genlock_file_lock); fput(file); - if (lock == NULL) + if (lock == NULL) { + GENLOCK_LOG_ERR("File descriptor is invalid\n"); return ERR_PTR(-EINVAL); + } handle->lock = lock; kref_get(&lock->refcount); @@ -199,13 +256,16 @@ static int _genlock_unlock(struct genlock *lock, struct genlock_handle *handle) spin_lock_irqsave(&lock->lock, irqflags); - if (lock->state == _UNLOCKED) + if (lock->state == _UNLOCKED) { + GENLOCK_LOG_ERR("Trying to unlock an unlocked handle\n"); goto done; + } /* Make sure this handle is an owner of the lock */ - if (!handle_has_lock(lock, handle)) + if (!handle_has_lock(lock, handle)) { + GENLOCK_LOG_ERR("handle does not have lock attached to it\n"); goto done; - + } /* If the handle holds no more references
to the lock then release it (maybe) */ @@ -228,7 +288,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, { unsigned long irqflags; int ret = 0; - unsigned int ticks = msecs_to_jiffies(timeout); + unsigned long ticks = msecs_to_jiffies(timeout); spin_lock_irqsave(&lock->lock, irqflags); @@ -236,8 +296,8 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, * succeed to not block, the mere idea is too dangerous to continue */ - if (in_interrupt() && !(flags & GENLOCK_NOBLOCK)) - BUG(); +// if (in_interrupt() && !(flags & GENLOCK_NOBLOCK)) +// BUG(); /* Fast path - the lock is unlocked, so go do the needful */ @@ -247,12 +307,15 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, if (handle_has_lock(lock, handle)) { /* - * If the handle already holds the lock and the type matches, - * then just increment the active pointer. This allows the - * handle to do recursive locks + * If the handle already holds the lock and the lock type is + * a read lock then just increment the active pointer. This + * allows the handle to do recursive read locks. Recursive + * write locks are not allowed in order to support + * synchronization within a process using a single gralloc + * handle. */ - if (lock->state == op) { + if (lock->state == _RDLOCK && op == _RDLOCK) { handle->active++; goto done; } @@ -261,32 +324,46 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, * If the handle holds a write lock then the owner can switch * to a read lock if they want. Do the transition atomically * then wake up any pending waiters in case they want a read - * lock too. + * lock too. In order to support synchronization within a + * process the caller must explicitly request to convert the + * lock type with the GENLOCK_WRITE_TO_READ flag. */ - if (op == _RDLOCK && handle->active == 1) { - lock->state = _RDLOCK; - wake_up(&lock->queue); + if (flags & GENLOCK_WRITE_TO_READ) { + if (lock->state == _WRLOCK && op == _RDLOCK) { + lock->state = _RDLOCK; + wake_up(&lock->queue); + goto done; + } else { + GENLOCK_LOG_ERR("Invalid state to convert " + "write to read\n"); + ret = -EINVAL; + goto done; + } + } + } else { + + /* + * Check to ensure the caller has not attempted to convert a + * write to a read without holding the lock. + */ + + if (flags & GENLOCK_WRITE_TO_READ) { + GENLOCK_LOG_ERR("Handle must have lock to convert " + "write to read\n"); + ret = -EINVAL; goto done; } /* - * Otherwise the user tried to turn a read into a write, and we - * don't allow that.
+ * If we request a read and the lock is held by a read, then go + * ahead and share the lock */ - ret = -EINVAL; - goto done; + if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) + goto dolock; } - /* - * If we request a read and the lock is held by a read, then go - * ahead and share the lock - */ - - if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK) - goto dolock; - /* Treat timeout 0 just like a NOBLOCK flag and return if the lock cannot be acquired without blocking */ @@ -295,15 +372,26 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, goto done; } - /* Wait while the lock remains in an incompatible state */ + /* + * Wait while the lock remains in an incompatible state + * state op wait + * ------------------- + * unlocked n/a no + * read read no + * read write yes + * write n/a yes + */ - while (lock->state != _UNLOCKED) { - unsigned int elapsed; + while ((lock->state == _RDLOCK && op == _WRLOCK) || + lock->state == _WRLOCK) { + signed long elapsed; spin_unlock_irqrestore(&lock->lock, irqflags); elapsed = wait_event_interruptible_timeout(lock->queue, - lock->state == _UNLOCKED, ticks); + lock->state == _UNLOCKED || + (lock->state == _RDLOCK && op == _RDLOCK), + ticks); spin_lock_irqsave(&lock->lock, irqflags); @@ -312,7 +400,7 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, goto done; } - ticks = elapsed; + ticks = (unsigned long) elapsed; } dolock: @@ -320,7 +408,7 @@ dolock: list_add_tail(&handle->entry, &lock->active); lock->state = op; - handle->active = 1; + handle->active++; done: spin_unlock_irqrestore(&lock->lock, irqflags); @@ -329,7 +417,7 @@ done: } /** - * genlock_lock - Acquire or release a lock + * genlock_lock - Acquire or release a lock (deprecated) * @handle - pointer to the genlock handle that is requesting the lock * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) * @flags - flags to control the operation @@ -341,11 +429,76 @@ done: int genlock_lock(struct genlock_handle *handle, int op, int flags, uint32_t timeout) { - struct genlock *lock = handle->lock; + struct genlock *lock; + unsigned long irqflags; + int ret = 0; - if (lock == NULL) + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } + + switch (op) { + case GENLOCK_UNLOCK: + ret = _genlock_unlock(lock, handle); + break; + case GENLOCK_RDLOCK: + spin_lock_irqsave(&lock->lock, irqflags); + if (handle_has_lock(lock, handle)) { + /* request the WRITE_TO_READ flag for compatibility */ + flags |= GENLOCK_WRITE_TO_READ; + } + spin_unlock_irqrestore(&lock->lock, irqflags); + /* fall through to take lock */ + case GENLOCK_WRLOCK: + ret = _genlock_lock(lock, handle, op, flags, timeout); + break; + default: + GENLOCK_LOG_ERR("Invalid lock operation\n"); + ret = -EINVAL; + break; + } + + return ret; +} +EXPORT_SYMBOL(genlock_lock); + +/** + * genlock_dreadlock - Acquire or release a lock + * @handle - pointer to the genlock handle that is requesting the lock + * @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK) + * @flags - flags to control the operation + * @timeout - optional timeout to wait for the lock to come free + * + * Returns: 0 on success or error code on failure + */ + +int genlock_dreadlock(struct genlock_handle *handle, int op, int flags, + uint32_t timeout) +{ + struct genlock *lock; + + int ret = 0; + + if (IS_ERR_OR_NULL(handle)) { +
GENLOCK_LOG_ERR("Invalid handle\n"); + return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } switch (op) { case GENLOCK_UNLOCK: @@ -356,13 +509,14 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags, ret = _genlock_lock(lock, handle, op, flags, timeout); break; default: + GENLOCK_LOG_ERR("Invalid lock operation\n"); ret = -EINVAL; break; } return ret; } -EXPORT_SYMBOL(genlock_lock); +EXPORT_SYMBOL(genlock_dreadlock); /** * genlock_wait - Wait for the lock to be released @@ -372,13 +526,22 @@ EXPORT_SYMBOL(genlock_lock); int genlock_wait(struct genlock_handle *handle, uint32_t timeout) { - struct genlock *lock = handle->lock; + struct genlock *lock; unsigned long irqflags; int ret = 0; - unsigned int ticks = msecs_to_jiffies(timeout); + unsigned long ticks = msecs_to_jiffies(timeout); - if (lock == NULL) + if (IS_ERR_OR_NULL(handle)) { + GENLOCK_LOG_ERR("Invalid handle\n"); return -EINVAL; + } + + lock = handle->lock; + + if (lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock attached\n"); + return -EINVAL; + } spin_lock_irqsave(&lock->lock, irqflags); @@ -393,7 +556,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) } while (lock->state != _UNLOCKED) { - unsigned int elapsed; + signed long elapsed; spin_unlock_irqrestore(&lock->lock, irqflags); @@ -407,7 +570,7 @@ int genlock_wait(struct genlock_handle *handle, uint32_t timeout) break; } - ticks = elapsed; + ticks = (unsigned long) elapsed; } done: @@ -415,12 +578,7 @@ done: return ret; } -/** - * genlock_release_lock - Release a lock attached to a handle - * @handle - Pointer to the handle holding the lock - */ - -void genlock_release_lock(struct genlock_handle *handle) +static void genlock_release_lock(struct genlock_handle *handle) { unsigned long flags; @@ -441,7 +599,6 @@ void genlock_release_lock(struct genlock_handle *handle) handle->lock = NULL; handle->active = 0; } -EXPORT_SYMBOL(genlock_release_lock); /* * Release function called when all references to a handle are released @@ -468,8 +625,10 @@ static const struct file_operations genlock_handle_fops = { static struct genlock_handle *_genlock_get_handle(void) { struct genlock_handle *handle = kzalloc(sizeof(*handle), GFP_KERNEL); - if (handle == NULL) + if (handle == NULL) { + GENLOCK_LOG_ERR("Unable to allocate memory for the handle\n"); return ERR_PTR(-ENOMEM); + } return handle; } @@ -531,6 +690,9 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd, struct genlock *lock; int ret; + if (IS_ERR_OR_NULL(handle)) + return -EINVAL; + switch (cmd) { case GENLOCK_IOC_NEW: { lock = genlock_create_lock(handle); @@ -540,8 +702,11 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd, return 0; } case GENLOCK_IOC_EXPORT: { - if (handle->lock == NULL) + if (handle->lock == NULL) { + GENLOCK_LOG_ERR("Handle does not have a lock" + "attached\n"); return -EINVAL; + } ret = genlock_get_fd(handle->lock); if (ret < 0) @@ -574,6 +739,14 @@ static long genlock_dev_ioctl(struct file *filep, unsigned int cmd, return genlock_lock(handle, param.op, param.flags, param.timeout); } + case GENLOCK_IOC_DREADLOCK: { + if (copy_from_user(¶m, (void __user *) arg, + sizeof(param))) + return -EFAULT; + + return genlock_dreadlock(handle, param.op, param.flags, + param.timeout); + } case GENLOCK_IOC_WAIT: { if (copy_from_user(¶m, (void __user *) arg, sizeof(param))) @@ -582,10 +755,16 @@ static long genlock_dev_ioctl(struct file 
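In userspace terms, GENLOCK_IOC_LOCK keeps the old permissive behavior while GENLOCK_IOC_DREADLOCK enforces the stricter rules, including the explicit downgrade. A sketch, assuming fd is a genlock handle fd holding a lock created with GENLOCK_IOC_NEW and the usual genlock_lock fields (op, flags, timeout in ms); error checks elided:

	struct genlock_lock req = { .op = GENLOCK_WRLOCK, .timeout = 500 };

	ioctl(fd, GENLOCK_IOC_DREADLOCK, &req);	/* take the write lock */

	req.op = GENLOCK_RDLOCK;
	req.flags = GENLOCK_WRITE_TO_READ;	/* downgrade must be explicit */
	ioctl(fd, GENLOCK_IOC_DREADLOCK, &req);	/* write lock becomes read */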
*filep, unsigned int cmd, return genlock_wait(handle, param.timeout); } case GENLOCK_IOC_RELEASE: { - genlock_release_lock(handle); - return 0; + /* + * Return error - this ioctl has been deprecated. + * Locks should only be released when the handle is + * destroyed + */ + GENLOCK_LOG_ERR("Deprecated RELEASE ioctl called\n"); + return -EINVAL; } default: + GENLOCK_LOG_ERR("Invalid ioctl\n"); return -EINVAL; } } diff --git a/include/linux/genlock.h b/include/linux/genlock.h old mode 100644 new mode 100755 index 2e9f9d68..587c49df --- a/include/linux/genlock.h +++ b/include/linux/genlock.h @@ -12,7 +12,7 @@ void genlock_put_handle(struct genlock_handle *handle); struct genlock *genlock_create_lock(struct genlock_handle *); struct genlock *genlock_attach_lock(struct genlock_handle *, int fd); int genlock_wait(struct genlock_handle *handle, u32 timeout); -void genlock_release_lock(struct genlock_handle *); +/* genlock_release_lock was deprecated */ int genlock_lock(struct genlock_handle *handle, int op, int flags, u32 timeout); #endif @@ -21,7 +21,8 @@ int genlock_lock(struct genlock_handle *handle, int op, int flags, #define GENLOCK_WRLOCK 1 #define GENLOCK_RDLOCK 2 -#define GENLOCK_NOBLOCK (1 << 0) +#define GENLOCK_NOBLOCK (1 << 0) +#define GENLOCK_WRITE_TO_READ (1 << 1) struct genlock_lock { int fd; @@ -37,9 +38,15 @@ struct genlock_lock { struct genlock_lock) #define GENLOCK_IOC_ATTACH _IOW(GENLOCK_IOC_MAGIC, 2, \ struct genlock_lock) + +/* Deprecated */ #define GENLOCK_IOC_LOCK _IOW(GENLOCK_IOC_MAGIC, 3, \ struct genlock_lock) + +/* Deprecated */ #define GENLOCK_IOC_RELEASE _IO(GENLOCK_IOC_MAGIC, 4) #define GENLOCK_IOC_WAIT _IOW(GENLOCK_IOC_MAGIC, 5, \ struct genlock_lock) +#define GENLOCK_IOC_DREADLOCK _IOW(GENLOCK_IOC_MAGIC, 6, \ + struct genlock_lock) #endif From 11a56d2216439215f6acdbf7a41f19b8a6d4dd57 Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Tue, 7 Aug 2012 00:53:53 +0800 Subject: [PATCH 05/22] add in_interrupt back --- drivers/base/genlock.c | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c index a89ee7ee..b5f8e42e 100755 --- a/drivers/base/genlock.c +++ b/drivers/base/genlock.c @@ -22,6 +22,7 @@ #include #include #include +#include /* Lock states - can either be unlocked, held as an exclusive write lock or a * shared read lock @@ -296,8 +297,8 @@ static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle, * succeed to not block, the mere idea is too dangerous to continue */ -// if (in_interrupt() && !(flags & GENLOCK_NOBLOCK)) -// BUG(); + if (in_interrupt() && !(flags & GENLOCK_NOBLOCK)) + BUG(); /* Fast path - the lock is unlocked, so go do the needful */ From 310719402884355d73d2ce5cc8a4bcab7ee821f1 Mon Sep 17 00:00:00 2001 From: securecrt Date: Tue, 7 Aug 2012 16:52:14 +0800 Subject: [PATCH 06/22] fixed typo in config file --- arch/arm/configs/htcleo_defconfig | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/configs/htcleo_defconfig b/arch/arm/configs/htcleo_defconfig index 96d43e38..86053034 100644 --- a/arch/arm/configs/htcleo_defconfig +++ b/arch/arm/configs/htcleo_defconfig @@ -709,7 +709,7 @@ CONFIG_IP_NF_ARP_MANGLE=y CONFIG_NF_DEFRAG_IPV6=y CONFIG_NF_CONNTRACK_IPV6=y # CONFIG_IP6_NF_QUEUE is not set -ONFIG_IP6_NF_IPTABLES=y +CONFIG_IP6_NF_IPTABLES=y # CONFIG_IP6_NF_MATCH_AH is not set # CONFIG_IP6_NF_MATCH_EUI64 is not set # CONFIG_IP6_NF_MATCH_FRAG is not set From 0fa4a5529ca5820b445ff48cb8862fe723ea21a7 Mon Sep 17 00:00:00 2001 From: securecrt Date: 
Wed, 15 Aug 2012 18:52:07 +0800 Subject: [PATCH 07/22] Staging: android: binder: Don't call dump_stack in binder_vma_open Staging: android: binder: Fix crashes when sharing a binder file between processes drivers:staging:android Typos: fix some comments that have typos in them. fs: Remove missed ->fds_bits from cessation use of fd_set structs internally Staging:android: Change type for binder_debug_no_lock switch to bool Staging: android: binder: Fix use-after-free bug --- drivers/staging/android/binder.c | 270 ++++++++++++++----------------- 1 file changed, 120 insertions(+), 150 deletions(-) diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index a0763da9..c44eb407 100644 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -3,7 +3,6 @@ * Android IPC Subsystem * * Copyright (C) 2007-2008 Google, Inc. - * Copyright (c) 2012, Code Aurora Forum. All rights reserved. * * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -31,14 +30,15 @@ #include #include #include -#include #include #include +#include #include "binder.h" static DEFINE_MUTEX(binder_lock); static DEFINE_MUTEX(binder_deferred_lock); +static DEFINE_MUTEX(binder_mmap_lock); static HLIST_HEAD(binder_procs); static HLIST_HEAD(binder_deferred_list); @@ -98,12 +98,12 @@ enum { BINDER_DEBUG_BUFFER_ALLOC = 1U << 13, BINDER_DEBUG_PRIORITY_CAP = 1U << 14, BINDER_DEBUG_BUFFER_ALLOC_ASYNC = 1U << 15, - BINDER_DEBUG_TOP_ERRORS = 1U << 16, + BINDER_DEBUG_TOP_ERRORS = 1U << 16, }; static uint32_t binder_debug_mask; module_param_named(debug_mask, binder_debug_mask, uint, S_IWUSR | S_IRUGO); -static int binder_debug_no_lock; +static bool binder_debug_no_lock; module_param_named(proc_no_lock, binder_debug_no_lock, bool, S_IWUSR | S_IRUGO); static DECLARE_WAIT_QUEUE_HEAD(binder_user_error_wait); @@ -258,7 +258,7 @@ struct binder_ref { }; struct binder_buffer { - struct list_head entry; /* free and allocated entries by addesss */ + struct list_head entry; /* free and allocated entries by address */ struct rb_node rb_node; /* free entry by size or allocated entry */ /* by address */ unsigned free:1; @@ -288,6 +288,7 @@ struct binder_proc { struct rb_root refs_by_node; int pid; struct vm_area_struct *vma; + struct mm_struct *vma_vm_mm; struct task_struct *tsk; struct files_struct *files; struct hlist_node deferred_work_node; @@ -380,8 +381,7 @@ int task_get_unused_fd_flags(struct binder_proc *proc, int flags) repeat: fdt = files_fdtable(files); - fd = find_next_zero_bit(fdt->open_fds->fds_bits, fdt->max_fds, - files->next_fd); + fd = find_next_zero_bit(fdt->open_fds, fdt->max_fds, files->next_fd); /* * N.B. 
For clone tasks sharing a files structure, this test @@ -633,6 +633,11 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, if (mm) { down_write(&mm->mmap_sem); vma = proc->vma; + if (vma && mm != proc->vma_vm_mm) { + pr_err("binder: %d: vma mm and task mm mismatch\n", + proc->pid); + vma = NULL; + } } if (allocate == 0) @@ -640,8 +645,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, if (vma == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d: binder_alloc_buf failed to " - "map pages in userspace, no vma\n", proc->pid); + "binder: %d: binder_alloc_buf failed to " + "map pages in userspace, no vma\n", proc->pid); goto err_no_vma; } @@ -654,8 +659,8 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, *page = alloc_page(GFP_KERNEL | __GFP_ZERO); if (*page == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d: binder_alloc_buf failed " - "for page at %p\n", proc->pid, page_addr); + "binder: %d: binder_alloc_buf failed " + "for page at %p\n", proc->pid, page_addr); goto err_alloc_page_failed; } tmp_area.addr = page_addr; @@ -664,9 +669,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, ret = map_vm_area(&tmp_area, PAGE_KERNEL, &page_array_ptr); if (ret) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d: binder_alloc_buf failed " - "to map page at %p in kernel\n", - proc->pid, page_addr); + "binder: %d: binder_alloc_buf failed " + "to map page at %p in kernel\n", + proc->pid, page_addr); goto err_map_kernel_failed; } user_page_addr = @@ -674,9 +679,9 @@ static int binder_update_page_range(struct binder_proc *proc, int allocate, ret = vm_insert_page(vma, user_page_addr, page[0]); if (ret) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d: binder_alloc_buf failed " - "to map page at %lx in userspace\n", - proc->pid, user_page_addr); + "binder: %d: binder_alloc_buf failed " + "to map page at %lx in userspace\n", + proc->pid, user_page_addr); goto err_vm_insert_page_failed; } /* vm_insert_page does not seem to increment the refcount */ @@ -724,8 +729,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, if (proc->vma == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d: binder_alloc_buf, no vma\n", - proc->pid); + "binder: %d: binder_alloc_buf, no vma\n", + proc->pid); return NULL; } @@ -763,8 +768,8 @@ static struct binder_buffer *binder_alloc_buf(struct binder_proc *proc, } if (best_fit == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d: binder_alloc_buf size %zd failed, " - "no address space\n", proc->pid, size); + "binder: %d: binder_alloc_buf size %zd failed, " + "no address space\n", proc->pid, size); return NULL; } if (n == NULL) { @@ -999,8 +1004,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, !(node == binder_context_mgr_node && node->has_strong_ref)) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: invalid inc strong " - "node for %d\n", node->debug_id); + "binder: invalid inc strong " + "node for %d\n", node->debug_id); return -EINVAL; } node->internal_strong_refs++; @@ -1016,8 +1021,8 @@ static int binder_inc_node(struct binder_node *node, int strong, int internal, if (!node->has_weak_ref && list_empty(&node->work.entry)) { if (target_list == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: invalid inc weak node " - "for %d\n", node->debug_id); + "binder: invalid inc weak node " + "for %d\n", node->debug_id); return -EINVAL; } 
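/*
 * For orientation, the userspace commands these refcount paths service,
 * summarized from the standard binder protocol rather than this diff:
 *
 *	BC_INCREFS / BC_DECREFS			weak refs on a node
 *	BC_ACQUIRE / BC_RELEASE			strong refs on a node
 *	BC_INCREFS_DONE / BC_ACQUIRE_DONE	owner acks BR_INCREFS/BR_ACQUIRE
 *
 * Queueing node->work on target_list here is what eventually delivers
 * the BR_* request to the process that owns the node.
 */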
list_add_tail(&node->work.entry, target_list); @@ -1053,7 +1058,7 @@ static int binder_dec_node(struct binder_node *node, int strong, int internal) if (node->proc) { rb_erase(&node->rb_node, &node->proc->nodes); binder_debug(BINDER_DEBUG_INTERNAL_REFS, - "binder: refless node %d deleted\n", + "binder: refless node %d deleted\n", node->debug_id); } else { hlist_del(&node->dead_node); @@ -1272,8 +1277,7 @@ static void binder_send_failed_reply(struct binder_transaction *t, binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, "binder: send failed reply for " "transaction %d to %d:%d\n", - t->debug_id, - target_thread->proc->pid, + t->debug_id, target_thread->proc->pid, target_thread->pid); binder_pop_transaction(target_thread, t); @@ -1281,11 +1285,12 @@ static void binder_send_failed_reply(struct binder_transaction *t, wake_up_interruptible(&target_thread->wait); } else { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: reply failed, target " - "thread, %d:%d, has error code %d " - "already\n", target_thread->proc->pid, - target_thread->pid, - target_thread->return_error); + "binder: reply failed, target " + "thread, %d:%d, has error code %d " + "already\n", + target_thread->proc->pid, + target_thread->pid, + target_thread->return_error); } return; } else { @@ -1319,15 +1324,14 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, int debug_id = buffer->debug_id; binder_debug(BINDER_DEBUG_TRANSACTION, - "binder: %d buffer release %d, size %zd-%zd, failed at" - " %p\n", proc->pid, buffer->debug_id, + "binder: %d buffer release %d, size %zd-%zd, failed at %p\n", + proc->pid, buffer->debug_id, buffer->data_size, buffer->offsets_size, failed_at); if (buffer->target_node) binder_dec_node(buffer->target_node, 1, 0); - offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, - sizeof(void *))); + offp = (size_t *)(buffer->data + ALIGN(buffer->data_size, sizeof(void *))); if (failed_at) off_end = failed_at; else @@ -1338,44 +1342,41 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, buffer->data_size < sizeof(*fp) || !IS_ALIGNED(*offp, sizeof(void *))) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: transaction release %d bad" - "offset %zd, size %zd\n", debug_id, - *offp, buffer->data_size); + "binder: transaction release %d bad" + "offset %zd, size %zd\n", debug_id, + *offp, buffer->data_size); continue; } fp = (struct flat_binder_object *)(buffer->data + *offp); switch (fp->type) { case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { - struct binder_node *node = binder_get_node(proc, - fp->binder); + struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: transaction release %d" - " bad node %p\n", debug_id, fp->binder); + "binder: transaction release %d" + " bad node %p\n", debug_id, + fp->binder); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " node %d u%p\n", node->debug_id, node->ptr); - binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, - 0); + binder_dec_node(node, fp->type == BINDER_TYPE_BINDER, 0); } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, - fp->handle); + struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: transaction release %d" - " bad handle %ld\n", debug_id, - fp->handle); + "binder: transaction release %d" + " bad handle %ld\n", debug_id, + fp->handle); break; } binder_debug(BINDER_DEBUG_TRANSACTION, " ref %d desc 
%d (node %d)\n", - ref->debug_id, ref->desc, - ref->node->debug_id); + ref->debug_id, ref->desc, ref->node->debug_id); binder_dec_ref(ref, fp->type == BINDER_TYPE_HANDLE); } break; @@ -1388,8 +1389,8 @@ static void binder_transaction_buffer_release(struct binder_proc *proc, default: binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: transaction release %d bad " - "object type %lx\n", debug_id, fp->type); + "binder: transaction release %d bad " + "object type %lx\n", debug_id, fp->type); break; } } @@ -1614,19 +1615,15 @@ static void binder_transaction(struct binder_proc *proc, case BINDER_TYPE_BINDER: case BINDER_TYPE_WEAK_BINDER: { struct binder_ref *ref; - struct binder_node *node = binder_get_node(proc, - fp->binder); + struct binder_node *node = binder_get_node(proc, fp->binder); if (node == NULL) { - node = binder_new_node(proc, fp->binder, - fp->cookie); + node = binder_new_node(proc, fp->binder, fp->cookie); if (node == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_new_node_failed; } - node->min_priority = fp->flags & - FLAT_BINDER_FLAG_PRIORITY_MASK; - node->accept_fds = !!(fp->flags & - FLAT_BINDER_FLAG_ACCEPTS_FDS); + node->min_priority = fp->flags & FLAT_BINDER_FLAG_PRIORITY_MASK; + node->accept_fds = !!(fp->flags & FLAT_BINDER_FLAG_ACCEPTS_FDS); } if (fp->cookie != node->cookie) { binder_user_error("binder: %d:%d sending u%p " @@ -1656,8 +1653,7 @@ static void binder_transaction(struct binder_proc *proc, } break; case BINDER_TYPE_HANDLE: case BINDER_TYPE_WEAK_HANDLE: { - struct binder_ref *ref = binder_get_ref(proc, - fp->handle); + struct binder_ref *ref = binder_get_ref(proc, fp->handle); if (ref == NULL) { binder_user_error("binder: %d:%d got " "transaction with invalid " @@ -1673,31 +1669,24 @@ static void binder_transaction(struct binder_proc *proc, fp->type = BINDER_TYPE_WEAK_BINDER; fp->binder = ref->node->ptr; fp->cookie = ref->node->cookie; - binder_inc_node(ref->node, fp->type == - BINDER_TYPE_BINDER, 0, NULL); + binder_inc_node(ref->node, fp->type == BINDER_TYPE_BINDER, 0, NULL); binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d -> node %d u%p\n", - ref->debug_id, ref->desc, - ref->node->debug_id, - ref->node->ptr); + " ref %d desc %d -> node %d u%p\n", + ref->debug_id, ref->desc, ref->node->debug_id, + ref->node->ptr); } else { struct binder_ref *new_ref; - new_ref = binder_get_ref_for_node(target_proc, - ref->node); + new_ref = binder_get_ref_for_node(target_proc, ref->node); if (new_ref == NULL) { return_error = BR_FAILED_REPLY; goto err_binder_get_ref_for_node_failed; } fp->handle = new_ref->desc; - binder_inc_ref(new_ref, fp->type == - BINDER_TYPE_HANDLE, NULL); + binder_inc_ref(new_ref, fp->type == BINDER_TYPE_HANDLE, NULL); binder_debug(BINDER_DEBUG_TRANSACTION, - " ref %d desc %d -> ref %d" - " desc %d (node %d)\n", - ref->debug_id, ref->desc, - new_ref->debug_id, - new_ref->desc, - ref->node->debug_id); + " ref %d desc %d -> ref %d desc %d (node %d)\n", + ref->debug_id, ref->desc, new_ref->debug_id, + new_ref->desc, ref->node->debug_id); } } break; @@ -1707,19 +1696,13 @@ static void binder_transaction(struct binder_proc *proc, if (reply) { if (!(in_reply_to->flags & TF_ACCEPT_FDS)) { - binder_user_error("binder: %d:%d got" - " reply with fd, %ld, but" - " target does not allow fds\n", - proc->pid, thread->pid, - fp->handle); + binder_user_error("binder: %d:%d got reply with fd, %ld, but target does not allow fds\n", + proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; } } else if 
(!target_node->accept_fds) { - binder_user_error( - "binder: %d:%d got transaction" - " with fd, %ld, but target does" - " not allow fds\n", + binder_user_error("binder: %d:%d got transaction with fd, %ld, but target does not allow fds\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fd_not_allowed; @@ -1727,15 +1710,12 @@ static void binder_transaction(struct binder_proc *proc, file = fget(fp->handle); if (file == NULL) { - binder_user_error( - "binder: %d:%d got transaction" - " with invalid fd, %ld\n", + binder_user_error("binder: %d:%d got transaction with invalid fd, %ld\n", proc->pid, thread->pid, fp->handle); return_error = BR_FAILED_REPLY; goto err_fget_failed; } - target_fd = task_get_unused_fd_flags(target_proc, - O_CLOEXEC); + target_fd = task_get_unused_fd_flags(target_proc, O_CLOEXEC); if (target_fd < 0) { fput(file); return_error = BR_FAILED_REPLY; @@ -1743,8 +1723,7 @@ static void binder_transaction(struct binder_proc *proc, } task_fd_install(target_proc, target_fd, file); binder_debug(BINDER_DEBUG_TRANSACTION, - " fd %ld -> %d\n", fp->handle, - target_fd); + " fd %ld -> %d\n", fp->handle, target_fd); /* TODO: fput? */ fp->handle = target_fd; } break; @@ -1893,11 +1872,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, break; } binder_debug(BINDER_DEBUG_USER_REFS, - "binder: %d:%d %s ref %d desc %d s %d w %d" - " for node %d\n", proc->pid, thread->pid, - debug_string, ref->debug_id, ref->desc, - ref->strong, ref->weak, - ref->node->debug_id); + "binder: %d:%d %s ref %d desc %d s %d w %d for node %d\n", + proc->pid, thread->pid, debug_string, ref->debug_id, + ref->desc, ref->strong, ref->weak, ref->node->debug_id); break; } case BC_INCREFS_DONE: @@ -1958,19 +1935,17 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, binder_debug(BINDER_DEBUG_USER_REFS, "binder: %d:%d %s node %d ls %d lw %d\n", proc->pid, thread->pid, - cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" - : "BC_ACQUIRE_DONE", - node->debug_id, node->local_strong_refs, - node->local_weak_refs); + cmd == BC_INCREFS_DONE ? "BC_INCREFS_DONE" : "BC_ACQUIRE_DONE", + node->debug_id, node->local_strong_refs, node->local_weak_refs); break; } case BC_ATTEMPT_ACQUIRE: binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: BC_ATTEMPT_ACQUIRE not supported\n"); + "binder: BC_ATTEMPT_ACQUIRE not supported\n"); return -EINVAL; case BC_ACQUIRE_RESULT: - binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: BC_ACQUIRE_RESULT not supported\n"); + binder_debug(BINDER_DEBUG_TOP_ERRORS, + "binder: BC_ACQUIRE_RESULT not supported\n"); return -EINVAL; case BC_FREE_BUFFER: { @@ -1996,11 +1971,9 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, break; } binder_debug(BINDER_DEBUG_FREE_BUFFER, - "binder: %d:%d BC_FREE_BUFFER u%p found" - " buffer %d for %s transaction\n", - proc->pid, thread->pid, data_ptr, - buffer->debug_id, buffer->transaction ? - "active" : "finished"); + "binder: %d:%d BC_FREE_BUFFER u%p found buffer %d for %s transaction\n", + proc->pid, thread->pid, data_ptr, buffer->debug_id, + buffer->transaction ? 
"active" : "finished"); if (buffer->transaction) { buffer->transaction->buffer = NULL; @@ -2097,15 +2070,13 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, } binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, - "binder: %d:%d %s %p ref %d desc %d s %d" - " w %d for node %d\n", + "binder: %d:%d %s %p ref %d desc %d s %d w %d for node %d\n", proc->pid, thread->pid, cmd == BC_REQUEST_DEATH_NOTIFICATION ? "BC_REQUEST_DEATH_NOTIFICATION" : "BC_CLEAR_DEATH_NOTIFICATION", cookie, ref->debug_id, ref->desc, - ref->strong, ref->weak, - ref->node->debug_id); + ref->strong, ref->weak, ref->node->debug_id); if (cmd == BC_REQUEST_DEATH_NOTIFICATION) { if (ref->death) { @@ -2119,12 +2090,10 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, death = kzalloc(sizeof(*death), GFP_KERNEL); if (death == NULL) { thread->return_error = BR_ERROR; - binder_debug( - BINDER_DEBUG_FAILED_TRANSACTION, - "binder: %d:%d " - "BC_REQUEST_DEATH_NOTIFICATION" - " failed\n", - proc->pid, thread->pid); + binder_debug(BINDER_DEBUG_FAILED_TRANSACTION, + "binder: %d:%d " + "BC_REQUEST_DEATH_NOTIFICATION failed\n", + proc->pid, thread->pid); break; } binder_stats_created(BINDER_STAT_DEATH); @@ -2214,8 +2183,8 @@ int binder_thread_write(struct binder_proc *proc, struct binder_thread *thread, default: binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d:%d unknown command %d\n", - proc->pid, thread->pid, cmd); + "binder: %d:%d unknown command %d\n", + proc->pid, thread->pid, cmd); return -EINVAL; } *consumed = ptr - buffer; @@ -2684,11 +2653,9 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) unsigned int size = _IOC_SIZE(cmd); void __user *ubuf = (void __user *)arg; - /*binder_debug(BINDER_DEBUG_TOP_ERRORS, "binder_ioctl: %d:%d %x %lx\n", - proc->pid, current->pid, cmd, arg);*/ + /*printk(KERN_INFO "binder_ioctl: %d:%d %x %lx\n", proc->pid, current->pid, cmd, arg);*/ - ret = wait_event_interruptible(binder_user_error_wait, - binder_stop_on_user_error < 2); + ret = wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret) return ret; @@ -2745,8 +2712,7 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) break; } case BINDER_SET_MAX_THREADS: - if (copy_from_user(&proc->max_threads, ubuf, - sizeof(proc->max_threads))) { + if (copy_from_user(&proc->max_threads, ubuf, sizeof(proc->max_threads))) { ret = -EINVAL; goto err; } @@ -2754,17 +2720,17 @@ static long binder_ioctl(struct file *filp, unsigned int cmd, unsigned long arg) case BINDER_SET_CONTEXT_MGR: if (binder_context_mgr_node != NULL) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: BINDER_SET_CONTEXT_MGR already set\n"); + "binder: BINDER_SET_CONTEXT_MGR already set\n"); ret = -EBUSY; goto err; } if (binder_context_mgr_uid != -1) { if (binder_context_mgr_uid != current->cred->euid) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: BINDER_SET_" - "CONTEXT_MGR bad uid %d != %d\n", - current->cred->euid, - binder_context_mgr_uid); + "binder: BINDER_SET_" + "CONTEXT_MGR bad uid %d != %d\n", + current->cred->euid, + binder_context_mgr_uid); ret = -EPERM; goto err; } @@ -2808,8 +2774,8 @@ err: wait_event_interruptible(binder_user_error_wait, binder_stop_on_user_error < 2); if (ret && ret != -ERESTARTSYS) binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: %d:%d ioctl %x %lx returned %d\n", - proc->pid, current->pid, cmd, arg, ret); + "binder: %d:%d ioctl %x %lx returned %d\n", + proc->pid, current->pid, cmd, arg, ret); return 
ret; } @@ -2821,7 +2787,6 @@ static void binder_vma_open(struct vm_area_struct *vma) proc->pid, vma->vm_start, vma->vm_end, (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); - dump_stack(); } static void binder_vma_close(struct vm_area_struct *vma) @@ -2833,6 +2798,7 @@ static void binder_vma_close(struct vm_area_struct *vma) (vma->vm_end - vma->vm_start) / SZ_1K, vma->vm_flags, (unsigned long)pgprot_val(vma->vm_page_prot)); proc->vma = NULL; + proc->vma_vm_mm = NULL; binder_defer_work(proc, BINDER_DEFERRED_PUT_FILES); } @@ -2865,6 +2831,7 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) } vma->vm_flags = (vma->vm_flags | VM_DONTCOPY) & ~VM_MAYWRITE; + mutex_lock(&binder_mmap_lock); if (proc->buffer) { ret = -EBUSY; failure_string = "already mapped"; @@ -2879,13 +2846,13 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) } proc->buffer = area->addr; proc->user_buffer_offset = vma->vm_start - (uintptr_t)proc->buffer; + mutex_unlock(&binder_mmap_lock); #ifdef CONFIG_CPU_CACHE_VIPT if (cache_is_vipt_aliasing()) { while (CACHE_COLOUR((vma->vm_start ^ (uint32_t)proc->buffer))) { binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder_mmap: %d %lx-%lx maps %p bad alignment\n", - proc->pid, vma->vm_start, vma->vm_end, proc->buffer); + "binder_mmap: %d %lx-%lx maps %p bad alignment\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer); vma->vm_start += PAGE_SIZE; } } @@ -2913,11 +2880,11 @@ static int binder_mmap(struct file *filp, struct vm_area_struct *vma) binder_insert_free_buffer(proc, buffer); proc->free_async_space = proc->buffer_size / 2; barrier(); - proc->files = get_files_struct(current); + proc->files = get_files_struct(proc->tsk); proc->vma = vma; + proc->vma_vm_mm = vma->vm_mm; - /*binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder_mmap: %d %lx-%lx maps %p\n", + /*printk(KERN_INFO "binder_mmap: %d %lx-%lx maps %p\n", proc->pid, vma->vm_start, vma->vm_end, proc->buffer);*/ return 0; @@ -2925,14 +2892,17 @@ err_alloc_small_buf_failed: kfree(proc->pages); proc->pages = NULL; err_alloc_pages_failed: + mutex_lock(&binder_mmap_lock); vfree(proc->buffer); proc->buffer = NULL; err_get_vm_area_failed: err_already_mapped: + mutex_unlock(&binder_mmap_lock); err_bad_arg: binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder_mmap: %d %lx-%lx %s failed %d\n", - proc->pid, vma->vm_start, vma->vm_end, failure_string, ret); + "binder_mmap: %d %lx-%lx %s failed %d\n", + proc->pid, vma->vm_start, vma->vm_end, failure_string, + ret); return ret; } @@ -3087,9 +3057,9 @@ static void binder_deferred_release(struct binder_proc *proc) t->buffer = NULL; buffer->transaction = NULL; binder_debug(BINDER_DEBUG_TOP_ERRORS, - "binder: release proc %d, " - "transaction %d, not freed\n", - proc->pid, t->debug_id); + "binder: release proc %d, " + "transaction %d, not freed\n", + proc->pid, t->debug_id); /*BUG();*/ } binder_free_buf(proc, buffer); From 9f705870677ca748c3baccfc411667cdd4ba1137 Mon Sep 17 00:00:00 2001 From: securecrt Date: Wed, 15 Aug 2012 18:53:09 +0800 Subject: [PATCH 08/22] staging: android: lowmemorykiller: sysfs node and notifications android, lowmemorykiller: remove task handoff notifier staging: android: lowmemorykiller: Fix task_struct leak staging: android/lowmemorykiller: Don't unregister notifier from atomic context staging: android, lowmemorykiller: convert to use oom_score_adj staging: android/lowmemorykiller: Do not kill kernel threads staging: android/lowmemorykiller: No need for task->signal check staging: 
android/lowmemorykiller: Better mm handling staging: android/lowmemorykiller: Don't grab tasklist_lock staging: android: lowmemorykiller: Don't wait more than one second for a process to die Staging: android: fixed 80 characters warnings in lowmemorykiller.c staging: android: lowmemorykiller: Ignore shmem pages in page-cache staging: android: lowmemorykiller: Remove bitrotted codepath staging: android: lowmemkiller: Substantially reduce overhead during reclaim staging: android: lowmemorykiller: Don't try to kill the same pid over and over --- drivers/staging/android/lowmemorykiller.c | 201 ++++++++++++++++++---- 1 file changed, 164 insertions(+), 37 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 42cd93ea..05ebece0 100644 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -29,12 +29,17 @@ * */ -#include #include +#include +#include +#include #include +#include +#include #include #include -#include +#include +#include static uint32_t lowmem_debug_level = 2; static int lowmem_adj[6] = { @@ -52,8 +57,12 @@ static size_t lowmem_minfree[6] = { }; static int lowmem_minfree_size = 4; +static size_t lowmem_minfree_notif_trigger; + +static unsigned int offlining; static struct task_struct *lowmem_deathpending; -static DEFINE_SPINLOCK(lowmem_deathpending_lock); +static unsigned long lowmem_deathpending_timeout; +static struct kobject *lowmem_kobj; #define lowmem_print(level, x...) \ do { \ @@ -68,29 +77,66 @@ static struct notifier_block task_nb = { .notifier_call = task_notify_func, }; - -static void task_free_fn(struct work_struct *work) -{ - unsigned long flags; - - task_free_unregister(&task_nb); - spin_lock_irqsave(&lowmem_deathpending_lock, flags); - lowmem_deathpending = NULL; - spin_unlock_irqrestore(&lowmem_deathpending_lock, flags); -} -static DECLARE_WORK(task_free_work, task_free_fn); - static int task_notify_func(struct notifier_block *self, unsigned long val, void *data) { struct task_struct *task = data; - if (task == lowmem_deathpending) { - schedule_work(&task_free_work); - } + if (task == lowmem_deathpending) + lowmem_deathpending = NULL; + return NOTIFY_OK; } +#ifdef CONFIG_MEMORY_HOTPLUG +static int lmk_hotplug_callback(struct notifier_block *self, + unsigned long cmd, void *data) +{ + switch (cmd) { + /* Don't care LMK cases */ + case MEM_ONLINE: + case MEM_OFFLINE: + case MEM_CANCEL_ONLINE: + case MEM_CANCEL_OFFLINE: + case MEM_GOING_ONLINE: + offlining = 0; + lowmem_print(4, "lmk in normal mode\n"); + break; + /* LMK should account for movable zone */ + case MEM_GOING_OFFLINE: + offlining = 1; + lowmem_print(4, "lmk in hotplug mode\n"); + break; + } + return NOTIFY_DONE; +} +#endif + + + +static void lowmem_notify_killzone_approach(void); + +static inline void get_free_ram(int *other_free, int *other_file) +{ + struct zone *zone; + *other_free = global_page_state(NR_FREE_PAGES); + *other_file = global_page_state(NR_FILE_PAGES) - + global_page_state(NR_SHMEM); + + if (offlining) { + /* Discount all free space in the section being offlined */ + for_each_zone(zone) { + if (zone_idx(zone) == ZONE_MOVABLE) { + *other_free -= zone_page_state(zone, + NR_FREE_PAGES); + lowmem_print(4, "lowmem_shrink discounted " + "%lu pages in movable zone\n", + zone_page_state(zone, NR_FREE_PAGES)); + } + } + } +} + static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) { struct task_struct *p; @@ -102,10 +148,8 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) int 
selected_tasksize = 0; int selected_oom_adj; int array_size = ARRAY_SIZE(lowmem_adj); - int other_free = global_page_state(NR_FREE_PAGES); - int other_file = global_page_state(NR_FILE_PAGES); - unsigned long flags; - + int other_free; + int other_file; /* * If we already have a death outstanding, then * bail out right away; indicating to vmscan @@ -113,15 +157,24 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) * this pass. * */ - if (lowmem_deathpending) + if (lowmem_deathpending && + time_before_eq(jiffies, lowmem_deathpending_timeout)) return 0; + get_free_ram(&other_free, &other_file); + + if (other_free < lowmem_minfree_notif_trigger && + other_file < lowmem_minfree_notif_trigger) { + lowmem_notify_killzone_approach(); + } + if (lowmem_adj_size < array_size) array_size = lowmem_adj_size; if (lowmem_minfree_size < array_size) array_size = lowmem_minfree_size; for (i = 0; i < array_size; i++) { - if (other_file < lowmem_minfree[i]) { + if (other_free < lowmem_minfree[i] && + other_file < lowmem_minfree[i]) { min_adj = lowmem_adj[i]; break; } @@ -176,20 +229,14 @@ static int lowmem_shrink(int nr_to_scan, gfp_t gfp_mask) lowmem_print(2, "select %d (%s), adj %d, size %d, to kill\n", p->pid, p->comm, oom_adj, tasksize); } - if (selected) { - spin_lock_irqsave(&lowmem_deathpending_lock, flags); - if (!lowmem_deathpending) { - lowmem_print(1, - "send sigkill to %d (%s), adj %d, size %d\n", - selected->pid, selected->comm, - selected_oom_adj, selected_tasksize); - lowmem_deathpending = selected; - task_free_register(&task_nb); - force_sig(SIGKILL, selected); - rem -= selected_tasksize; - } - spin_unlock_irqrestore(&lowmem_deathpending_lock, flags); + lowmem_print(1, "send sigkill to %d (%s), adj %d, size %d\n", + selected->pid, selected->comm, + selected_oom_adj, selected_tasksize); + lowmem_deathpending = selected; + lowmem_deathpending_timeout = jiffies + HZ; + force_sig(SIGKILL, selected); + rem -= selected_tasksize; } lowmem_print(4, "lowmem_shrink %d, %x, return %d\n", nr_to_scan, gfp_mask, rem); @@ -202,15 +249,93 @@ static struct shrinker lowmem_shrinker = { .seeks = DEFAULT_SEEKS * 16 }; +static void lowmem_notify_killzone_approach(void) +{ + lowmem_print(3, "notification trigger activated\n"); + sysfs_notify(lowmem_kobj, NULL, "notify_trigger_active"); +} + +static ssize_t lowmem_notify_trigger_active_show(struct kobject *k, + struct kobj_attribute *attr, char *buf) +{ + int other_free, other_file; + get_free_ram(&other_free, &other_file); + if (other_free < lowmem_minfree_notif_trigger && + other_file < lowmem_minfree_notif_trigger) + return snprintf(buf, 3, "1\n"); + else + return snprintf(buf, 3, "0\n"); +} + +static struct kobj_attribute lowmem_notify_trigger_active_attr = + __ATTR(notify_trigger_active, S_IRUGO, + lowmem_notify_trigger_active_show, NULL); + +static struct attribute *lowmem_default_attrs[] = { + &lowmem_notify_trigger_active_attr.attr, + NULL, +}; + +static ssize_t lowmem_show(struct kobject *k, struct attribute *attr, char *buf) +{ + struct kobj_attribute *kobj_attr; + kobj_attr = container_of(attr, struct kobj_attribute, attr); + return kobj_attr->show(k, kobj_attr, buf); +} + +static const struct sysfs_ops lowmem_ops = { + .show = lowmem_show, +}; + +static void lowmem_kobj_release(struct kobject *kobj) +{ + /* Nothing to be done here */ +} + +static struct kobj_type lowmem_kobj_type = { + .release = lowmem_kobj_release, + .sysfs_ops = &lowmem_ops, + .default_attrs = lowmem_default_attrs, +}; + static int __init lowmem_init(void) { + int rc; + 
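Userspace is meant to sleep on the new attribute rather than re-read it in a loop; the sysfs_notify() call above wakes POLLPRI waiters. A sketch of the usual sysfs poll idiom (the path follows from registering "lowmemkiller" under mm_kobj; error handling elided):

	#include <fcntl.h>
	#include <poll.h>
	#include <unistd.h>

	static void wait_for_killzone(void)
	{
		char val[4];
		int fd = open("/sys/kernel/mm/lowmemkiller/notify_trigger_active",
				O_RDONLY);
		struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

		read(fd, val, sizeof(val));	/* must read once to arm the poll */
		poll(&pfd, 1, -1);		/* blocks until sysfs_notify() */
		pread(fd, val, sizeof(val), 0);	/* "1\n" while inside the killzone */
	}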
task_free_register(&task_nb); register_shrinker(&lowmem_shrinker); +#ifdef CONFIG_MEMORY_HOTPLUG + hotplug_memory_notifier(lmk_hotplug_callback, 0); +#endif + + lowmem_kobj = kzalloc(sizeof(*lowmem_kobj), GFP_KERNEL); + if (!lowmem_kobj) { + rc = -ENOMEM; + goto err; + } + + rc = kobject_init_and_add(lowmem_kobj, &lowmem_kobj_type, + mm_kobj, "lowmemkiller"); + if (rc) + goto err_kobj; + return 0; + +err_kobj: + kfree(lowmem_kobj); + +err: + unregister_shrinker(&lowmem_shrinker); + task_free_unregister(&task_nb); + + return rc; } static void __exit lowmem_exit(void) { + kobject_put(lowmem_kobj); + kfree(lowmem_kobj); unregister_shrinker(&lowmem_shrinker); + task_free_unregister(&task_nb); } module_param_named(cost, lowmem_shrinker.seeks, int, S_IRUGO | S_IWUSR); @@ -219,6 +344,8 @@ module_param_array_named(adj, lowmem_adj, int, &lowmem_adj_size, module_param_array_named(minfree, lowmem_minfree, uint, &lowmem_minfree_size, S_IRUGO | S_IWUSR); module_param_named(debug_level, lowmem_debug_level, uint, S_IRUGO | S_IWUSR); +module_param_named(notify_trigger, lowmem_minfree_notif_trigger, uint, + S_IRUGO | S_IWUSR); module_init(lowmem_init); module_exit(lowmem_exit); From a8ff4f89b4c1dbea97072bf32dc8278cf7af35fb Mon Sep 17 00:00:00 2001 From: securecrt Date: Wed, 15 Aug 2012 18:53:52 +0800 Subject: [PATCH 09/22] android: logger: Allow a UID to read its own log entries staging: logger: hold mutex while removing reader staging: android: logger: clarify non-update of w_off in do_write_log_from_user staging: android: logger: clarify code in clock_interval staging: android: logger: reorder prepare_to_wait and mutex_lock staging: android: logger: simplify and optimize get_entry_len staging: android: logger: Change logger_offset() from macro to function Staging: android: fixed white spaces coding style issue in logger.c android: logger: bump up the logger buffer sizes --- drivers/staging/android/logger.c | 262 +++++++++++++++++++++++++------ drivers/staging/android/logger.h | 29 +++- include/linux/capability.h | 6 +- 3 files changed, 246 insertions(+), 51 deletions(-) diff --git a/drivers/staging/android/logger.c b/drivers/staging/android/logger.c index 1a0c1391..05559eec 100644 --- a/drivers/staging/android/logger.c +++ b/drivers/staging/android/logger.c @@ -37,7 +37,7 @@ * mutex 'mutex'. */ struct logger_log { - unsigned char *buffer;/* the ring buffer itself */ + unsigned char *buffer;/* the ring buffer itself */ struct miscdevice misc; /* misc device representing the log */ wait_queue_head_t wq; /* wait queue for readers */ struct list_head readers; /* this log's readers */ @@ -57,19 +57,25 @@ struct logger_reader { struct logger_log *log; /* associated log */ struct list_head list; /* entry in logger_log's list */ size_t r_off; /* current read head offset */ + bool r_all; /* reader can read all entries */ + int r_ver; /* reader ABI version */ }; /* logger_offset - returns index 'n' into the log via (optimized) modulus */ -#define logger_offset(n) ((n) & (log->size - 1)) +size_t logger_offset(struct logger_log *log, size_t n) +{ + return n & (log->size-1); +} + /* * file_get_log - Given a file structure, return the associated log * * This isn't aesthetic.
We have several goals: * - * 1) Need to quickly obtain the associated log during an I/O operation - * 2) Readers need to maintain state (logger_reader) - * 3) Writers need to be very fast (open() should be a near no-op) + * 1) Need to quickly obtain the associated log during an I/O operation + * 2) Readers need to maintain state (logger_reader) + * 3) Writers need to be very fast (open() should be a near no-op) * * In the reader case, we can trivially go file->logger_reader->logger_log. * For a writer, we don't want to maintain a logger_reader, so we just go @@ -86,25 +92,75 @@ static inline struct logger_log *file_get_log(struct file *file) } /* - * get_entry_len - Grabs the length of the payload of the next entry starting - * from 'off'. + * get_entry_header - returns a pointer to the logger_entry header within + * 'log' starting at offset 'off'. A temporary logger_entry 'scratch' must + * be provided. Typically the return value will be a pointer within + * 'logger->buf'. However, a pointer to 'scratch' may be returned if + * the log entry spans the end and beginning of the circular buffer. + */ +static struct logger_entry *get_entry_header(struct logger_log *log, + size_t off, struct logger_entry *scratch) +{ + size_t len = min(sizeof(struct logger_entry), log->size - off); + if (len != sizeof(struct logger_entry)) { + memcpy(((void *) scratch), log->buffer + off, len); + memcpy(((void *) scratch) + len, log->buffer, + sizeof(struct logger_entry) - len); + return scratch; + } + + return (struct logger_entry *) (log->buffer + off); + } + +/* + * get_entry_msg_len - Grabs the length of the message of the entry + * starting from 'off'. + * + * An entry length is 2 bytes (16 bits) in host endian order. + * In the log, the length does not include the size of the log entry structure. + * This function returns the size including the log entry structure. * * Caller needs to hold log->mutex. */ -static __u32 get_entry_len(struct logger_log *log, size_t off) +static __u32 get_entry_msg_len(struct logger_log *log, size_t off) { - __u16 val; + struct logger_entry scratch; + struct logger_entry *entry; - switch (log->size - off) { - case 1: - memcpy(&val, log->buffer + off, 1); - memcpy(((char *) &val) + 1, log->buffer, 1); - break; - default: - memcpy(&val, log->buffer + off, 2); + entry = get_entry_header(log, off, &scratch); + return entry->len; +} + +static size_t get_user_hdr_len(int ver) +{ + if (ver < 2) + return sizeof(struct user_logger_entry_compat); + else + return sizeof(struct logger_entry); +} + +static ssize_t copy_header_to_user(int ver, struct logger_entry *entry, + char __user *buf) +{ + void *hdr; + size_t hdr_len; + struct user_logger_entry_compat v1; + + if (ver < 2) { + v1.len = entry->len; + v1.__pad = 0; + v1.pid = entry->pid; + v1.tid = entry->tid; + v1.sec = entry->sec; + v1.nsec = entry->nsec; + hdr = &v1; + hdr_len = sizeof(struct user_logger_entry_compat); + } else { + hdr = entry; + hdr_len = sizeof(struct logger_entry); } - return sizeof(struct logger_entry) + val; + return copy_to_user(buf, hdr, hdr_len); } /* @@ -118,15 +174,31 @@ static ssize_t do_read_log_to_user(struct logger_log *log, char __user *buf, size_t count) { + struct logger_entry scratch; + struct logger_entry *entry; size_t len; + size_t msg_start; /* - * We read from the log in two disjoint operations.
First, we read from - * the current read head offset up to 'count' bytes or to the end of + * First, copy the header to userspace, using the version of + * the header requested + */ + entry = get_entry_header(log, reader->r_off, &scratch); + if (copy_header_to_user(reader->r_ver, entry, buf)) + return -EFAULT; + + count -= get_user_hdr_len(reader->r_ver); + buf += get_user_hdr_len(reader->r_ver); + msg_start = logger_offset(log, + reader->r_off + sizeof(struct logger_entry)); + + /* + * We read from the msg in two disjoint operations. First, we read from + * the current msg head offset up to 'count' bytes or to the end of * the log, whichever comes first. */ - len = min(count, log->size - reader->r_off); - if (copy_to_user(buf, log->buffer + reader->r_off, len)) + len = min(count, log->size - msg_start); + if (copy_to_user(buf, log->buffer + msg_start, len)) return -EFAULT; /* @@ -137,9 +209,34 @@ static ssize_t do_read_log_to_user(struct logger_log *log, if (copy_to_user(buf + len, log->buffer, count - len)) return -EFAULT; - reader->r_off = logger_offset(reader->r_off + count); + reader->r_off = logger_offset(log, reader->r_off + + sizeof(struct logger_entry) + count); - return count; + return count + get_user_hdr_len(reader->r_ver); +} + +/* + * get_next_entry_by_uid - Starting at 'off', returns an offset into + * 'log->buffer' which contains the first entry readable by 'euid' + */ +static size_t get_next_entry_by_uid(struct logger_log *log, + size_t off, uid_t euid) +{ + while (off != log->w_off) { + struct logger_entry *entry; + struct logger_entry scratch; + size_t next_len; + + entry = get_entry_header(log, off, &scratch); + + if (entry->euid == euid) + return off; + + next_len = sizeof(struct logger_entry) + entry->len; + off = logger_offset(log, off + next_len); + } + + return off; } /* @@ -147,11 +244,11 @@ static ssize_t do_read_log_to_user(struct logger_log *log, * * Behavior: * - * - O_NONBLOCK works - * - If there are no log entries to read, blocks until log is written to - * - Atomically reads exactly one log entry + * - O_NONBLOCK works + * - If there are no log entries to read, blocks until log is written to + * - Atomically reads exactly one log entry * - * Optimal read size is LOGGER_ENTRY_MAX_LEN. Will set errno to EINVAL if read + * Will set errno to EINVAL if read * buffer is insufficient to hold next entry. */ static ssize_t logger_read(struct file *file, char __user *buf, @@ -164,9 +261,10 @@ static ssize_t logger_read(struct file *file, char __user *buf, start: while (1) { + mutex_lock(&log->mutex); + prepare_to_wait(&log->wq, &wait, TASK_INTERRUPTIBLE); - mutex_lock(&log->mutex); ret = (log->w_off == reader->r_off); mutex_unlock(&log->mutex); if (!ret) @@ -191,6 +289,10 @@ start: mutex_lock(&log->mutex); + if (!reader->r_all) + reader->r_off = get_next_entry_by_uid(log, + reader->r_off, current_euid()); + /* is there still something to read or did we race? 
*/ if (unlikely(log->w_off == reader->r_off)) { mutex_unlock(&log->mutex); @@ -198,7 +300,8 @@ start: } /* get the size of the next entry */ - ret = get_entry_len(log, reader->r_off); + ret = get_user_hdr_len(reader->r_ver) + + get_entry_msg_len(log, reader->r_off); if (count < ret) { ret = -EINVAL; goto out; } @@ -224,8 +327,9 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) size_t count = 0; do { - size_t nr = get_entry_len(log, off); - off = logger_offset(off + nr); + size_t nr = sizeof(struct logger_entry) + + get_entry_msg_len(log, off); + off = logger_offset(log, off + nr); count += nr; } while (count < len); @@ -233,16 +337,28 @@ static size_t get_next_entry(struct logger_log *log, size_t off, size_t len) } /* - * clock_interval - is a < c < b in mod-space? Put another way, does the line - * from a to b cross c? + * is_between - is a < c < b, accounting for wrapping of a, b, and c + * positions in the buffer + * + * That is, if a<b, check for c between a and b + * if a>b, check for c outside (not between) a and b + * + * |------- a xxxxxxxx b --------| + * c^ + * + * |xxxxx b --------- a xxxxxxxxx| + * c^ + * or c^ */ -static inline int clock_interval(size_t a, size_t b, size_t c) +static inline int is_between(size_t a, size_t b, size_t c) { - if (b < a) { - if (a < c || b >= c) + if (a < b) { + /* is c between a and b? */ + if (a < c && c <= b) return 1; } else { - if (a < c && b >= c) + /* is c outside of b through a? */ + if (c <= b || a < c) return 1; } @@ -260,14 +376,14 @@ static inline int clock_interval(size_t a, size_t b, size_t c) static void fix_up_readers(struct logger_log *log, size_t len) { size_t old = log->w_off; - size_t new = logger_offset(old + len); + size_t new = logger_offset(log, old + len); struct logger_reader *reader; - if (clock_interval(old, new, log->head)) + if (is_between(old, new, log->head)) log->head = get_next_entry(log, log->head, len); list_for_each_entry(reader, &log->readers, list) - if (clock_interval(old, new, reader->r_off)) + if (is_between(old, new, reader->r_off)) reader->r_off = get_next_entry(log, reader->r_off, len); } @@ -286,7 +402,7 @@ static void do_write_log(struct logger_log *log, const void *buf, size_t count) if (count != len) memcpy(log->buffer, buf + len, count - len); - log->w_off = logger_offset(log->w_off + count); + log->w_off = logger_offset(log, log->w_off + count); } @@ -309,9 +425,15 @@ static ssize_t do_write_log_from_user(struct logger_log *log, if (count != len) if (copy_from_user(log->buffer, buf + len, count - len)) + /* + * Note that by not updating w_off, this abandons the + * portion of the new entry that *was* successfully + * copied, just above. This is intentional to avoid + * message corruption from missing fragments.
+ */ return -EFAULT; - log->w_off = logger_offset(log->w_off + count); + log->w_off = logger_offset(log, log->w_off + count); return count; } @@ -336,7 +458,9 @@ ssize_t logger_aio_write(struct kiocb *iocb, const struct iovec *iov, header.tid = current->pid; header.sec = now.tv_sec; header.nsec = now.tv_nsec; + header.euid = current_euid(); header.len = min_t(size_t, iocb->ki_left, LOGGER_ENTRY_MAX_PAYLOAD); + header.hdr_size = sizeof(struct logger_entry); /* null writes succeed, return zero */ if (unlikely(!header.len)) @@ -409,6 +533,10 @@ static int logger_open(struct inode *inode, struct file *file) return -ENOMEM; reader->log = log; + reader->r_ver = 1; + reader->r_all = in_egroup_p(inode->i_gid) || + capable(CAP_SYSLOG); + INIT_LIST_HEAD(&reader->list); mutex_lock(&log->mutex); @@ -433,9 +561,11 @@ static int logger_release(struct inode *ignored, struct file *file) if (file->f_mode & FMODE_READ) { struct logger_reader *reader = file->private_data; struct logger_log *log = reader->log; + mutex_lock(&log->mutex); list_del(&reader->list); mutex_unlock(&log->mutex); + kfree(reader); } @@ -466,6 +596,10 @@ static unsigned int logger_poll(struct file *file, poll_table *wait) poll_wait(file, &log->wq, wait); mutex_lock(&log->mutex); + if (!reader->r_all) + reader->r_off = get_next_entry_by_uid(log, + reader->r_off, current_euid()); + if (log->w_off != reader->r_off) ret |= POLLIN | POLLRDNORM; mutex_unlock(&log->mutex); @@ -473,11 +607,25 @@ static unsigned int logger_poll(struct file *file, poll_table *wait) return ret; } +static long logger_set_version(struct logger_reader *reader, void __user *arg) +{ + int version; + if (copy_from_user(&version, arg, sizeof(int))) + return -EFAULT; + + if ((version < 1) || (version > 2)) + return -EINVAL; + + reader->r_ver = version; + return 0; +} + static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct logger_log *log = file_get_log(file); struct logger_reader *reader; - long ret = -ENOTTY; + long ret = -EINVAL; + void __user *argp = (void __user *) arg; mutex_lock(&log->mutex); @@ -502,8 +650,14 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; } reader = file->private_data; + + if (!reader->r_all) + reader->r_off = get_next_entry_by_uid(log, + reader->r_off, current_euid()); + if (log->w_off != reader->r_off) - ret = get_entry_len(log, reader->r_off); + ret = get_user_hdr_len(reader->r_ver) + + get_entry_msg_len(log, reader->r_off); else ret = 0; break; @@ -517,6 +671,22 @@ static long logger_ioctl(struct file *file, unsigned int cmd, unsigned long arg) log->head = log->w_off; ret = 0; break; + case LOGGER_GET_VERSION: + if (!(file->f_mode & FMODE_READ)) { + ret = -EBADF; + break; + } + reader = file->private_data; + ret = reader->r_ver; + break; + case LOGGER_SET_VERSION: + if (!(file->f_mode & FMODE_READ)) { + ret = -EBADF; + break; + } + reader = file->private_data; + ret = logger_set_version(reader, argp); + break; } mutex_unlock(&log->mutex); @@ -537,8 +707,8 @@ static const struct file_operations logger_fops = { /* * Defines a log structure with name 'NAME' and a size of 'SIZE' bytes, which - * must be a power of two, greater than LOGGER_ENTRY_MAX_LEN, and less than - * LONG_MAX minus LOGGER_ENTRY_MAX_LEN. + * must be a power of two, and greater than + * (LOGGER_ENTRY_MAX_PAYLOAD + sizeof(struct logger_entry)). 
*/ #define DEFINE_LOGGER_DEVICE(VAR, NAME, SIZE) \ static unsigned char _buf_ ## VAR[SIZE]; \ diff --git a/drivers/staging/android/logger.h b/drivers/staging/android/logger.h index 2cb06e9d..3f612a3b 100644 --- a/drivers/staging/android/logger.h +++ b/drivers/staging/android/logger.h @@ -20,7 +20,12 @@ #include #include -struct logger_entry { +/* + * The userspace structure for version 1 of the logger_entry ABI. + * This structure is returned to userspace unless the caller requests + * an upgrade to a newer ABI version. + */ +struct user_logger_entry_compat { __u16 len; /* length of the payload */ __u16 __pad; /* no matter what, we get 2 bytes of padding */ __s32 pid; /* generating process's pid */ @@ -30,14 +35,28 @@ struct logger_entry { char msg[0]; /* the entry's payload */ }; +/* + * The structure for version 2 of the logger_entry ABI. + * This structure is returned to userspace if ioctl(LOGGER_SET_VERSION) + * is called with version >= 2 + */ +struct logger_entry { + __u16 len; /* length of the payload */ + __u16 hdr_size; /* sizeof(struct logger_entry_v2) */ + __s32 pid; /* generating process's pid */ + __s32 tid; /* generating process's tid */ + __s32 sec; /* seconds since Epoch */ + __s32 nsec; /* nanoseconds */ + uid_t euid; /* effective UID of logger */ + char msg[0]; /* the entry's payload */ +}; + #define LOGGER_LOG_RADIO "log_radio" /* radio-related messages */ #define LOGGER_LOG_EVENTS "log_events" /* system/hardware events */ #define LOGGER_LOG_SYSTEM "log_system" /* system/framework messages */ #define LOGGER_LOG_MAIN "log_main" /* everything else */ -#define LOGGER_ENTRY_MAX_LEN (4*1024) -#define LOGGER_ENTRY_MAX_PAYLOAD \ - (LOGGER_ENTRY_MAX_LEN - sizeof(struct logger_entry)) +#define LOGGER_ENTRY_MAX_PAYLOAD 4076 #define __LOGGERIO 0xAE @@ -45,5 +64,7 @@ struct logger_entry { #define LOGGER_GET_LOG_LEN _IO(__LOGGERIO, 2) /* used log len */ #define LOGGER_GET_NEXT_ENTRY_LEN _IO(__LOGGERIO, 3) /* next entry len */ #define LOGGER_FLUSH_LOG _IO(__LOGGERIO, 4) /* flush log */ +#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5) /* abi version */ +#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6) /* abi version */ #endif /* _LINUX_LOGGER_H */ diff --git a/include/linux/capability.h b/include/linux/capability.h index c8f2a5f7..c4f6d94d 100644 --- a/include/linux/capability.h +++ b/include/linux/capability.h @@ -357,7 +357,11 @@ struct cpu_vfs_cap_data { #define CAP_MAC_ADMIN 33 -#define CAP_LAST_CAP CAP_MAC_ADMIN +/* Allow configuring the kernel's syslog (printk behaviour) */ + +#define CAP_SYSLOG 34 + +#define CAP_LAST_CAP CAP_SYSLOG #define cap_valid(x) ((x) >= 0 && (x) <= CAP_LAST_CAP) From 8d4f825606dfdbf3c3c756824841b7eb1328a0a4 Mon Sep 17 00:00:00 2001 From: securecrt Date: Wed, 15 Aug 2012 18:54:43 +0800 Subject: [PATCH 10/22] pmem: Check for valid virtual address while flushing pmem: Correctly account for aligned blocks --- drivers/misc/pmem.c | 20 ++++++++++++++------ 1 file changed, 14 insertions(+), 6 deletions(-) diff --git a/drivers/misc/pmem.c b/drivers/misc/pmem.c index f1523fd5..bc083e17 100755 --- a/drivers/misc/pmem.c +++ b/drivers/misc/pmem.c @@ -1,7 +1,7 @@ /* drivers/android/pmem.c * * Copyright (C) 2007 Google, Inc. - * Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved. + * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved. 
* * This software is licensed under the terms of the GNU General Public * License version 2, as published by the Free Software Foundation, and @@ -1074,17 +1074,17 @@ static void bitmap_bits_set_all(uint32_t *bitp, int bit_start, int bit_end) static int bitmap_allocate_contiguous(uint32_t *bitp, int num_bits_to_alloc, - int total_bits, int spacing) + int total_bits, int spacing, int start_bit) { int bit_start, last_bit, word_index; if (num_bits_to_alloc <= 0) return -1; - for (bit_start = 0; ; - bit_start = (last_bit + + for (bit_start = start_bit; ; + bit_start = ((last_bit + (word_index << PMEM_32BIT_WORD_ORDER) + spacing - 1) - & ~(spacing - 1)) { + & ~(spacing - 1)) + start_bit) { int bit_end = bit_start + num_bits_to_alloc, total_words; if (bit_end > total_bits) @@ -1162,7 +1162,8 @@ static int reserve_quanta(const unsigned int quanta_needed, ret = bitmap_allocate_contiguous(pmem[id].allocator.bitmap.bitmap, quanta_needed, (pmem[id].size + pmem[id].quantum - 1) / pmem[id].quantum, - spacing); + spacing, + start_bit); #if PMEM_DEBUG if (ret < 0) @@ -1915,6 +1916,13 @@ int pmem_cache_maint(struct file *file, unsigned int cmd, if (!file) return -EBADF; + /* + * check that the vaddr passed for flushing is valid + * so that you don't crash the kernel + */ + if (!pmem_addr->vaddr) + return -EINVAL; + data = file->private_data; id = get_id(file); From e4c201d97a7070f55fc9d0c2f02b5f767c0a685a Mon Sep 17 00:00:00 2001 From: securecrt Date: Wed, 15 Aug 2012 18:55:05 +0800 Subject: [PATCH 11/22] pmem_adsp (user-space) as non-cached pmem_venc (user-space) as non-cached --- arch/arm/mach-msm/board-htcleo.c | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/arch/arm/mach-msm/board-htcleo.c b/arch/arm/mach-msm/board-htcleo.c index 4697ef53..4913f043 100755 --- a/arch/arm/mach-msm/board-htcleo.c +++ b/arch/arm/mach-msm/board-htcleo.c @@ -771,7 +771,7 @@ static struct android_pmem_platform_data android_pmem_adsp_pdata = { #else .no_allocator = 0, #endif - .cached = 1, + .cached = 0, }; @@ -784,7 +784,7 @@ static struct android_pmem_platform_data android_pmem_venc_pdata = { #else .no_allocator = 0, #endif - .cached = 1, + .cached = 0, }; static struct platform_device android_pmem_mdp_device = { From c59e5f029ca79573404a5a01bc2107d3c4d9f729 Mon Sep 17 00:00:00 2001 From: securecrt Date: Wed, 15 Aug 2012 18:55:32 +0800 Subject: [PATCH 12/22] change MSM_NAND_DMA_BUFFER_SIZE to SZ_1M --- drivers/mtd/devices/htcleo_nand.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/mtd/devices/htcleo_nand.c b/drivers/mtd/devices/htcleo_nand.c index 9b23d680..12a9b390 100755 --- a/drivers/mtd/devices/htcleo_nand.c +++ b/drivers/mtd/devices/htcleo_nand.c @@ -51,7 +51,7 @@ unsigned crci_mask; #include "msm_nand.h" -#define MSM_NAND_DMA_BUFFER_SIZE SZ_4K +#define MSM_NAND_DMA_BUFFER_SIZE SZ_1M #define MSM_NAND_DMA_BUFFER_SLOTS \ (MSM_NAND_DMA_BUFFER_SIZE / (sizeof(((atomic_t *)0)->counter) * 8)) From f9c1be0ea35269eb4f687cf114c5b302de7b453f Mon Sep 17 00:00:00 2001 From: securecrt Date: Thu, 23 Aug 2012 13:06:34 +0800 Subject: [PATCH 13/22] fixed merge problem --- drivers/staging/android/lowmemorykiller.c | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 5430f578..5930a813 100755 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -40,14 +40,11 @@ #include #include #include -<<<<<<< HEAD -======= #ifdef CONFIG_SWAP 
#include #include #endif ->>>>>>> ics_HWA static uint32_t lowmem_debug_level = 2; static int lowmem_adj[6] = { @@ -124,6 +121,8 @@ static int lmk_hotplug_callback(struct notifier_block *self, } #endif + + static void lowmem_notify_killzone_approach(void); static inline void get_free_ram(int *other_free, int *other_file) From b989638f53374c1aa496d0bcaa76528720f6d033 Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Thu, 23 Aug 2012 22:41:48 +0800 Subject: [PATCH 14/22] fixed merge problem --- drivers/staging/android/lowmemorykiller.c | 3 --- 1 file changed, 3 deletions(-) diff --git a/drivers/staging/android/lowmemorykiller.c b/drivers/staging/android/lowmemorykiller.c index 5430f578..f9a21b86 100755 --- a/drivers/staging/android/lowmemorykiller.c +++ b/drivers/staging/android/lowmemorykiller.c @@ -40,14 +40,11 @@ #include #include #include -<<<<<<< HEAD -======= #ifdef CONFIG_SWAP #include #include #endif ->>>>>>> ics_HWA static uint32_t lowmem_debug_level = 2; static int lowmem_adj[6] = { From b9ead040d28175b3b783c64f7f73a9a6d8e74bed Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Fri, 21 Sep 2012 01:06:00 +0800 Subject: [PATCH 15/22] enable -pipe --- arch/arm/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/arm/Makefile b/arch/arm/Makefile index a73caaf6..f35b1588 100644 --- a/arch/arm/Makefile +++ b/arch/arm/Makefile @@ -17,7 +17,7 @@ endif OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S GZFLAGS :=-9 -#KBUILD_CFLAGS +=-pipe +KBUILD_CFLAGS +=-pipe # Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb: KBUILD_CFLAGS +=$(call cc-option,-marm,) From c8d5db022ffee3d4ee8a6b32710d229a9baf9283 Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Fri, 21 Sep 2012 01:06:29 +0800 Subject: [PATCH 16/22] ashmem: Implement read(2) in ashmem driver ashmem: Fix ASHMEM_SET_PROT_MASK. 
ashmem: Support lseek(2) in ashmem driver ashmem: Fix the build failure when OUTER_CACHE is enabled ashmem: Fix ashmem vm range comparison to stop roll-over --- mm/ashmem.c | 146 ++++++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 143 insertions(+), 3 deletions(-) mode change 100644 => 100755 mm/ashmem.c diff --git a/mm/ashmem.c b/mm/ashmem.c old mode 100644 new mode 100755 index 5e059283..0404e21f --- a/mm/ashmem.c +++ b/mm/ashmem.c @@ -29,9 +29,10 @@ #include #include #include +#include -#define ASHMEM_NAME_PREFIX "" -#define ASHMEM_NAME_PREFIX_LEN 0 +#define ASHMEM_NAME_PREFIX "dev/ashmem/" +#define ASHMEM_NAME_PREFIX_LEN (sizeof(ASHMEM_NAME_PREFIX) - 1) #define ASHMEM_FULL_NAME_LEN (ASHMEM_NAME_LEN + ASHMEM_NAME_PREFIX_LEN) /* @@ -45,6 +46,8 @@ struct ashmem_area { struct list_head unpinned_list; /* list of all ashmem areas */ struct file *file; /* the shmem-based backing file */ size_t size; /* size of the mapping, in bytes */ + unsigned long vm_start; /* Start address of vm_area + * which maps this ashmem */ unsigned long prot_mask; /* allowed prot bits, as vm_flags */ }; @@ -178,7 +181,7 @@ static int ashmem_open(struct inode *inode, struct file *file) struct ashmem_area *asma; int ret; - ret = nonseekable_open(inode, file); + ret = generic_file_open(inode, file); if (unlikely(ret)) return ret; @@ -187,6 +190,7 @@ static int ashmem_open(struct inode *inode, struct file *file) return -ENOMEM; INIT_LIST_HEAD(&asma->unpinned_list); + memcpy(asma->name, ASHMEM_NAME_PREFIX, ASHMEM_NAME_PREFIX_LEN); asma->prot_mask = PROT_MASK; file->private_data = asma; @@ -210,6 +214,67 @@ static int ashmem_release(struct inode *ignored, struct file *file) return 0; } +static ssize_t ashmem_read(struct file *file, char __user *buf, + size_t len, loff_t *pos) +{ + struct ashmem_area *asma = file->private_data; + int ret = 0; + + mutex_lock(&ashmem_mutex); + + /* If size is not set, or set to 0, always return EOF. 
*/ + if (asma->size == 0) { + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = asma->file->f_op->read(asma->file, buf, len, pos); + if (ret < 0) { + goto out; + } + + /** Update backing file pos, since f_ops->read() doesn't */ + asma->file->f_pos = *pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + +static loff_t ashmem_llseek(struct file *file, loff_t offset, int origin) +{ + struct ashmem_area *asma = file->private_data; + int ret; + + mutex_lock(&ashmem_mutex); + + if (asma->size == 0) { + ret = -EINVAL; + goto out; + } + + if (!asma->file) { + ret = -EBADF; + goto out; + } + + ret = asma->file->f_op->llseek(asma->file, offset, origin); + if (ret < 0) { + goto out; + } + + /** Copy f_pos from backing file, since f_ops->llseek() sets it */ + file->f_pos = asma->file->f_pos; + +out: + mutex_unlock(&ashmem_mutex); + return ret; +} + static inline unsigned long calc_vm_may_flags(unsigned long prot) { @@ -264,6 +329,7 @@ static int ashmem_mmap(struct file *file, struct vm_area_struct *vma) vma->vm_file = asma->file; } vma->vm_flags |= VM_CAN_NONLINEAR; + asma->vm_start = vma->vm_start; out: mutex_unlock(&ashmem_mutex); @@ -564,6 +630,69 @@ static int ashmem_pin_unpin(struct ashmem_area *asma, unsigned long cmd, return ret; } +#ifdef CONFIG_OUTER_CACHE +static unsigned int virtaddr_to_physaddr(unsigned int virtaddr) +{ + unsigned int physaddr = 0; + pgd_t *pgd_ptr = NULL; + pmd_t *pmd_ptr = NULL; + pte_t *pte_ptr = NULL, pte; + + spin_lock(&current->mm->page_table_lock); + pgd_ptr = pgd_offset(current->mm, virtaddr); + if (pgd_none(*pgd_ptr) || pgd_bad(*pgd_ptr)) { + pr_err("Failed to convert virtaddr %x to pgd_ptr\n", + virtaddr); + goto done; + } + + pmd_ptr = pmd_offset(pgd_ptr, virtaddr); + if (pmd_none(*pmd_ptr) || pmd_bad(*pmd_ptr)) { + pr_err("Failed to convert pgd_ptr %p to pmd_ptr\n", + (void *)pgd_ptr); + goto done; + } + + pte_ptr = pte_offset_map(pmd_ptr, virtaddr); + if (!pte_ptr) { + pr_err("Failed to convert pmd_ptr %p to pte_ptr\n", + (void *)pmd_ptr); + goto done; + } + pte = *pte_ptr; + physaddr = pte_pfn(pte); + pte_unmap(pte_ptr); +done: + spin_unlock(&current->mm->page_table_lock); + physaddr <<= PAGE_SHIFT; + return physaddr; +} +#endif + +static int ashmem_cache_op(struct ashmem_area *asma, + void (*cache_func)(unsigned long vstart, unsigned long length, + unsigned long pstart)) +{ +#ifdef CONFIG_OUTER_CACHE + unsigned long vaddr; +#endif + mutex_lock(&ashmem_mutex); +#ifndef CONFIG_OUTER_CACHE + cache_func(asma->vm_start, asma->size, 0); +#else + for (vaddr = asma->vm_start; vaddr < asma->vm_start + asma->size; + vaddr += PAGE_SIZE) { + unsigned long physaddr; + physaddr = virtaddr_to_physaddr(vaddr); + if (!physaddr) + return -EINVAL; + cache_func(vaddr, PAGE_SIZE, physaddr); + } +#endif + mutex_unlock(&ashmem_mutex); + return 0; +} + static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) { struct ashmem_area *asma = file->private_data; @@ -604,6 +733,15 @@ static long ashmem_ioctl(struct file *file, unsigned int cmd, unsigned long arg) ashmem_shrink(ret, GFP_KERNEL); } break; + case ASHMEM_CACHE_FLUSH_RANGE: + ret = ashmem_cache_op(asma, &clean_and_invalidate_caches); + break; + case ASHMEM_CACHE_CLEAN_RANGE: + ret = ashmem_cache_op(asma, &clean_caches); + break; + case ASHMEM_CACHE_INV_RANGE: + ret = ashmem_cache_op(asma, &invalidate_caches); + break; } return ret; @@ -666,6 +804,8 @@ static struct file_operations ashmem_fops = { .owner = THIS_MODULE, .open = ashmem_open, .release = ashmem_release, + .read =
ashmem_read, + .llseek = ashmem_llseek, .mmap = ashmem_mmap, .unlocked_ioctl = ashmem_ioctl, .compat_ioctl = ashmem_ioctl, From 4d2e9936aa9efff2b3e6162a8f7f89454a2daf5d Mon Sep 17 00:00:00 2001 From: securecrt Date: Fri, 21 Sep 2012 14:11:11 +0800 Subject: [PATCH 17/22] disable GENTLE FAIR SLEEPERS Sleeper Fairness is a concept used by CFS which treat sleeping/waiting tasks as if they were in a run queue. This implies tasks which spend most of the time waiting for an user input and such will get a fair share of CPU when they need it. Disabling Gentle Fair Sleepers could improve UI responsiveness. --- kernel/sched_features.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/kernel/sched_features.h b/kernel/sched_features.h index 0d940835..152922a4 100644 --- a/kernel/sched_features.h +++ b/kernel/sched_features.h @@ -10,7 +10,7 @@ SCHED_FEAT(FAIR_SLEEPERS, 1) * them to run sooner, but does not allow tons of sleepers to * rip the spread apart. */ -SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 1) +SCHED_FEAT(GENTLE_FAIR_SLEEPERS, 0) /* * By not normalizing the sleep time, heavy tasks get an effective From d14b09ad9bd384aa97355616e32554704c859b1c Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Sat, 22 Sep 2012 18:54:01 +0800 Subject: [PATCH 18/22] Merge ics_HWA #00e4d55 --- drivers/base/genlock.c | 50 ++++++++++++++++++++++++++++++++++-------- 1 file changed, 41 insertions(+), 9 deletions(-) diff --git a/drivers/base/genlock.c b/drivers/base/genlock.c index b5f8e42e..8c064888 100755 --- a/drivers/base/genlock.c +++ b/drivers/base/genlock.c @@ -35,7 +35,15 @@ #define GENLOCK_LOG_ERR(fmt, args...) \ pr_err("genlock: %s: " fmt, __func__, ##args) +/* The genlock magic stored in the kernel private data is used to protect + * against the possibility of user space passing a valid fd to a + * non-genlock file for genlock_attach_lock() + */ +#define GENLOCK_MAGIC_OK 0xD2EAD10C +#define GENLOCK_MAGIC_BAD 0xD2EADBAD + struct genlock { + unsigned int magic; /* Magic for attach verification */ struct list_head active; /* List of handles holding lock */ spinlock_t lock; /* Spinlock to protect the lock internals */ wait_queue_head_t queue; /* Holding pen for processes pending lock */ @@ -57,7 +65,7 @@ struct genlock_handle { * released while another process tries to attach it */ -static DEFINE_SPINLOCK(genlock_file_lock); +static DEFINE_SPINLOCK(genlock_ref_lock); static void genlock_destroy(struct kref *kref) { @@ -69,10 +77,9 @@ static void genlock_destroy(struct kref *kref) * still active after the lock gets released */ - spin_lock(&genlock_file_lock); if (lock->file) lock->file->private_data = NULL; - spin_unlock(&genlock_file_lock); + lock->magic = GENLOCK_MAGIC_BAD; kfree(lock); } @@ -110,6 +117,7 @@ static const struct file_operations genlock_fops = { struct genlock *genlock_create_lock(struct genlock_handle *handle) { struct genlock *lock; + void *ret; if (IS_ERR_OR_NULL(handle)) { GENLOCK_LOG_ERR("Invalid handle\n"); @@ -131,6 +139,7 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle) init_waitqueue_head(&lock->queue); spin_lock_init(&lock->lock); + lock->magic = GENLOCK_MAGIC_OK; lock->state = _UNLOCKED; /* @@ -138,8 +147,13 @@ struct genlock *genlock_create_lock(struct genlock_handle *handle) * other processes */ - lock->file = anon_inode_getfile("genlock", &genlock_fops, - lock, O_RDWR); + ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR); + if (IS_ERR_OR_NULL(ret)) { + GENLOCK_LOG_ERR("Unable to create lock inode\n"); + kfree(lock); + return ret; + } + 
lock->file = ret; /* Attach the new lock to the handle */ handle->lock = lock; @@ -204,21 +218,30 @@ struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd) * released and then attached */ - spin_lock(&genlock_file_lock); + spin_lock(&genlock_ref_lock); lock = file->private_data; - spin_unlock(&genlock_file_lock); fput(file); if (lock == NULL) { GENLOCK_LOG_ERR("File descriptor is invalid\n"); - return ERR_PTR(-EINVAL); + goto fail_invalid; + } + + if (lock->magic != GENLOCK_MAGIC_OK) { + GENLOCK_LOG_ERR("Magic is invalid - 0x%X\n", lock->magic); + goto fail_invalid; } handle->lock = lock; kref_get(&lock->refcount); + spin_unlock(&genlock_ref_lock); return lock; + +fail_invalid: + spin_unlock(&genlock_ref_lock); + return ERR_PTR(-EINVAL); } EXPORT_SYMBOL(genlock_attach_lock); @@ -596,7 +619,9 @@ static void genlock_release_lock(struct genlock_handle *handle) } spin_unlock_irqrestore(&handle->lock->lock, flags); + spin_lock(&genlock_ref_lock); kref_put(&handle->lock->refcount, genlock_destroy); + spin_unlock(&genlock_ref_lock); handle->lock = NULL; handle->active = 0; } @@ -642,12 +667,19 @@ static struct genlock_handle *_genlock_get_handle(void) struct genlock_handle *genlock_get_handle(void) { + void *ret; struct genlock_handle *handle = _genlock_get_handle(); if (IS_ERR(handle)) return handle; - handle->file = anon_inode_getfile("genlock-handle", + ret = anon_inode_getfile("genlock-handle", &genlock_handle_fops, handle, O_RDWR); + if (IS_ERR_OR_NULL(ret)) { + GENLOCK_LOG_ERR("Unable to create handle inode\n"); + kfree(handle); + return ret; + } + handle->file = ret; return handle; } From 0a250c8bdfe51b05b97d56181d7b61c4d7b181e4 Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Sat, 22 Sep 2012 20:44:03 +0800 Subject: [PATCH 19/22] Staging: android: binder: Add some missing binder_stat_br calls Cached thread return errors, death notifications and new looper requests were not included in the stats. 
MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Change-Id: Iabe14b351b662d3f63009ecb3900f92fc3d72cc4 Signed-off-by: Arve Hjønnevåg --- drivers/staging/android/binder.c | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/drivers/staging/android/binder.c b/drivers/staging/android/binder.c index c44eb407..bd8ac9fd 100755 --- a/drivers/staging/android/binder.c +++ b/drivers/staging/android/binder.c @@ -2241,6 +2241,7 @@ retry: if (put_user(thread->return_error2, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); + binder_stat_br(proc, thread, thread->return_error2); if (ptr == end) goto done; thread->return_error2 = BR_OK; @@ -2248,6 +2249,7 @@ retry: if (put_user(thread->return_error, (uint32_t __user *)ptr)) return -EFAULT; ptr += sizeof(uint32_t); + binder_stat_br(proc, thread, thread->return_error); thread->return_error = BR_OK; goto done; } @@ -2403,6 +2405,7 @@ retry: if (put_user(death->cookie, (void * __user *)ptr)) return -EFAULT; ptr += sizeof(void *); + binder_stat_br(proc, thread, cmd); binder_debug(BINDER_DEBUG_DEATH_NOTIFICATION, "binder: %d:%d %s %p\n", proc->pid, thread->pid, @@ -2510,6 +2513,7 @@ done: proc->pid, thread->pid); if (put_user(BR_SPAWN_LOOPER, (uint32_t __user *)buffer)) return -EFAULT; + binder_stat_br(proc, thread, BR_SPAWN_LOOPER); } return 0; } From 7553b3788da08c1e7b44d1dba41ce8f4bacb59cf Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Sun, 23 Sep 2012 22:39:34 +0800 Subject: [PATCH 20/22] video: msm: add mdp version to id string,and put a bogus panel id --- drivers/video/msm/msm_fb.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/video/msm/msm_fb.c b/drivers/video/msm/msm_fb.c index 82e110ce..028a1c7c 100644 --- a/drivers/video/msm/msm_fb.c +++ b/drivers/video/msm/msm_fb.c @@ -992,7 +992,7 @@ static void setup_fb_info(struct msmfb_info *msmfb) int r; /* finish setting up the fb_info struct */ - strncpy(fb_info->fix.id, "msmfb", 16); + strncpy(fb_info->fix.id, "msmfb31_0", 16); fb_info->fix.ypanstep = 1; fb_info->fbops = &msmfb_ops; From 582f409d661d1a813e8fe2f850678c87cf3df2ba Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Mon, 24 Sep 2012 22:35:24 +0800 Subject: [PATCH 21/22] tweaks iosched for better android performance --- block/deadline-iosched.c | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/block/deadline-iosched.c b/block/deadline-iosched.c index b547cbca..d5873c6c 100644 --- a/block/deadline-iosched.c +++ b/block/deadline-iosched.c @@ -17,10 +17,10 @@ /* * See Documentation/block/deadline-iosched.txt */ -static const int read_expire = HZ / 2; /* max time before a read is submitted. */ +static const int read_expire = HZ / 4; /* max time before a read is submitted. */ static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */ -static const int writes_starved = 2; /* max times reads can starve a write */ -static const int fifo_batch = 16; /* # of sequential requests treated as one +static const int writes_starved = 4; /* max times reads can starve a write */ +static const int fifo_batch = 1; /* # of sequential requests treated as one by the above parameters. For throughput. 
*/ struct deadline_data { @@ -362,7 +362,7 @@ static void *deadline_init_queue(struct request_queue *q) dd->fifo_expire[READ] = read_expire; dd->fifo_expire[WRITE] = write_expire; dd->writes_starved = writes_starved; - dd->front_merges = 1; + dd->front_merges = 0; dd->fifo_batch = fifo_batch; return dd; } From 2931196a527410ba0e7f139c809500833510b10c Mon Sep 17 00:00:00 2001 From: SecureCRT Date: Mon, 24 Sep 2012 22:36:20 +0800 Subject: [PATCH 22/22] remove the compile warnings --- include/linux/kobject.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/linux/kobject.h b/include/linux/kobject.h index 58ae8e00..aabe5a8d 100644 --- a/include/linux/kobject.h +++ b/include/linux/kobject.h @@ -106,7 +106,7 @@ extern char *kobject_get_path(struct kobject *kobj, gfp_t flag); struct kobj_type { void (*release)(struct kobject *kobj); - struct sysfs_ops *sysfs_ops; + const struct sysfs_ops *sysfs_ops; struct attribute **default_attrs; };
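
The two sketches below show, from userspace, how the interfaces added by PATCH 08 (lowmemorykiller notify_trigger) and PATCH 09 (logger ABI v2) might be exercised. They are minimal sketches under stated assumptions, not code from this series: the /dev/log/main node and the re-declared ioctl numbers are inferred from the logger.h hunk above (the driver registers misc devices named "log_main" etc., which Android conventionally exposes under /dev/log/), and the sysfs path is inferred from kobject_init_and_add(..., mm_kobj, "lowmemkiller") in the lowmemorykiller patch.

First, a reader negotiating the v2 logger entry format. LOGGER_SET_VERSION accepts only 1 or 2 and requires a read-mode descriptor; v2 entries carry the extra hdr_size and euid fields:

/* logver.c - assumed device path; ioctl codes copied from logger.h above */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>

#define __LOGGERIO 0xAE
#define LOGGER_GET_VERSION _IO(__LOGGERIO, 5)
#define LOGGER_SET_VERSION _IO(__LOGGERIO, 6)

int main(void)
{
	int ver = 2; /* request v2 headers; driver returns -EINVAL outside 1..2 */
	int fd = open("/dev/log/main", O_RDONLY | O_NONBLOCK); /* assumed path */

	if (fd < 0) {
		perror("open");
		return 1;
	}
	if (ioctl(fd, LOGGER_SET_VERSION, &ver) < 0)
		perror("LOGGER_SET_VERSION");
	/* GET_VERSION returns the reader's current ABI version as the result */
	printf("reader ABI version: %d\n", (int) ioctl(fd, LOGGER_GET_VERSION));
	close(fd);
	return 0;
}

Second, a watcher for the kill-zone notification. sysfs_notify() on notify_trigger_active wakes pollers with POLLERR|POLLPRI, and the usual sysfs pattern applies: read once to arm, poll, then seek back and re-read:

/* lmkwatch.c - sysfs path assumed from mm_kobj + "lowmemkiller" */
#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <poll.h>

int main(void)
{
	char buf[4];
	int fd = open("/sys/kernel/mm/lowmemkiller/notify_trigger_active",
		      O_RDONLY);
	struct pollfd pfd = { .fd = fd, .events = POLLPRI | POLLERR };

	if (fd < 0) {
		perror("open");
		return 1;
	}
	read(fd, buf, sizeof(buf)); /* arm the poll by consuming current value */
	while (poll(&pfd, 1, -1) > 0) {
		lseek(fd, 0, SEEK_SET);
		if (read(fd, buf, sizeof(buf)) > 0 && buf[0] == '1')
			printf("low-memory kill zone approaching\n");
	}
	close(fd);
	return 0;
}

Note that the trigger only fires when userspace has set the notify_trigger module parameter to a nonzero page count; with the default of 0 the show() above always reports "0\n".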