194 Commits

SHA1 Message Date
fe9dc4b1d5 fix a few merge issues 2012-12-27 13:28:42 +00:00
ac52e95f27 add merge from tytung branch 2012-12-27 11:44:08 +00:00
a6067c0e2d Revert "htcleo: restore the PMEM_ADSP size to the default value as the HW video decoder works on HD2 now."
This reverts commit 783883f2fe.
2012-11-17 21:14:15 +08:00
139f6ae520 Revert "htcleo: updated htcleo_defconfig to tytung_jellybean_r2"
This reverts commit bd5ec6a5e7.
2012-11-17 21:13:29 +08:00
9031c37be6 htcleo: pm: add HD2 off-mode Alarm Clock for cLK (Credit goes to kokotas and Rick_1995)
Visit http://forum.xda-developers.com/showthread.php?t=1990111 for more info.
2012-11-17 15:36:23 +08:00
bba549a7dc Staging: android: binder: Allow using highmem for binder buffers
The default kernel mapping for the pages allocated for the binder
buffers is never used. Set the __GFP_HIGHMEM flag when allocating
these pages so we don't needlessly use low memory pages that may
be required elsewhere.

Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2012-11-17 02:58:34 +08:00
a0b0a01619 Staging: android: binder: Fix memory leak on thread/process exit
If a thread or process exited while a reply, one-way transaction or
death notification was pending, the struct holding the pending work
was leaked.

Signed-off-by: Arve Hjønnevåg <arve@android.com>
Signed-off-by: Greg Kroah-Hartman <gregkh@linuxfoundation.org>
2012-11-17 02:58:27 +08:00
bd5ec6a5e7 htcleo: updated htcleo_defconfig to tytung_jellybean_r2 2012-11-11 13:26:46 +08:00
783883f2fe htcleo: restore the PMEM_ADSP size to the default value as the HW video decoder works on HD2 now. 2012-11-11 13:24:22 +08:00
2543feb2ce htcleo: updated htcleo_defconfig to tytung_jellybean_r1 2012-10-14 20:29:01 +08:00
f3e70adb04 htcleo: reduce the pmem_adsp size 2012-10-14 20:28:28 +08:00
facd1b5635 Update build.sh for JellyBean 2012-10-14 18:31:43 +08:00
b9cddc7f36 remove -ics flag 2012-10-14 18:16:44 +08:00
e42ef086be Revert "reduced the PMEM_ADSP size as the HW decoder still can't work on HD2"
This reverts commit 411b4bcb90.
2012-10-14 18:15:16 +08:00
c679e15374 Merge remote-tracking branch 'securecrt/jellybean' into jellybean
Conflicts:
	arch/arm/configs/htcleo_defconfig
2012-10-14 18:14:49 +08:00
f9b8788a89 drivers: power: enable fast_charge by default 2012-10-13 02:28:16 +08:00
5155c8ec93 remove the compile warnings 2012-10-13 02:27:01 +08:00
bda745315c tweaks iosched for better android performance 2012-10-13 02:26:34 +08:00
50391c3525 Merge branch 'ics_HWA' into jellybean 2012-09-24 22:37:15 +08:00
2931196a52 remove the compile warnings 2012-09-24 22:36:20 +08:00
582f409d66 tweaks iosched for better android performance 2012-09-24 22:35:24 +08:00
7553b3788d video: msm: add mdp version to id string, and put a bogus panel id 2012-09-23 22:39:34 +08:00
1b9c88acd9 htcleo: updated htcleo_defconfig to tytung_HWA_r3.5 2012-09-23 19:30:24 +08:00
499a1d65ed Revert "disable KSM"
This reverts commit c9ff149183.
2012-09-23 19:28:36 +08:00
e41593d928 Merge remote-tracking branch 'securecrt/ics_HWA' into ics_HWA
Conflicts:
	arch/arm/configs/htcleo_defconfig
	build.sh
2012-09-23 17:22:13 +08:00
0a250c8bdf Staging: android: binder: Add some missing binder_stat_br calls
Cached thread return errors, death notifications and new looper
requests were not included in the stats.

Change-Id: Iabe14b351b662d3f63009ecb3900f92fc3d72cc4
Signed-off-by: Arve Hjønnevåg <arve@android.com>
2012-09-22 20:44:03 +08:00
7e72949381 Staging: android: binder: Add some missing binder_stat_br calls
Cached thread return errors, death notifications and new looper
requests were not included in the stats.

Change-Id: Iabe14b351b662d3f63009ecb3900f92fc3d72cc4
Signed-off-by: Arve Hjønnevåg <arve@android.com>
2012-09-22 20:39:45 +08:00
d14b09ad9b Merge ics_HWA #00e4d55 2012-09-22 18:54:01 +08:00
00e4d55f8a base: genlock: handle error while creating lock/handle inode
base: genlock: add magic to protect attach from non-genlock file
base: genlock: protect kref counting with spinlock
2012-09-22 18:49:06 +08:00
4d2e9936aa disable GENTLE FAIR SLEEPERS
Sleeper fairness is a concept used by CFS that treats sleeping/waiting tasks as if they were in a run queue. This implies that tasks which spend most of their time waiting for user input and the like will get a fair share of CPU when they need it. Disabling gentle fair sleepers can improve UI responsiveness.
2012-09-21 14:11:11 +08:00
d6a9cabcb5 disable GENTLE FAIR SLEEPERS
Sleeper fairness is a concept used by CFS that treats sleeping/waiting tasks as if they were in a run queue. This implies that tasks which spend most of their time waiting for user input and the like will get a fair share of CPU when they need it. Disabling gentle fair sleepers can improve UI responsiveness.
2012-09-21 13:59:41 +08:00
c6861409a8 ashmem: Implement read(2) in ashmem driver
ashmem: Fix ASHMEM_SET_PROT_MASK.
ashmem: Support lseek(2) in ashmem driver
ashmem: Fix the build failure when OUTER_CACHE is enabled
ashmem: Fix ashmem vm range comparison to stop roll-over
2012-09-21 13:04:50 +08:00
73dd7070ec Merge branch 'jellybean' of https://github.com/securecrt/android_kernel_htcleo-2.6.32-ics into jellybean 2012-09-21 12:57:39 +08:00
c8d5db022f ashmem: Implement read(2) in ashmem driver
ashmem: Fix ASHMEM_SET_PROT_MASK.
ashmem: Support lseek(2) in ashmem driver
ashmem: Fix the build failure when OUTER_CACHE is enabled
ashmem: Fix ashmem vm range comparison to stop roll-over
2012-09-21 01:06:29 +08:00
b9ead040d2 enable -pipe 2012-09-21 01:06:00 +08:00
035e8b5999 change build.sh 2012-09-09 22:55:52 +08:00
b989638f53 fixed merge problem 2012-08-23 22:41:48 +08:00
f9c1be0ea3 fixed merge problem 2012-08-23 13:06:34 +08:00
1160ee1202 Merge branch 'ics_HWA' into jellybean
Conflicts:
	arch/arm/configs/htcleo_defconfig
	drivers/staging/android/lowmemorykiller.c
2012-08-23 12:57:17 +08:00
c9ff149183 disable KSM 2012-08-23 12:45:29 +08:00
4b10fc19b9 staging: Add Snappy compression support to zram
Zram currently uses LZO compression. With Snappy, it uses less CPU time and is
thus more useful. The sacrifice in compression ratio is small.
Zram's LZO and Snappy support can be independently enabled at compile time, and
each zram device can switch between compression methods when unused.
When only a single compression method is enabled at compile time, no indirection
penalty is incurred.
http://driverdev.linuxdriverproject.org/pipermail/devel/2011-April/015114.html
2012-08-23 12:39:53 +08:00
f3818c8864 Merge branch 'ics_HWA' of github.com:securecrt/android_kernel_htcleo-2.6.32-ics into ics_HWA 2012-08-23 02:50:19 +08:00
2145485d7d mmc: change clock from 50MHz to 64MHz PCLK
mmc: change NR_SG from 32 to 128 for better performance
2012-08-23 02:45:27 +08:00
4eade398cd mmc: msm: fix dma usage not to use internal APIs
Remove parts of this driver which use internal API calls. This
replaces the calls as suggested by Russell King.
2012-08-23 01:05:06 +08:00
08802ee5be mmc: msm_sdcc: Fix bug in PIO mode when data size is not word aligned
The current code for PIO doesn't transfer the whole data when the data size
is not a multiple of 4 bytes. The last few bytes are not written to
the card, resulting in no DATAEND interrupt from SDCC. This patch
allows data transfer for non-aligned data sizes in PIO mode.
2012-08-23 00:43:40 +08:00
29e0f0df9d mmc: msm_sdcc: Add prog done interrupt support
Enable the prog done interrupt for the stop command (CMD12) that is sent
after a multi-block write (CMD25). The PROG_DONE bit is set when
the card has finished its programming and is ready for the next data.

After every write request the card will be polled for ready status
using CMD13. For a multi-block write (CMD25), a stop command (CMD12)
will be sent before CMD13. If we enable the prog done interrupt
for CMD12, then CMD13 polling can be avoided. The prog done interrupt
means that the card is done with its programming and is ready for the
next request.
2012-08-23 00:34:36 +08:00
81d981629c mmc: msm_sdcc: Fix possible circular locking dependency warning
In the context of the request processing thread, the data mover lock is
acquired after the host lock. In another context, in the completion
handler of the data mover, the locks are acquired in the reverse order,
resulting in a possible circular lock dependency warning. Hence,
schedule a tasklet to process the dma completion so as to avoid
nested locks.
2012-08-23 00:10:19 +08:00
04d584c634 drivers/mmc/host/msm-sdcc: remove dead config options
CONFIG_MMC_MSM7X00A_RESUME_IN_WQ and CONFIG_MMC_EMBEDDED_SDIO don't exist
in Kconfig and are never defined anywhere else, so remove all
references to them from the source code.
2012-08-23 00:00:26 +08:00
5c726afbb6 Revert "mm: cleancache core ops functions and config"
This reverts commit e0c9143ea1.
2012-08-20 23:15:00 +08:00
61eb7c5296 Revert "add zcache"
This reverts commit 8eb6724dbf.
2012-08-20 23:14:51 +08:00
4cecd4ccb2 Revert "fs: add field to superblock to support cleancache"
This reverts commit 1abd4f495e.
2012-08-20 23:12:22 +08:00
0f794ead76 Revert "enable zcache & cleancache"
This reverts commit c2ff7098d4.
2012-08-20 23:12:10 +08:00
f3a9b636b3 Revert "mm/fs: add hooks to support cleancache"
This reverts commit 3d343ac32a.
2012-08-20 23:10:46 +08:00
7c50bd921f staging: lowmemkiller: add fudgeswap
fudgeswap acts as follows:

If set to non-zero (default is 512k):
	Check the amount of SWAP_FREE space available
	If > 0KB is available:
		if fudgeswap > swapfree:
			other_file += swapfree
		else:
			other_file += fudgeswap

In short: we will add in fudgeswap as long as it is less than the free swap.

Setting this to a very large positive number will indicate swap ought
to be fully used as free (and will slow the system down).

Smaller numbers will allow you to put some pressure on SWAP without
slowing the system down as much.

Small negative numbers will allow the system to be faster at the same
minfree level.

The default is 512 to give a little bit of pressure to use some swap,
but this can be modified at runtime via:
/sys/module/lowmemorykiller/parameters/fudgeswap
originally by ezterry
2012-08-20 15:24:37 +08:00
3d343ac32a mm/fs: add hooks to support cleancache 2012-08-20 12:10:10 +08:00
b6c1977f35 revert #8b62d33 2012-08-20 00:57:38 +08:00
c2ff7098d4 enable zcache & cleancache 2012-08-20 00:53:53 +08:00
8b62d33820 enable zcache & cleancache 2012-08-20 00:52:57 +08:00
1abd4f495e fs: add field to superblock to support cleancache 2012-08-20 00:51:37 +08:00
8eb6724dbf add zcache 2012-08-20 00:51:06 +08:00
e0c9143ea1 mm: cleancache core ops functions and config 2012-08-20 00:49:43 +08:00
f1beec1b32 vmalloc(): adjust gfp mask passed on nested vmalloc() invocation 2012-08-18 23:45:48 +08:00
7c81b7476d ksm: check for ERR_PTR from follow_page() 2012-08-17 01:49:14 +08:00
8b041c69af mm/ksm.c is doing an unneeded _notify in write_protect_page. 2012-08-17 01:40:53 +08:00
c95ed3371a staging: zram: fix zram locking
Staging: zram: Replace mutex lock by a R/W semaphore
Staging: zram: Add a missing GFP_KERNEL specifier in zram_init_device()
2012-08-17 01:21:55 +08:00
cc0db50c33 ksm: remove unswappable max_kernel_pages
ksm: fix bad user data when swapping
thp: ksm: free swap when swapcache page is replaced
2012-08-17 01:19:24 +08:00
c59e5f029c change MSM_NAND_DMA_BUFFER_SIZE to SZ_1M 2012-08-15 18:55:32 +08:00
e4c201d97a pmem_adsp (user-space) as non-cached
pmem_venc (user-space) as non-cached
2012-08-15 18:55:05 +08:00
8d4f825606 pmem: Check for valid virtual address while flushing
pmem: Correctly account for aligned blocks
2012-08-15 18:54:43 +08:00
a8ff4f89b4 android: logger: Allow a UID to read its own log entries
staging: logger: hold mutex while removing reader
staging: android: logger: clarify non-update of w_off in do_write_log_from_user
staging: android: logger: clarify code in clock_interval
staging: android: logger: reorder prepare_to_wait and mutex_lock
staging: android: logger: simplify and optimize get_entry_len
staging: android: logger: Change logger_offset() from macro to function
Staging: android: fixed white spaces coding style issue in logger.c
android: logger: bump up the logger buffer sizes
2012-08-15 18:53:52 +08:00
9f70587067 staging: android: lowmemorykiller: sysfs node and notifications
android, lowmemorykiller: remove task handoff notifier
staging: android: lowmemorykiller: Fix task_struct leak
staging: android/lowmemorykiller: Don't unregister notifier from atomic context
staging: android, lowmemorykiller: convert to use oom_score_adj
staging: android/lowmemorykiller: Do not kill kernel threads
staging: android/lowmemorykiller: No need for task->signal check
staging: android/lowmemorykiller: Better mm handling
staging: android/lowmemorykiller: Don't grab tasklist_lock
staging: android: lowmemorykiller: Don't wait more than one second for a process to die
Staging: android: fixed 80 characters warnings in lowmemorykiller.c
staging: android: lowmemorykiller: Ignore shmem pages in page-cache
staging: android: lowmemorykiller: Remove bitrotted codepath
staging: android: lowmemkiller: Substantially reduce overhead during reclaim
staging: android: lowmemorykiller: Don't try to kill the same pid over and over
2012-08-15 18:53:09 +08:00
0fa4a5529c Staging: android: binder: Don't call dump_stack in binder_vma_open
Staging: android: binder: Fix crashes when sharing a binder file between processes
drivers:staging:android Typos: fix some comments that have typos in them.
fs: Remove missed ->fds_bits from cessation use of fd_set structs internally
Staging:android: Change type for binder_debug_no_lock switch to bool
Staging: android: binder: Fix use-after-free bug
2012-08-15 18:52:07 +08:00
0c4a37e304 change MSM_NAND_DMA_BUFFER_SIZE to SZ_1M 2012-08-15 18:07:29 +08:00
cdcb35c854 pmem_adsp (user-space) as non-cached
pmem_venc (user-space) as non-cached
2012-08-15 18:06:24 +08:00
fced437cd2 pmem: Check for valid virtual address while flushing
pmem: Correctly account for aligned blocks
2012-08-15 18:04:29 +08:00
e04d028ecf reduced the log size 2012-08-15 18:03:44 +08:00
9c3257d859 android: logger: Allow a UID to read its own log entries
staging: logger: hold mutex while removing reader
staging: android: logger: clarify non-update of w_off in do_write_log_from_user
staging: android: logger: clarify code in clock_interval
staging: android: logger: reorder prepare_to_wait and mutex_lock
staging: android: logger: simplify and optimize get_entry_len
staging: android: logger: Change logger_offset() from macro to function
Staging: android: fixed white spaces coding style issue in logger.c
android: logger: bump up the logger buffer sizes
2012-08-15 14:56:37 +08:00
d6b41b0def staging: android: lowmemorykiller: sysfs node and notifications
android, lowmemorykiller: remove task handoff notifier
staging: android: lowmemorykiller: Fix task_struct leak
staging: android/lowmemorykiller: Don't unregister notifier from atomic context
staging: android, lowmemorykiller: convert to use oom_score_adj
staging: android/lowmemorykiller: Do not kill kernel threads
staging: android/lowmemorykiller: No need for task->signal check
staging: android/lowmemorykiller: Better mm handling
staging: android/lowmemorykiller: Don't grab tasklist_lock
staging: android: lowmemorykiller: Don't wait more than one second for a process to die
Staging: android: fixed 80 characters warnings in lowmemorykiller.c
staging: android: lowmemorykiller: Ignore shmem pages in page-cache
staging: android: lowmemorykiller: Remove bitrotted codepath
staging: android: lowmemkiller: Substantially reduce overhead during reclaim
staging: android: lowmemorykiller: Don't try to kill the same pid over and over
2012-08-15 13:37:30 +08:00
9b8229cafb Staging: android: binder: Don't call dump_stack in binder_vma_open
Staging: android: binder: Fix crashes when sharing a binder file between processes
drivers:staging:android Typos: fix some comments that have typos in them.
fs: Remove missed ->fds_bits from cessation use of fd_set structs internally
Staging:android: Change type for binder_debug_no_lock switch to bool
Staging: android: binder: Fix use-after-free bug
2012-08-14 16:48:25 +08:00
e20f7d7077 base: genlock: Remove genlock_release_lock and associated ioctl
base: genlock: allow synchronization with a single gralloc handle
base: genlock: remove BIT macro usage
2012-08-14 13:45:36 +08:00
3107194028 fixed typo in config file 2012-08-07 16:52:14 +08:00
11a56d2216 add in_interrupt back 2012-08-07 00:53:53 +08:00
65b62e5ee4 base: genlock: Remove genlock_release_lock and associated ioctl
base: genlock: allow synchronization with a single gralloc handle
base: genlock: remove BIT macro usage
2012-08-07 00:45:11 +08:00
8d701dfec6 htcleo: updated htcleo_defconfig to tytung_HWA_r3.4 2012-08-07 00:04:47 +08:00
83bc93927f power: ds2746: Add procfs interface for fast charge 2012-08-07 00:11:19 +08:00
55ece772cd msm: kgsl: Add per context timestamp
Add new ioctls for per context timestamps.
Timestamp functions (read/write/wait) will now be context
specific rather than only using the global timestamp.
Per-context timestamps are a requirement for priority-based
queueing.
2012-08-02 17:04:07 +08:00
1e188cc5e1 net: netfilter: enable bandwidth control
to be able to set mobile data limit
2012-08-02 13:21:31 +08:00
8bdae42d95 net: netfilter: enable bandwidth control
to be able to set mobile data limit
2012-08-01 10:55:06 +08:00
bc9444ab04 htcleo: updated htcleo_defconfig to tytung_HWA_r3.3 2012-08-01 00:21:30 +08:00
47e7119196 Merge branch 'ics_HWA' of git://github.com/securecrt/android_kernel_htcleo-2.6.32-ics into ics_HWA
update the KGSL driver to the latest version
2012-08-01 00:17:27 +08:00
05369deec9 include: checkout msm_mdp from ics_chocolate
*needs to match userspace*
2012-07-31 13:59:12 +08:00
c6e8ee54ff msm: kgsl: rework ib checking
Separate ib parse checking from cffdump as it is useful
in other situations. This is controlled by a new debugfs
file, ib_check. All ib checking is off (0) by default,
because parsing and mem_entry lookup can have a performance
impact on some benchmarks. Level 1 checking verifies the
IB1's. Level 2 checking also verifies the IB2.
2012-07-26 16:14:35 +08:00
d842173fc6 msm: kgsl: Add markers to identify IB sequence start and end
Add nop packets in the ringbuffer at the start and end of IB buffers
submitted by the user space driver. These nop packets serve as markers
that can be used during replay, recovery, and snapshot to get valid
data for a GPU hang dump.
2012-07-26 15:37:36 +08:00
1672c9446f msm: kgsl: Optimize page_alloc allocations
User memory needs to be zeroed out before it is sent to the user.
To do this, the kernel maps the page, memsets it to zero and then
unmaps it.  By virtue of mapping it, this forces us to flush the
dcache to ensure cache coherency between kernel and user mappings.
Originally, the page_alloc loop was using GFP_ZERO (which does a
map, memset, and unmap for each individual page) and then we were
additionally calling flush_dcache_page() for each page killing us
on performance.  It is far more efficient, especially for large
allocations (> 1MB), to allocate the pages without GFP_ZERO and
then to vmap the entire allocation, memset it to zero, flush the
cache and then unmap. This process is slightly slower for very
small allocations, but only by a few microseconds, and is well
within the margin of acceptability. In all, the new scheme is
faster than the default for all sizes greater than 16k, and is
almost 4X faster for 2MB and 4MB allocations which are common for
textures and very large buffer objects.

The downside is that if there isn't enough vmalloc room for the
allocation, we are forced to fall back to a slow page-by-page
memset/flush, but this should happen rarely (if at all) and
is only included for completeness.
2012-07-26 14:45:24 +08:00
394bda433a msm: kgsl: Map a guard page on the back of GPU MMU regions
Add a guard page on the backside of page_alloc MMU mappings to protect
against an overzealous GPU pre-fetch engine that sometimes oversteps the
end of the mapped region. The same physical page can be re-used for each
mapping, so we only need to allocate one physical page to rule them all
and in the darkness bind them.
2012-07-26 14:04:25 +08:00
4822aef009 msm: kgsl: Change name of vmalloc allocator
Change the vmalloc allocation name to something more appropriate since
we do not allocate memory using vmalloc for the userspace driver. We
directly allocate physical pages and map them to user address space. The
name is changed to page_alloc instead of vmalloc. Add sysfs files to
track memory usage via both vmalloc and page_alloc.
2012-07-26 13:52:28 +08:00
e2ff78936f msm: kgsl: Do not dereference pointer before checking against NULL
The pagetable pointer was checked against NULL after being used.
Check against NULL first and then dereference it.
2012-07-25 21:10:10 +08:00
41b9064ec2 msm: kgsl: don't clear gpuaddr when unmapping global mappings
Memory mapped through kgsl_mmu_map_global() is supposed to have
the same gpu address in all pagetables. And the memdesc will
persist beyond the lifetime of any single pagetable.
Therefore, memdesc->gpuaddr should not be zeroed for these
memdescs.
2012-07-25 21:08:59 +08:00
121a2a91a5 msm: kgsl: Add GMEM size configuration in gpu list
To avoid msm or gpu specific code in the driver, added
GMEM size configuration parameter as a part of gpu list.
2012-07-25 20:39:13 +08:00
efa80a4cc1 msm: kgsl: Cleanup header file macros
Remove macro logic for macros that are always defined.
2012-07-25 20:27:26 +08:00
503977ed6b fix #4151332 2012-07-25 20:23:24 +08:00
15793c0aaa msm: kgsl: Find a mem_entry by way of a GPU address and a pagetable base
Given a pagetable base and a GPU address, find the struct kgsl_mem_entry
that matches the object.  Move this functionality out from inside another
function and promote it to top level so it can be used by upcoming
functionality.
2012-07-25 19:54:21 +08:00
41513329a1 msm: kgsl: Detach memory objects from a process ahead of destroy time
Previously, memory objects assumed that they remained attached to a
process until they are destroyed. In the past this was mostly true,
but worked by luck because a process could technically map the memory
and then close the file descriptor which would eventually explode. Now we
do the process related cleanup (MMU unmap, fixup statistics) when the
object is released from the process so the process can go away without
affecting the other holders of the mem object refcount.
2012-07-25 19:47:35 +08:00
93d86da2ee msm: kgsl: handle all indirect buffer types in postmortem
Postmortem dump was not parsing CP_INDIRECT_BUFFER_PFE commands.
Snapshot was recently fixed to handle this, and this change
extends support to postmortem dump.
2012-07-25 19:41:35 +08:00
543247cd01 msm: kgsl: return correct error code for unknown ioctls
Unknown ioctl code errors are supposed to be ENOIOCTLCMD,
not EINVAL.
2012-07-25 19:35:35 +08:00
1b6fa28430 msm: kgsl: Update the GMEM and istore size for A320
Set the correct GMEM and istore sizes for A320 on APQ8064.
The more GMEM we have the happier we are, so the code will
work with 256K, but it will be better with 512K.  For the
instruction store the size is important during GPU snapshot
and postmortem dump.  Also, the size of each instruction is
different on A3XX so remove the hard coded constants and
add a GPU specific size variable.
2012-07-25 19:14:12 +08:00
411b4bcb90 reduced the PMEM_ADSP size as the HW decoder still can't work on HD2 2012-07-25 19:12:41 +08:00
0885149512 msm: kgsl: Add support for the A3XX family of GPUs
Add support for the A320, the first of the new generation
of Adreno GPUs.
2012-07-25 00:10:26 +08:00
be4c38e2f5 msm: kgsl: handle larger instruction store for adreno225
This GPU has a larger instruction store, so more memory
needs to be reserved for saving shader state when context
switching.

The initial vertex and pixel partitioning of the
instruction store also needs to be different.
2012-07-24 23:30:19 +08:00
ee339b2bcb msm: kgsl: Write the retired timestamp on resume
Write the retired timestamp into the expected location. This fixes
userspace crashes after resume when the retired timestamp is read
as 0 instead of the expected last timestamp.
2012-07-23 18:59:50 +08:00
148ebef127 reverse DEBUG_TRACE_VDEC 2012-07-23 14:37:40 +08:00
544a54b32b ignore the version check 2012-07-23 14:13:02 +08:00
bb92dc19da Allow high current charging on china chargers 2012-07-06 18:14:50 +02:00
d4595be826 net: bluetooth: Hold wakelock until BT idle timer kicks in.
Source: 7e394e2551
2012-06-29 02:04:58 +08:00
664e118857 Allow high current charging on china chargers 2012-06-25 02:56:04 +08:00
b8450f4096 msm: kgsl: change timestamp frees to use kgsl_event
The timestamp memqueue was unsorted, which could cause
memory to not be freed soon enough. The kgsl_event
list is sorted and does almost exactly the same thing
as the memqueue did, so freememontimestamp is now
implemented using the kgsl_event list.
2012-06-23 19:03:55 +08:00
4520a7c383 msm: kgsl: cancel events from kgsl_release
Events need to be cancelled when an fd is released,
to avoid possible memory leaks or use after free.

When the event is cancelled, its callback is called.
Currently this is sufficient since events are used for
resource management and we have no option but to
release the lock or memory. If future uses need to
distinguish between the callback firing and
a cancel, they can look at the timestamp passed to
the callback, which will be before the timestamp they
expected. Otherwise a separate cancel callback can
be added.
2012-06-23 18:52:06 +08:00
f6acf3ab9f msm: kgsl: queue timestamp expired work more often
There are some workloads where interrupts do not
always get generated, and as a result the timestamp
work was not triggered often enough.

Queue timestamp expired work from adreno_waittimestamp(),
when the timestamp expires while we are not waiting.
It is possible in this case that no interrupt fired
because no processes were waiting.

Queue timestamp expired work when freememontimestamp
is called, which reduces the amount of memory
built up by applications that use this api often.
2012-06-23 17:48:20 +08:00
5c1047c767 msm: kgsl: set the dma_address field of scatterlists
Ion carveout and content protect heap buffers do not
have a struct page associated with them. Thus
sg_phys() will not work reliably on these buffers.
Set the dma_address field on physically contiguous
buffers.  When mapping a scatterlist to the gpummu
use sg_dma_address() first and if it returns 0
then use sg_phys().

msm: kgsl: Use kzalloc to allocate scatterlists of 1 page or less

The majority of the scatterlist allocations used in KGSL are under 1
page (1 page of struct scatterlist is approximately 1024 entries
equalling 4MB of allocated buffer).  In these cases using vmalloc
for the sglist is undesirable and slow.  Add functions to check the
size of the allocation and favor kzalloc for 1 page allocations and
vmalloc for larger lists.
2012-06-23 17:02:28 +08:00
a7bb935abb revert the pmem size to the default configuration 2012-06-23 17:01:57 +08:00
361e591fe7 msm: kgsl: remove readl/writel use for dma memory
For dma_alloc_coherent() you don't need writel/readl because
it's just a plain old void *. Linux tries very hard to make a
distinction between io memory (void __iomem *) and memory
(void *) so that drivers are portable to architectures that
don't have a way to access registers via pointer dereferences.
See http://lwn.net/Articles/102232/ and the Linus rant at
http://lwn.net/Articles/102240/ for more details behind
the motivation.

msm: kgsl: Allocate physical pages instead of using vmalloc

Replace vmalloc allocation with physical page allocation. For most
allocations we do not need a kernel virtual address. vmalloc uses up
the kernel virtual address space, so replacing vmalloc with physical
page allocation and mapping that allocation into kernel space only
when it is required prevents the kgsl driver from using unnecessary
vmalloc virtual space.
2012-06-22 16:49:00 +08:00
8c39724a75 remove zImage before compile 2012-06-22 16:48:37 +08:00
47e6ec131b reverse the GENLOCK 2012-06-22 16:20:22 +08:00
376f66c119 msm: kgsl: convert sg allocation to vmalloc
kmalloc allocates physically contiguous memory and
may fail for larger allocations due to fragmentation.
The large allocations are caused by the fact that the
scatterlist structure is 24 bytes and the array size
is proportional to the number of pages being mapped.
2012-06-22 16:08:12 +08:00
b4c5202bec msm: kgsl: make cffdump work with the MMU enabled
The tools that process cff dumps expect a linear
memory region, but the start address of that region can
be configured. As long as there is only a single
pagetable (so that there aren't duplicate virtual
addresses in the dump), dumps captured with the
mmu on are easier to deal with than reconfiguring
to turn the mmu off.
2012-06-22 15:38:14 +08:00
a19d2698cc msm: kgsl: Add ION as an external memory source
Allow ION buffers to be attached via IOCTL_KGSL_MAP_USER_MEM
2012-06-22 15:24:51 +08:00
91bbe54c4f msm: kgsl: Fixup per-process memory statistics
Make the framework for reporting per-process memory statistics a little bit
more generic.  This should make it easier to keep track of more external
memory sources as they are added.
2012-06-21 13:41:21 +08:00
9d909cf27b msm: kgsl: Make sure kmemleak tool does not report incorrect mem leak.
Certain memory allocations are not properly tracked by kmemleak tool,
which makes it to incorrectly detect memory leak. Notify the tool by using
kmemleak_not_leak() to ignore the memory allocation so that incorrect leaks
report are avoided.
2012-06-21 13:01:23 +08:00
dcf924f072 msm: kgsl: Add a new property to IOCTL_KGSL_DEVICE_GETPROPERTY
Return the reset status of the GPU unit when
IOCTL_KGSL_DEVICE_GETPROPERTY is called with
type KGSL_PROP_GPU_RESET_STAT
2012-06-21 12:54:12 +08:00
69555a62d1 msm: kgsl: Poke regularly in adreno_idle
Poking once during adreno_idle is not enough; a GPU hang may still happen.
Seen on 7x27A. Write a few times during the wait timeout, to ensure that
the WPTR is updated properly.
2012-06-21 12:46:57 +08:00
aa5de9cfcb msm: kgsl: increase valid timestamp range
The existing timestamp_cmp function returns a different
result depending on the order of the input parameters due to
having an asymmetric valid window. When no rollover is
detected the window is 2^31 but when a rollover is detected
the window is 25000. This change makes the rollover window
symmetric at 2^31.
2012-06-21 12:34:57 +08:00
d319fcfbbd msm: kgsl: flush outer cache for alloc_page() pages
The outer cache needs to be flushed for these pages
after they are allocated so that the GPU and CPU
have a consistent view of them.
2012-06-21 12:30:20 +08:00
97dd7fe6b5 msm: kgsl: Add a constant for adreno_ringbuffer_issuecmds flags
Use a #define constant instead of a bare constant for the flags
parameter of adreno_ringbuffer_issuecmds.
2012-06-21 00:32:58 +08:00
ae32a212a5 msm: kgsl: fix error handling in adreno_waittimestamp()
This function was incorrectly reporting hangs when an
error such as ERESTARTSYS was returned by
__wait_event_interruptible_timeout().

msm: kgsl: Make sure WPTR reg is updated properly

Sometimes writes to WPTR register do not take effect, causing a
3D core hang. Make sure the WPTR is updated properly when waiting.

msm: kgsl: Set default value of wait_timeout in the adreno_dev struct

Set the initialization value of wait_timeout at compile time in the
declaration of the adreno_device struct instead of at runtime in
adreno_probe.
2012-06-21 00:02:15 +08:00
73aff24078 msm: kgsl: fix size checking in adreno_find_region
This function is supposed to return the memdesc that
contains the range gpuaddr to gpuaddr + size. One of the
lookups was using sizeof(unsigned int) instead of size,
which could cause false positive results from this function
and possibly kernel panics in the snapshot or postmortem
code, which rely on it to do bounds checking for them.
2012-06-20 12:39:35 +08:00
fd5e7d8237 msm: kgsl: let postmortem dump find context switch IBs
Because the IBs used for context switching are not allocated
by userspace, a separate search is needed to find them
in adreno_find_region.
2012-06-20 12:25:12 +08:00
c5ac3240a5 msm: kgsl: improve postmortem and cff bounds checking
Some hangs are fooling the postmortem dump code into
running off the end of a buffer. Fix this by making
its bounds check logic work better by reusing the
logic from kgsl_find_region().
2012-06-19 23:30:34 +08:00
8be096244d msm: kgsl: Fix when GMEM is saved for A2xx
Saving GMEM is set when doing context switching and should not
be set when creating the gmem shadow.
2012-06-19 21:46:18 +08:00
2f3f4d14f9 msm: kgsl: Add support for the preamble context flag
Userspace will set a flag in the context if preambles are in use. If
they are, we can safely skip save and restore commands for the
context. GMEM save/restore is still required.  To improve performance,
preamble commands are skipped when the context hasn't changed since
the last issueibcmds.

from Code Aurora
2012-06-19 14:00:07 +08:00
cad19fbe99 change the build batch file 2012-06-19 01:38:16 +08:00
83cf3269bc add more sf_pmem to prevent memory from filling up 2012-06-19 01:37:29 +08:00
758812c3aa fixed the adsp pmem being too low for the camera 2012-06-18 23:52:45 +08:00
1bd0e44d7a reduced the pmem size to save memory for userspace, TEST ONLY!! 2012-06-18 20:31:47 +08:00
4f50d63951 msm: kgsl: fix format of the rbbm read error message
msm: kgsl: Assign a valid context only after one has been restored
2012-06-18 20:28:17 +08:00
d0bde07fa4 set ALLORNOTHING allocator for mdp heap 2012-06-05 00:12:26 +08:00
c0e5d2e3af msm: htcleo: export the real WiFi and Bluetooth MAC addresses.
(Credits go to marc1706)
2012-06-04 17:52:49 +02:00
b9788bd14a drivers: mtd: devices: htcleo_nand: leave only correct candidates for WiFi and Bluetooth MAC addresses.
(Credits go to Rick_1995, aka zeusk.)
http://forum.xda-developers.com/showpost.php?p=26579738&postcount=4
2012-06-04 17:52:17 +02:00
4548f8ea32 drivers: mtd: devices: htcleo_nand: find the real unique WiFi and Bluetooth MAC addresses.
(Credits go to Franck78 <fbourdonnec@chez.com>)

http://forum.xda-developers.com/showpost.php?p=26556691&postcount=3
2012-06-04 17:51:42 +02:00
44f4738129 prep for real mac patches by zeusk and Franck78
Revert "msm: htcleo: removed the real WiFi MAC address for SD build and used new unique WiFi MAC address, so that we have the same WiFi MAC address on NAND and SD."

This reverts commit 9ab858cdb2.
2012-06-04 17:49:47 +02:00
c1c3395221 msm: htcleo: cleanup 2012-06-04 17:48:31 +02:00
32f796ad5c compress boot and system dir only 2012-06-02 16:34:51 +08:00
aacc150509 add auto build script 2012-06-02 16:26:49 +08:00
a7c3c3727b enable the 2700mAh battery for my private use only 2012-06-02 15:35:33 +08:00
b38b808efb enable fast charge 2012-06-02 15:33:41 +08:00
8967e1f301 htcleo: updated htcleo_defconfig to tytung_HWA_r3 2012-06-02 00:54:56 +08:00
0cc4b7c1ca net: netfilter: enable bandwidth control to be able to set mobile data limit. 2012-06-02 00:52:31 +08:00
c3fcc46456 updated README. 2012-06-02 00:45:50 +08:00
e512b34cde htcleo: updated htcleo_defconfig to tytung_HWA_r2.5 2012-06-01 01:16:56 +08:00
db81187e55 msm: htcleo: export the real WiFi and Bluetooth MAC addresses.
(Credits go to marc1706)
2012-06-01 01:16:20 +08:00
8505d01410 drivers: mtd: devices: htcleo_nand: leave only correct candidates for WiFi and Bluetooth MAC addresses.
(Credits go to Rick_1995, aka zeusk.)
http://forum.xda-developers.com/showpost.php?p=26579738&postcount=4
2012-06-01 00:33:19 +08:00
a979a22695 drivers: mtd: devices: htcleo_nand: find the real unique WiFi and Bluetooth MAC addresses.
(Credits go to Franck78 <fbourdonnec@chez.com>)

http://forum.xda-developers.com/showpost.php?p=26556691&postcount=3
2012-06-01 00:21:07 +08:00
560a956cd5 drivers: usb: gadget: update some usb drivers for USB Tethering. 2012-05-22 21:42:00 +02:00
5ff4526737 htcleo: updated htcleo_defconfig to tytung_HWA_r2.4-uniMAC 2012-05-22 19:52:16 +08:00
2c73e1566d Revert "msm: htcleo: removed the real WiFi MAC address for SD build and used new unique WiFi MAC address, so that we have the same WiFi MAC address on NAND and SD."
This reverts commit 9ab858cdb2.

Revert it because the new unique WiFi MAC patch only works for NAND ROM and MAGLDR SD boot; it does not work for WM users using Clrcad & Haret to boot an SD build.
2012-05-22 19:51:10 +08:00
88e7e8a6d2 drivers: usb: gadget: update some usb drivers for USB Tethering. 2012-05-19 19:33:14 +08:00
50abfc6c2d Merge pull request #4 from zeusk/ics_HWA
[KGSL] update to msm-kgsl3d0 v3.8
2012-05-13 21:49:47 -07:00
9bb78d2a7f [KGSL] add missing files for last commit 2012-05-13 23:22:49 +02:00
be86226379 [KGSL] add missing files for last commit 2012-05-14 02:47:02 +05:30
351d65fc0a [KGSL] update to msm-kgsl3d0 v3.8 2012-05-13 23:10:15 +02:00
8afb87a6ea [KGSL] update to msm-kgsl3d0 v3.8 2012-05-14 01:49:10 +05:30
1e3f6a9266 updated README. 2012-05-13 20:34:11 +08:00
ac0378e146 htcleo: updated htcleo_defconfig to tytung_HWA_r2.2-uniMAC 2012-05-13 11:49:13 +08:00
14509b73e8 msm: htcleo: cleanup 2012-05-13 03:45:08 +08:00
dfac1a861b Merge pull request #3 from zeusk/ics_HWA
Backport android and MSM parts from caf
2012-05-12 12:18:37 -07:00
ad99bdf4f8 config: minor changes 2012-05-12 18:07:29 +02:00
0a0f12cf99 Backport android and MSM parts from caf 2012-05-12 18:03:41 +02:00
daf542e57e Backport android and MSM parts from caf 2012-05-12 16:56:17 +05:30
9ab858cdb2 msm: htcleo: removed the real WiFi MAC address for SD build and used new unique WiFi MAC address, so that we have the same WiFi MAC address on NAND and SD.
Will revert this commit when someday someone implements the real WiFi MAC address for NAND.
2012-05-09 00:35:54 +08:00
03d0f38950 msm: htcleo: removed the old support for bt.mac=... from kernel cmd line 2012-05-09 00:32:17 +08:00
54c05e087a msm: htcleo: implemented the real unique WiFi/Bluetooth MAC address to solve the MAC address collisions. (Credit to Franck78 <fbourdonnec@chez.com>)
WiFi MAC address collisions: http://forum.xda-developers.com/showthread.php?t=1275095
2012-05-09 00:27:33 +08:00
f13c2b648a updated README. 2012-05-05 22:36:05 +08:00
d1e200de48 htcleo: updated htcleo_defconfig to tytung_HWA_r2 (enable ZRAM). 2012-05-05 00:21:36 +08:00
0d06b3f2b4 drivers: staging: zram: added ZRAM support: /dev/zramX (X = 0, 1, ...). 2012-05-05 00:19:05 +08:00
64ec5359e1 Merge pull request #2 from zeusk/ics_HWA
ics hwa fixes
Thanks zeusk.
2012-05-03 13:02:23 -07:00
3311dbe75b Merge pull request #1 from securecrt/ics_HWA
fixed the boot logo
Thanks securecrt
2012-05-03 10:04:53 -07:00
eeb52b2437 fixed the boot logo 2012-05-04 00:38:00 +08:00
e8bd623aa0 [MSM] revert qdsp backport, seems to break quite a lot of stuff, will fix it later. 2012-05-03 21:30:12 +05:30
48322595f9 [MSM] Backported a lot of stuff from 2.6.35 and 2.6.32.59 kernels, compiles and tested on device 2012-05-03 19:31:48 +05:30
bffda154f5 Cleanup remnant code of the older kgsl 2012-05-03 12:28:28 +05:30
c85abf5272 [MSM] Fix makefile, which erroneously includes the logo file disregarding the board config 2012-05-03 12:15:59 +05:30
d1aeebac34 [KGSL] remove the older driver along with some
apparently unused, unrequired binaries.
2012-05-03 12:09:48 +05:30
4df58bdbe5 htcleo: updated htcleo_defconfig to tytung_HWA_r1 (enable new KGSL, KSM, and GENLOCK). 2012-05-01 15:03:03 +08:00
57f5775c0b Added GENLOCK. 2012-05-01 14:50:48 +08:00
c6de4393cf Added the latest KGSL driver /dev/kgsl-3d0 for ICS HWA (Hardware Acceleration). (Credits to Securecrt and Rick_1995) 2012-05-01 13:12:22 +08:00
226 changed files with 37646 additions and 11257 deletions

Documentation/genlock.txt (new file)

@@ -0,0 +1,161 @@
Introduction
'genlock' is an in-kernel API and optional userspace interface for a generic
cross-process locking mechanism. The API is designed for situations where
multiple user space processes and/or kernel drivers need to coordinate access
to a shared resource, such as a graphics buffer. The API was designed with
graphics buffers in mind, but is sufficiently generic to allow it to be
independently used with different types of resources. The chief advantage
of genlock over other cross-process locking mechanisms is that the resources
can be accessed by both userspace and kernel drivers, which allows resources
to be locked or unlocked by asynchronous events in the kernel without the
intervention of user space.
As an example, consider a graphics buffer that is shared between a rendering
application and a compositing window manager. The application renders into a
buffer. That buffer is reused by the compositing window manager as a texture.
To avoid corruption, access to the buffer needs to be restricted so that one
is not drawing on the surface while the other is reading. Locks can be
explicitly added between the rendering stages in the processes, but explicit
locks require that the application wait for rendering and purposely release the
lock. An implicit release triggered by an asynchronous event from the GPU
kernel driver, however, will let execution continue without requiring the
intercession of user space.
SW Goals
The genlock API implements exclusive write locks and shared read locks meaning
that there can only be one writer at a time, but multiple readers. Processes
that are unable to acquire a lock can be optionally blocked until the resource
becomes available.
Locks are shared between processes. Each process will have its own private
instance of a lock, known as a handle. Handles can be shared between user
space and kernel space to allow a kernel driver to unlock or lock a buffer
on behalf of a user process.
Kernel API
Access to the genlock API can either be via the in-kernel API or via an
optional character device (/dev/genlock). The character device is primarily
to be used for legacy resource sharing APIs that cannot be easily changed.
New resource sharing APIs from this point should implement a scheme-specific
wrapper for locking.
To create or attach to an existing lock, a process or kernel driver must first
create a handle. Each handle is linked to a single lock at any time. An entity
may have multiple handles, each associated with a different lock. Once a handle
has been created, the owner may create a new lock or attach an existing lock
that has been exported from a different handle.
Once the handle has a lock attached, the owning process may attempt to lock the
buffer for read or write. Write locks are exclusive, meaning that only one
process may acquire it at any given time. Read locks are shared, meaning that
multiple readers can hold the lock at the same time. Attempts to acquire a read
lock with a writer active or a write lock with one or more readers or writers
active will typically cause the process to block until the lock is acquired.
When the lock is released, all waiting processes will be woken up. Ownership
of the lock is reference counted, meaning that any one owner can "lock"
multiple times. The lock will only be released from the owner when all the
references to the lock are released via unlock.
The owner of a write lock may atomically convert the lock into a read lock
(which will wake up other processes waiting for a read lock) without first
releasing the lock. The owner would simply issue a new request for a read lock.
However, the owner of a read lock cannot convert it into a write lock in the
same manner. To switch from a read lock to a write lock, the owner must
release the lock and then try to reacquire it.
These are the in-kernel API calls that drivers can use to create and
manipulate handles and locks. Handles can either be created and managed
completely inside of kernel space, or shared from user space via a file
descriptor.
* struct genlock_handle *genlock_get_handle(void)
Create a new handle.
* struct genlock_handle *genlock_get_handle_fd(int fd)
Given a valid file descriptor, return the handle associated with that
descriptor.
* void genlock_put_handle(struct genlock_handle *)
Release a handle.
* struct genlock *genlock_create_lock(struct genlock_handle *)
Create a new lock and attach it to the handle.
* struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd)
Given a valid file descriptor, get the lock associated with it and attach it to
the handle.
* void genlock_release_lock(struct genlock_handle *)
Release a lock attached to a handle.
* int genlock_lock(struct genlock_handle *, int op, int flags, u32 timeout)
Lock or unlock the lock attached to the handle. A zero timeout value will
be treated as if the GENLOCK_NOBLOCK flag were passed: if the lock
can be acquired without blocking then do so, otherwise return -EAGAIN.
Returns -ETIMEDOUT if the timeout expired or 0 if the lock was
acquired.
* int genlock_wait(struct genlock_handle *, u32 timeout)
Wait for a lock held by the handle to go to the unlocked state. A non-zero
timeout value must be passed. Returns -ETIMEDOUT if the timeout expired or
0 if the lock is in an unlocked state.
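As a rough illustration, an in-kernel user of these calls might look like the
following minimal sketch (it assumes the constructors follow the usual ERR_PTR
convention and reuses the GENLOCK_* operation constants described for the
character device below; the function name is illustrative only):

#include <linux/err.h>
#include <linux/genlock.h>

static int example_genlock_writer(void)
{
	struct genlock_handle *handle;
	struct genlock *lock;
	int ret;

	/* Create a private handle, then attach a brand new lock to it. */
	handle = genlock_get_handle();
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	lock = genlock_create_lock(handle);
	if (IS_ERR(lock)) {
		ret = PTR_ERR(lock);
		goto put_handle;
	}

	/* Take the exclusive write lock, blocking for up to 100 ms. */
	ret = genlock_lock(handle, GENLOCK_WRLOCK, 0, 100);
	if (ret)
		goto put_handle;

	/* ... write to the shared buffer ... */

	/* Atomically downgrade to a shared read lock, then release it. */
	ret = genlock_lock(handle, GENLOCK_RDLOCK, 0, 100);
	if (!ret)
		ret = genlock_lock(handle, GENLOCK_UNLOCK, 0, 0);

put_handle:
	genlock_put_handle(handle);
	return ret;
}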
Character Device
Opening an instance to the /dev/genlock character device will automatically
create a new handle. All ioctl functions with the exception of NEW and
RELEASE use the following parameter structure:
struct genlock_lock {
	int fd;      /* Returned by EXPORT, used by ATTACH */
	int op;      /* Used by LOCK */
	int flags;   /* Used by LOCK */
	u32 timeout; /* Used by LOCK and WAIT */
};
* GENLOCK_IOC_NEW
Create a new lock and attach it to the handle. Returns -EINVAL if the handle
already has a lock attached (use GENLOCK_IOC_RELEASE to remove it). Returns
-ENOMEM if the memory for the lock cannot be allocated. No data is passed
from the user for this ioctl.
* GENLOCK_IOC_EXPORT
Export the currently attached lock to a file descriptor. The file descriptor
is returned in genlock_lock.fd.
* GENLOCK_IOC_ATTACH
Attach an exported lock file descriptor to the current handle. Returns -EINVAL
if the handle already has a lock attached (use GENLOCK_IOC_RELEASE to remove
it). Pass the file descriptor in genlock_lock.fd.
* GENLOCK_IOC_LOCK
Lock or unlock the attached lock. Pass the desired operation in
genlock_lock.op:
* GENLOCK_WRLOCK - write lock
* GENLOCK_RDLOCK - read lock
* GENLOCK_UNLOCK - unlock an existing lock
Pass flags in genlock_lock.flags:
* GENLOCK_NOBLOCK - do not block if the lock is already taken
Pass a timeout value in milliseconds in genlock_lock.timeout.
genlock_lock.flags and genlock_lock.timeout are not used for UNLOCK.
Returns -EINVAL if no lock is attached, -EAGAIN if the lock is taken and
NOBLOCK is specified or if the timeout value is zero, -ETIMEDOUT if the timeout
expires, or 0 if the lock was successful.
* GENLOCK_IOC_WAIT
Wait for the lock attached to the handle to be released (i.e., to go to the unlocked state).
This is mainly used for a thread that needs to wait for a peer to release a
lock on the same shared handle. A non-zero timeout value in milliseconds is
passed in genlock_lock.timeout. Returns 0 when the lock has been released,
-EINVAL if a zero timeout is passed, or -ETIMEDOUT if the timeout expires.
* GENLOCK_IOC_RELEASE
Use this to release an existing lock. This is useful if you wish to attach a
different lock to the same handle. You do not need to call this under normal
circumstances; when the handle is closed the reference to the lock is released.
No data is passed from the user for this ioctl.
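To make the ioctl flow concrete, a userspace client might look like this
minimal sketch (it assumes the GENLOCK_IOC_* and GENLOCK_* definitions are
available from a <linux/genlock.h> header and abbreviates error handling):

#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/genlock.h>

int main(void)
{
	struct genlock_lock req = { 0 };
	/* Opening the device implicitly creates a new handle. */
	int fd = open("/dev/genlock", O_RDWR);

	if (fd < 0) {
		perror("open /dev/genlock");
		return 1;
	}

	/* Attach a brand new lock to the handle. */
	if (ioctl(fd, GENLOCK_IOC_NEW))
		perror("GENLOCK_IOC_NEW");

	/*
	 * Export the lock: req.fd can be handed to a peer process (for
	 * example over a unix socket), which attaches with GENLOCK_IOC_ATTACH.
	 */
	if (ioctl(fd, GENLOCK_IOC_EXPORT, &req))
		perror("GENLOCK_IOC_EXPORT");

	/* Take the write lock, waiting at most 500 ms. */
	req.op = GENLOCK_WRLOCK;
	req.flags = 0;
	req.timeout = 500;
	if (ioctl(fd, GENLOCK_IOC_LOCK, &req))
		perror("GENLOCK_IOC_LOCK");

	/* ... use the shared resource ... */

	req.op = GENLOCK_UNLOCK;
	if (ioctl(fd, GENLOCK_IOC_LOCK, &req))
		perror("GENLOCK_IOC_LOCK (unlock)");

	close(fd);
	return 0;
}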

Makefile

@@ -1,7 +1,6 @@
VERSION = 2
PATCHLEVEL = 6
SUBLEVEL = 32
EXTRAVERSION = -ics
NAME = Man-Eating Seals of Antiquity
# *DOCUMENTATION*

README

@@ -20,18 +20,18 @@ Primary features:
- Two-way call recording (Credits: avs333, snq-, and tytung)
- T-Mobile Wi-Fi Calling (Credits: tytung)
- Wi-Fi IEEE 802.1x/EAP authentication (Credits: tytung)
- Native USB Tethering (for Gingerbread) (Credits: tytung)
- Native USB Tethering (Credits: tytung)
- Native Wi-Fi Tethering (Credits: tytung)
- Real Wi-Fi MAC address (only for SD build on WinMo 6.5) (Credits: savan and tytung)
- Unique Wi-Fi MAC address (for MAGLDR and cLK) (Credits: markinus)
- Unique Bluetooth MAC address (Credits: markinus and tytung)
- Official HTC extended battery support (HTC EB 2300mAh) (Credits: arne)
- ALSA sound driver as kernel modules (alsa-pcm-htc-leo.ko and alsa-mix-htc-leo.ko) (Credits: cotulla)
- Wired headphones support for ICS. (Credits: zivan56)
- Backported xt_qtaguid and xt_quota2 to support data usage for ICS. (Credits: tytung)
- Improved Flashlight compatibility for ICS. (Credits: tytung)
- Backported the GPU driver to enable the Hardware Acceleration for ICS. (Credits: Securecrt and Rick_1995)
- Updated to msm-kgsl3d0 v3.8 to match the latest QCOM Adreno200 drivers for ICS. (Credits: Rick_1995)
- Real WiFi and Bluetooth MAC addresses. (Credits: Franck78, Rick_1995 and Marc1706)
Credits: Cotulla, Markinus, Hastarin, TYTung, Letama, Rajko, Dan1j3l, Cedesmith, Arne, Trilu, Charansingh, Mdebeljuh, Jdivic, Avs333, Snq-, Savan, Drizztje, Marc1706, Zivan56, other devs, and testers.
Credits: Cotulla, Markinus, Hastarin, TYTung, Letama, Rajko, Dan1j3l, Cedesmith, Arne, Trilu, Charansingh, Mdebeljuh, Jdivic, Avs333, Snq-, Savan, Drizztje, Marc1706, Zivan56, Securecrt, Rick_1995, Franck78, other devs, and testers.
===============================================================================

arch/arm/Makefile

@@ -17,7 +17,7 @@ endif
OBJCOPYFLAGS :=-O binary -R .note -R .note.gnu.build-id -R .comment -S
GZFLAGS :=-9
#KBUILD_CFLAGS +=-pipe
KBUILD_CFLAGS +=-pipe
# Explicitly specifiy 32-bit ARM ISA since toolchain default can be -mthumb:
KBUILD_CFLAGS +=$(call cc-option,-marm,)

arch/arm/configs/htcleo_defconfig

@@ -1,7 +1,7 @@
#
# Automatically generated make config: don't edit
# Linux kernel version: 2.6.32-ics
# Tue Feb 28 17:02:10 CST 2012
# Sun Oct 14 20:28:45 CST 2012
#
CONFIG_ARM=y
CONFIG_SYS_SUPPORTS_APM_EMULATION=y
@@ -20,6 +20,7 @@ CONFIG_ARCH_HAS_CPUFREQ=y
CONFIG_GENERIC_HWEIGHT=y
CONFIG_GENERIC_CALIBRATE_DELAY=y
CONFIG_GENERIC_HARDIRQS_NO__DO_IRQ=y
CONFIG_OPROFILE_ARMV7=y
CONFIG_VECTORS_BASE=0xffff0000
CONFIG_DEFCONFIG_LIST="/lib/modules/$UNAME_RELEASE/.config"
CONFIG_CONSTRUCTORS=y
@@ -31,7 +32,7 @@ CONFIG_EXPERIMENTAL=y
CONFIG_BROKEN_ON_SMP=y
CONFIG_LOCK_KERNEL=y
CONFIG_INIT_ENV_ARG_LIMIT=32
CONFIG_LOCALVERSION="_htcleo_r1"
CONFIG_LOCALVERSION="_cmhtcleo"
# CONFIG_LOCALVERSION_AUTO is not set
CONFIG_HAVE_KERNEL_GZIP=y
CONFIG_HAVE_KERNEL_BZIP2=y
@@ -400,9 +401,10 @@ CONFIG_BOUNCE=y
CONFIG_VIRT_TO_BUS=y
CONFIG_HAVE_MLOCK=y
CONFIG_HAVE_MLOCKED_PAGE_BIT=y
# CONFIG_KSM is not set
CONFIG_KSM=y
CONFIG_DEFAULT_MMAP_MIN_ADDR=4096
CONFIG_ALIGNMENT_TRAP=y
CONFIG_ALLOW_CPU_ALIGNMENT=y
# CONFIG_UACCESS_WITH_MEMCPY is not set
#
@@ -425,9 +427,9 @@ CONFIG_CPU_FREQ_STAT_DETAILS=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE=y
# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVEX is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS is not set
# CONFIG_CPU_FREQ_DEFAULT_GOV_SMARTASS2 is not set
@@ -602,11 +604,10 @@ CONFIG_NETFILTER_XT_CONNMARK=y
#
CONFIG_NETFILTER_XT_TARGET_CLASSIFY=y
CONFIG_NETFILTER_XT_TARGET_CONNMARK=y
# CONFIG_NETFILTER_XT_TARGET_CT is not set
# CONFIG_NETFILTER_XT_TARGET_DSCP is not set
# CONFIG_NETFILTER_XT_TARGET_HL is not set
CONFIG_NETFILTER_XT_TARGET_MARK=y
# CONFIG_NETFILTER_XT_TARGET_NFLOG is not set
CONFIG_NETFILTER_XT_TARGET_NFLOG=y
CONFIG_NETFILTER_XT_TARGET_NFQUEUE=y
# CONFIG_NETFILTER_XT_TARGET_NOTRACK is not set
# CONFIG_NETFILTER_XT_TARGET_RATEEST is not set
@@ -630,7 +631,7 @@ CONFIG_NETFILTER_XT_MATCH_CONNTRACK=y
# CONFIG_NETFILTER_XT_MATCH_ESP is not set
CONFIG_NETFILTER_XT_MATCH_HASHLIMIT=y
CONFIG_NETFILTER_XT_MATCH_HELPER=y
CONFIG_NETFILTER_XT_MATCH_HL=y
# CONFIG_NETFILTER_XT_MATCH_HL is not set
CONFIG_NETFILTER_XT_MATCH_IPRANGE=y
CONFIG_NETFILTER_XT_MATCH_LENGTH=y
CONFIG_NETFILTER_XT_MATCH_LIMIT=y
@@ -707,8 +708,21 @@ CONFIG_IP_NF_ARP_MANGLE=y
CONFIG_NF_DEFRAG_IPV6=y
CONFIG_NF_CONNTRACK_IPV6=y
# CONFIG_IP6_NF_QUEUE is not set
# CONFIG_IP6_NF_IPTABLES is not set
# CONFIG_BRIDGE_NF_EBTABLES is not set
CONFIG_IP6_NF_IPTABLES=y
# CONFIG_IP6_NF_MATCH_AH is not set
# CONFIG_IP6_NF_MATCH_EUI64 is not set
# CONFIG_IP6_NF_MATCH_FRAG is not set
# CONFIG_IP6_NF_MATCH_OPTS is not set
# CONFIG_IP6_NF_MATCH_HL is not set
# CONFIG_IP6_NF_MATCH_IPV6HEADER is not set
# CONFIG_IP6_NF_MATCH_MH is not set
# CONFIG_IP6_NF_MATCH_RT is not set
# CONFIG_IP6_NF_TARGET_HL is not set
CONFIG_IP6_NF_TARGET_LOG=y
CONFIG_IP6_NF_FILTER=y
CONFIG_IP6_NF_TARGET_REJECT=y
CONFIG_IP6_NF_MANGLE=y
CONFIG_IP6_NF_RAW=y
# CONFIG_IP_DCCP is not set
# CONFIG_IP_SCTP is not set
# CONFIG_RDS is not set
@@ -849,6 +863,8 @@ CONFIG_FW_LOADER=y
CONFIG_FIRMWARE_IN_KERNEL=y
CONFIG_EXTRA_FIRMWARE=""
# CONFIG_SYS_HYPERVISOR is not set
CONFIG_GENLOCK=y
CONFIG_GENLOCK_MISCDEVICE=y
CONFIG_CONNECTOR=y
CONFIG_PROC_EVENTS=y
CONFIG_MTD=y
@@ -1361,6 +1377,16 @@ CONFIG_DAB=y
#
# Graphics support
#
CONFIG_MSM_KGSL=y
# CONFIG_MSM_KGSL_CFF_DUMP is not set
# CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL is not set
# CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP is not set
# CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX is not set
CONFIG_MSM_KGSL_MMU=y
# CONFIG_KGSL_PER_PROCESS_PAGE_TABLE is not set
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE=0xFFF0000
CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y
# CONFIG_MSM_KGSL_DISABLE_SHADOW_WRITES is not set
# CONFIG_VGASTATE is not set
CONFIG_VIDEO_OUTPUT_CONTROL=y
CONFIG_FB=y
@@ -1394,9 +1420,6 @@ CONFIG_FB_CFB_IMAGEBLIT=y
CONFIG_FB_MSM=y
CONFIG_FB_MSM_LCDC=y
# CONFIG_FB_MSM_TVOUT is not set
CONFIG_GPU_MSM_KGSL=y
CONFIG_MSM_KGSL_MMU=y
# CONFIG_MSM_KGSL_PER_FD_PAGETABLE is not set
# CONFIG_MSM_HDMI is not set
CONFIG_FB_MSM_LOGO=y
# CONFIG_BACKLIGHT_LCD_SUPPORT is not set
@@ -1665,6 +1688,20 @@ CONFIG_ANDROID_LOW_MEMORY_KILLER=y
# CONFIG_IIO is not set
# CONFIG_BTPORT is not set
#
# ZRAM
#
CONFIG_XVMALLOC=y
CONFIG_ZRAM=y
CONFIG_ZRAM_NUM_DEVICES=1
CONFIG_ZRAM_DEFAULT_PERCENTAGE=18
# CONFIG_ZRAM_DEBUG is not set
CONFIG_ZRAM_DEFAULT_DISKSIZE=100000000
# CONFIG_ZRAM_LZO is not set
CONFIG_ZRAM_SNAPPY=y
CONFIG_SNAPPY_COMPRESS=y
CONFIG_SNAPPY_DECOMPRESS=y
#
# File systems
#

arch/arm/include/asm/asm-offsets.h (new file)

@@ -0,0 +1 @@
#include <generated/asm-offsets.h>

arch/arm/include/asm/dma-mapping.h

@@ -129,6 +129,45 @@ static inline void dma_free_noncoherent(struct device *dev, size_t size,
{
}
/*
* dma_coherent_pre_ops - barrier functions for coherent memory before DMA.
* A barrier is required to ensure memory operations are complete before the
* initiation of a DMA xfer.
* If the coherent memory is Strongly Ordered
* - pre ARMv7 and 8x50 guarantees ordering wrt other mem accesses
* - ARMv7 guarantees ordering only within a 1KB block, so we need a barrier
* If coherent memory is normal then we need a barrier to prevent
* reordering
*/
static inline void dma_coherent_pre_ops(void)
{
#if (__LINUX_ARM_ARCH__ >= 7)
dmb();
#else
if (arch_is_coherent())
dmb();
else
barrier();
#endif
}
/*
* dma_post_coherent_ops - barrier functions for coherent memory after DMA.
* If the coherent memory is Strongly Ordered we dont need a barrier since
* there are no speculative fetches to Strongly Ordered memory.
* If coherent memory is normal then we need a barrier to prevent reordering
*/
static inline void dma_coherent_post_ops(void)
{
#if (__LINUX_ARM_ARCH__ >= 7)
dmb();
#else
if (arch_is_coherent())
dmb();
else
barrier();
#endif
}
/**
* dma_alloc_coherent - allocate consistent memory for DMA
* @dev: valid struct device pointer, or NULL for ISA and EISA-like devices

arch/arm/include/asm/io.h

@@ -185,6 +185,12 @@ extern void _memset_io(volatile void __iomem *, int, size_t);
#define readsw(p,d,l) __raw_readsw(__mem_pci(p),d,l)
#define readsl(p,d,l) __raw_readsl(__mem_pci(p),d,l)
#define writeb_relaxed(v,c) ((void)__raw_writeb(v,__mem_pci(c)))
#define writew_relaxed(v,c) ((void)__raw_writew((__force u16) \
cpu_to_le16(v),__mem_pci(c)))
#define writel_relaxed(v,c) ((void)__raw_writel((__force u32) \
cpu_to_le32(v),__mem_pci(c)))
#define writeb(v,c) __raw_writeb(v,__mem_pci(c))
#define writew(v,c) __raw_writew((__force __u16) \
cpu_to_le16(v),__mem_pci(c))

arch/arm/include/asm/outercache.h (new file)

@@ -0,0 +1,75 @@
/*
* arch/arm/include/asm/outercache.h
*
* Copyright (C) 2010 ARM Ltd.
* Written by Catalin Marinas <catalin.marinas@arm.com>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
* published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
*/
#ifndef __ASM_OUTERCACHE_H
#define __ASM_OUTERCACHE_H
struct outer_cache_fns {
void (*inv_range)(unsigned long, unsigned long);
void (*clean_range)(unsigned long, unsigned long);
void (*flush_range)(unsigned long, unsigned long);
#ifdef CONFIG_OUTER_CACHE_SYNC
void (*sync)(void);
#endif
};
#ifdef CONFIG_OUTER_CACHE
extern struct outer_cache_fns outer_cache;
static inline void outer_inv_range(unsigned long start, unsigned long end)
{
if (outer_cache.inv_range)
outer_cache.inv_range(start, end);
}
static inline void outer_clean_range(unsigned long start, unsigned long end)
{
if (outer_cache.clean_range)
outer_cache.clean_range(start, end);
}
static inline void outer_flush_range(unsigned long start, unsigned long end)
{
if (outer_cache.flush_range)
outer_cache.flush_range(start, end);
}
#else
static inline void outer_inv_range(unsigned long start, unsigned long end)
{ }
static inline void outer_clean_range(unsigned long start, unsigned long end)
{ }
static inline void outer_flush_range(unsigned long start, unsigned long end)
{ }
#endif
#ifdef CONFIG_OUTER_CACHE_SYNC
static inline void outer_sync(void)
{
if (outer_cache.sync)
outer_cache.sync();
}
#else
static inline void outer_sync(void)
{ }
#endif
#endif /* __ASM_OUTERCACHE_H */
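
A sketch of how an outer-cache driver hooks into this interface (function names hypothetical; on a real PL310/L2X0 the mainline init code does roughly this):

static void hypo_l2_inv_range(unsigned long start, unsigned long end)
{
	/* write to the controller's invalidate-by-PA registers here */
}

static void hypo_l2_clean_range(unsigned long start, unsigned long end)
{
	/* clean-by-PA */
}

static void hypo_l2_flush_range(unsigned long start, unsigned long end)
{
	/* clean+invalidate-by-PA */
}

void __init hypo_l2_init(void)
{
	outer_cache.inv_range = hypo_l2_inv_range;
	outer_cache.clean_range = hypo_l2_clean_range;
	outer_cache.flush_range = hypo_l2_flush_range;
}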

View File

@ -382,11 +382,13 @@ ENDPROC(sys_clone_wrapper)
sys_sigreturn_wrapper:
add r0, sp, #S_OFF
mov why, #0 @ prevent syscall restart handling
b sys_sigreturn
ENDPROC(sys_sigreturn_wrapper)
sys_rt_sigreturn_wrapper:
add r0, sp, #S_OFF
mov why, #0 @ prevent syscall restart handling
b sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

View File

@ -158,10 +158,10 @@ __secondary_data:
* registers.
*/
__enable_mmu:
#ifdef CONFIG_ALIGNMENT_TRAP
orr r0, r0, #CR_A
#else
#ifdef CONFIG_ALLOW_CPU_ALIGNMENT
bic r0, r0, #CR_A
#else
orr r0, r0, #CR_A
#endif
#ifdef CONFIG_CPU_DCACHE_DISABLE
bic r0, r0, #CR_C

View File

@ -22,6 +22,7 @@
#include <linux/errno.h>
#include <linux/cpufreq.h>
#include <linux/regulator/consumer.h>
#include <linux/regulator/driver.h>
#include <mach/board.h>
#include <mach/msm_iomap.h>
@ -62,6 +63,19 @@ struct clkctl_acpu_speed {
unsigned axiclk_khz;
};
static unsigned long max_axi_rate;
struct regulator {
struct device *dev;
struct list_head list;
int uA_load;
int min_uV;
int max_uV;
char *supply_name;
struct device_attribute dev_attr;
struct regulator_dev *rdev;
};
/* clock sources */
#define CLK_TCXO 0 /* 19.2 MHz */
#define CLK_GLOBAL_PLL 1 /* 768 MHz */
@ -76,135 +90,46 @@ struct clkctl_acpu_speed {
#define SRC_PLL1 3 /* 768 MHz */
struct clkctl_acpu_speed acpu_freq_tbl[] = {
#ifdef CONFIG_HTCLEO_UNDERVOLT_1000
{ 19200, CCTL(CLK_TCXO, 1), SRC_RAW, 0, 0, 1000, 14000 },
{ 19200, CCTL(CLK_TCXO, 1), SRC_RAW, 0, 0, 1000, 14000},
{ 96000, CCTL(CLK_TCXO, 1), SRC_AXI, 0, 0, 1000, 14000 },
{ 128000, CCTL(CLK_TCXO, 1), SRC_AXI, 0, 0, 1000, 14000 },
{ 245000, CCTL(CLK_MODEM_PLL, 1), SRC_RAW, 0, 0, 1000, 29000 },
//{ 256000, CCTL(CLK_GLOBAL_PLL, 3), SRC_RAW, 0, 0, 1000, 29000 },
{ 384000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0A, 0, 1000, 58000 },
{ 422400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0B, 0, 1000, 117000 },
{ 460800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0C, 0, 1000, 117000 },
{ 499200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0D, 0, 1025, 117000 },
{ 537600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0E, 0, 1050, 117000 },
{ 576000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0F, 0, 1050, 117000 },
{ 614400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x10, 0, 1075, 117000 },
{ 652800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x11, 0, 1100, 117000 },
{ 691200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x12, 0, 1125, 117000 },
{ 729600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x13, 0, 1150, 117000 },
{ 768000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x14, 0, 1150, 128000 },
{ 806400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x15, 0, 1175, 128000 },
{ 844800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x16, 0, 1200, 128000 },
{ 883200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x17, 0, 1200, 128000 },
{ 921600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x18, 0, 1225, 128000 },
{ 960000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x19, 0, 1225, 128000 },
{ 998400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1A, 0, 1225, 128000 },
#elif CONFIG_HTCLEO_UNDERVOLT_925
// should work with most of HD2s
{ 19200, CCTL(CLK_TCXO, 1), SRC_RAW, 0, 0, 925, 14000 },
{ 128000, CCTL(CLK_TCXO, 1), SRC_AXI, 0, 0, 925, 14000 },
{ 245000, CCTL(CLK_MODEM_PLL, 1), SRC_RAW, 0, 0, 925, 29000 },
//{ 256000, CCTL(CLK_GLOBAL_PLL, 3), SRC_RAW, 0, 0, 925, 29000 },
{ 384000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0A, 0, 950, 58000 },
{ 422400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0B, 0, 975, 117000 },
{ 460800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0C, 0, 1000, 117000 },
{ 499200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0D, 0, 1025, 117000 },
{ 537600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0E, 0, 1050, 117000 },
{ 576000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0F, 0, 1050, 117000 },
{ 614400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x10, 0, 1075, 117000 },
{ 652800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x11, 0, 1100, 117000 },
{ 691200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x12, 0, 1125, 117000 },
{ 729600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x13, 0, 1150, 117000 },
{ 768000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x14, 0, 1150, 128000 },
{ 806400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x15, 0, 1175, 128000 },
{ 844800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x16, 0, 1200, 128000 },
{ 883200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x17, 0, 1200, 128000 },
{ 921600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x18, 0, 1225, 128000 },
{ 960000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x19, 0, 1225, 128000 },
{ 998400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1A, 0, 1225, 128000 },
#elif CONFIG_HTCLEO_UNDERVOLT_800
// not working yet
{ 19200, CCTL(CLK_TCXO, 1), SRC_RAW, 0, 0, 850, 14000 },
{ 128000, CCTL(CLK_TCXO, 1), SRC_AXI, 0, 0, 850, 14000 },
{ 245000, CCTL(CLK_MODEM_PLL, 1), SRC_RAW, 0, 0, 850, 29000 },
//{ 256000, CCTL(CLK_GLOBAL_PLL, 3), SRC_RAW, 0, 0, 850, 29000 },
{ 384000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0A, 0, 850, 58000 },
{ 422400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0B, 0, 875, 117000 },
{ 460800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0C, 0, 900, 117000 },
{ 499200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0D, 0, 925, 117000 },
{ 537600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0E, 0, 950, 117000 },
{ 576000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0F, 0, 950, 117000 },
{ 614400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x10, 0, 975, 117000 },
{ 652800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x11, 0, 1000, 117000 },
{ 691200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x12, 0, 1025, 117000 },
{ 729600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x13, 0, 1050, 117000 },
{ 768000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x14, 0, 1125, 128000 },
{ 806400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x15, 0, 1125, 128000 },
{ 844800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x16, 0, 1150, 128000 },
{ 883200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x17, 0, 1150, 128000 },
{ 921600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x18, 0, 1175, 128000 },
{ 960000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x19, 0, 1175, 128000 },
{ 998400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1A, 0, 1200, 128000 },
#else
{ 19200, CCTL(CLK_TCXO, 1), SRC_RAW, 0, 0, 1050, 14000},
{ 128000, CCTL(CLK_TCXO, 1), SRC_AXI, 0, 0, 1050, 14000 },
{ 245000, CCTL(CLK_MODEM_PLL, 1), SRC_RAW, 0, 0, 1050, 29000 },
/* Workaround for acpu resume hang: GPLL is turned off by arm9 */
/*{ 256000, CCTL(CLK_GLOBAL_PLL, 3), SRC_RAW, 0, 0, 1050, 29000 },*/
{ 384000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0A, 0, 1050, 58000 },
{ 422400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0B, 0, 1050, 117000 },
{ 460800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0C, 0, 1050, 117000 },
{ 499200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0D, 0, 1075, 117000 },
{ 537600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0E, 0, 1100, 117000 },
{ 576000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0F, 0, 1100, 117000 },
{ 614400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x10, 0, 1125, 117000 },
{ 652800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x11, 0, 1150, 117000 },
{ 691200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x12, 0, 1175, 117000 },
{ 729600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x13, 0, 1200, 117000 },
{ 768000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x14, 0, 1200, 128000 },
{ 806400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x15, 0, 1225, 128000 },
{ 844800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x16, 0, 1250, 128000 },
{ 883200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x17, 0, 1275, 128000 },
{ 921600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x18, 0, 1300, 128000 },
{ 960000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x19, 0, 1300, 128000 },
{ 998400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1A, 0, 1300, 128000 },
#endif
#ifdef CONFIG_HTCLEO_OVERCLOCK
#ifdef CONFIG_HTCLEO_UNDERVOLT_1000
{ 1036800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1B, 0, 1225, 128000 },
{ 1075200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1C, 0, 1250, 128000 },
{ 1113600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1D, 0, 1275, 128000 },
{ 1152000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1E, 0, 1300, 128000 },
{ 1190400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1F, 0, 1325, 128000 },
#elif CONFIG_HTCLEO_UNDERVOLT_925
{ 1036800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1B, 0, 1225, 128000 },
{ 1075200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1C, 0, 1250, 128000 },
{ 1113600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1D, 0, 1275, 128000 },
{ 1152000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1E, 0, 1300, 128000 },
{ 1190400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1F, 0, 1325, 128000 },
#elif CONFIG_HTCLEO_UNDERVOLT_800
{ 1036800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1B, 0, 1225, 128000 },
{ 1075200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1C, 0, 1250, 128000 },
{ 1113600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1D, 0, 1275, 128000 },
{ 1152000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1E, 0, 1300, 128000 },
{ 1190400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1F, 0, 1325, 128000 },
#else
{ 1036800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1B, 0, 1300, 128000 },
{ 1075200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1C, 0, 1300, 128000 },
{ 1113600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1D, 0, 1300, 128000 },
{ 1152000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1E, 0, 1325, 128000 },
{ 1190400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1F, 0, 1325, 128000 },
#endif
/* Workaround for acpu resume hang: GPLL is turned off by arm9 */
/*{ 256000, CCTL(CLK_GLOBAL_PLL, 3), SRC_RAW, 0, 0, 1050, 29000 },*/
{ 384000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0A, 0, 1000, 58000 },
{ 422400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0B, 0, 1000, 117000 },
{ 460800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0C, 0, 1000, 117000 },
{ 499200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0D, 0, 1050, 117000 },
{ 537600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0E, 0, 1050, 117000 },
{ 576000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x0F, 0, 1050, 117000 },
{ 614400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x10, 0, 1075, 117000 },
{ 652800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x11, 0, 1100, 117000 },
{ 691200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x12, 0, 1125, 117000 },
{ 729600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x13, 0, 1150, 117000 },
{ 768000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x14, 0, 1150, 128000 },
{ 806400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x15, 0, 1175, 128000 },
{ 844800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x16, 0, 1225, 128000 },
{ 883200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x17, 0, 1250, 128000 },
{ 921600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x18, 0, 1300, 128000 },
{ 960000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x19, 0, 1300, 128000 },
{ 998400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1A, 0, 1300, 128000 },
#ifdef CONFIG_HTCLEO_OVERCLOCK
{ 1036800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1B, 0, 1300, 128000 },
{ 1075200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1C, 0, 1300, 128000 },
{ 1113600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1D, 0, 1300, 128000 },
{ 1152000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1E, 0, 1300, 128000 },
{ 1190400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x1F, 0, 1325, 128000 },
#endif
#ifdef CONFIG_HTCLEO_EXOVERCLOCK
{ 1228800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x20, 0, 1325, 128000 },
{ 1267200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x21, 0, 1350, 128000 },
{ 1305600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x22, 0, 1350, 128000 },
{ 1344000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x23, 0, 1350, 128000 },
{ 1382400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x24, 0, 1350, 128000 },
{ 1420800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x25, 0, 1350, 128000 },
{ 1459200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x26, 0, 1350, 128000 },
{ 1497600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x27, 0, 1350, 128000 },
{ 1536000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x28, 0, 1350, 128000 },
{ 1228800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x20, 0, 1325, 128000 },
{ 1267200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x21, 0, 1350, 128000 },
{ 1305600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x22, 0, 1350, 128000 },
{ 1344000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x23, 0, 1350, 128000 },
{ 1382400, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x24, 0, 1350, 128000 },
{ 1420800, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x25, 0, 1350, 128000 },
{ 1459200, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x26, 0, 1350, 128000 },
{ 1497600, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x27, 0, 1350, 128000 },
{ 1536000, CCTL(CLK_TCXO, 1), SRC_SCPLL, 0x28, 0, 1350, 128000 },
#endif
{ 0 },
};
@ -231,11 +156,10 @@ static void __init acpuclk_init_cpufreq_table(void)
freq_table[i].index = i;
freq_table[i].frequency = CPUFREQ_ENTRY_INVALID;
/* Skip speeds we don't want */
if ( acpu_freq_tbl[i].acpu_khz == 19200 ||
//acpu_freq_tbl[i].acpu_khz == 128000 ||
acpu_freq_tbl[i].acpu_khz == 256000)
continue;
/* Skip speeds using the global pll */
if (acpu_freq_tbl[i].acpu_khz == 256000 ||
acpu_freq_tbl[i].acpu_khz == 19200)
continue;
vdd = acpu_freq_tbl[i].vdd;
/* Allow mpll and the first scpll speeds */
@ -269,6 +193,7 @@ struct clock_state {
unsigned long wait_for_irq_khz;
struct clk* clk_ebi1;
struct regulator *regulator;
int (*acpu_set_vdd) (int mvolts);
};
static struct clock_state drv_state = { 0 };
@ -345,11 +270,10 @@ static void scpll_set_freq(uint32_t lval)
dmb();
/* wait for frequency switch to finish */
while (readl(SCPLL_STATUS_ADDR) & 0x1)
;
while (readl(SCPLL_STATUS_ADDR) & 0x1);
/* completion bit is not reliable for SHOT switch */
udelay(25);
udelay(15);
}
/* write the new L val and switch mode */
@ -363,8 +287,7 @@ static void scpll_set_freq(uint32_t lval)
dmb();
/* wait for frequency switch to finish */
while (readl(SCPLL_STATUS_ADDR) & 0x1)
;
while (readl(SCPLL_STATUS_ADDR) & 0x1);
}
/* this is still a bit weird... */
@ -625,13 +548,20 @@ static void __init acpuclk_init(void)
}
drv_state.current_speed = speed;
for (speed = acpu_freq_tbl; speed->acpu_khz; speed++)
for (speed = acpu_freq_tbl; speed->acpu_khz; speed++) {
speed->lpj = cpufreq_scale(loops_per_jiffy,
init_khz, speed->acpu_khz);
max_axi_rate = speed->axiclk_khz * 1000;
}
loops_per_jiffy = drv_state.current_speed->lpj;
}
unsigned long acpuclk_get_max_axi_rate(void)
{
return max_axi_rate;
}
unsigned long acpuclk_get_rate(void)
{
return drv_state.current_speed->acpu_khz;
@ -674,6 +604,7 @@ void __init msm_acpu_clock_init(struct msm_acpu_clock_platform_data *clkdata)
drv_state.vdd_switch_time_us = clkdata->vdd_switch_time_us;
drv_state.power_collapse_khz = clkdata->power_collapse_khz;
drv_state.wait_for_irq_khz = clkdata->wait_for_irq_khz;
drv_state.acpu_set_vdd = acpuclk_set_vdd_level;
if (clkdata->mpll_khz)
acpu_mpll->acpu_khz = clkdata->mpll_khz;
@ -708,7 +639,7 @@ ssize_t acpuclk_get_vdd_levels_str(char *buf)
void acpuclk_set_vdd(unsigned acpu_khz, int vdd)
{
int i;
vdd = vdd / 25 * 25; //! regulator only accepts multiples of 25 (mV)
vdd = (vdd / HTCLEO_TPS65023_UV_STEP_MV) * HTCLEO_TPS65023_UV_STEP_MV;
mutex_lock(&drv_state.lock);
for (i = 0; acpu_freq_tbl[i].acpu_khz; i++)
{
@ -722,5 +653,16 @@ void acpuclk_set_vdd(unsigned acpu_khz, int vdd)
}
mutex_unlock(&drv_state.lock);
}
unsigned int acpuclk_get_vdd_min(void)
{
return HTCLEO_TPS65023_MIN_UV_MV;
}
unsigned int acpuclk_get_vdd_max(void)
{
return HTCLEO_TPS65023_MAX_UV_MV;
}
unsigned int acpuclk_get_vdd_step(void)
{
return HTCLEO_TPS65023_UV_STEP_MV;
}
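/*
 * Sketch of a consumer (hypothetical helper, not part of this patch): clamp
 * and round a requested voltage with the accessors above before handing it
 * to acpuclk_set_vdd(), mirroring the 25 mV step rounding done there.
 */
static inline int hypo_sanitize_vdd(int mv)
{
	unsigned int step = acpuclk_get_vdd_step();

	mv = (mv / step) * step;
	if (mv < acpuclk_get_vdd_min())
		mv = acpuclk_get_vdd_min();
	if (mv > acpuclk_get_vdd_max())
		mv = acpuclk_get_vdd_max();
	return mv;
}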
#endif

View File

@ -39,7 +39,6 @@
#define HTCLEO_DEFAULT_BACKLIGHT_BRIGHTNESS 255
static struct led_trigger *htcleo_lcd_backlight;
static int auto_bl_state=0;
static DEFINE_MUTEX(htcleo_backlight_lock);

View File

@ -118,7 +118,7 @@ int lightsensor_read_value(uint32_t *val)
}
*val = data[1] | (data[0] << 8);
D("lsensor adc = %d\n", *val);
D("lsensor adc = %u\n", *val); /* val is unsigned */
return 0;
}

View File

@ -30,6 +30,7 @@
#include <mach/vreg.h>
#include <mach/gpio.h>
#include <mach/board-htcleo-mmc.h>
#include "board-htcleo.h"
#include "devices.h"
@ -391,7 +392,7 @@ static int __init htcleommc_dbg_init(void)
{
struct dentry *dent;
if (!machine_is_htcleo() && !machine_is_htcleo())
if (!machine_is_htcleo())
return 0;
dent = debugfs_create_dir("htcleo_mmc_dbg", 0);

View File

@ -25,17 +25,20 @@
#include <linux/mtd/mtd.h>
#include <linux/mtd/blktrans.h>
#include <mach/msm_iomap.h>
#include <linux/crc32.h>
#include <linux/io.h>
#include "board-htcleo.h"
#include <mach/board-htcleo-mac.h>
#define NVS_MAX_SIZE 0x800U
#define NVS_MACADDR_SIZE 0x1AU
#define WLAN_SKB_BUF_NUM 16
/*
* wifi mac address will be parsed in msm_nand_probe
* see drivers/mtd/devices/htcleo_nand.c
*/
static struct proc_dir_entry *wifi_calibration;
static unsigned char nvs_mac_addr[NVS_MACADDR_SIZE];
char nvs_mac_addr[NVS_MACADDR_SIZE];
static unsigned char *hardcoded_nvs =
"sromrev=3\n"\
"vendid=0x14e4\n"\
@ -82,35 +85,7 @@ unsigned char *get_wifi_nvs_ram( void )
}
EXPORT_SYMBOL(get_wifi_nvs_ram);
static int parse_tag_msm_wifi(void)
{
uint32_t id1, id2, id3, sid1, sid2, sid3;
uint32_t id_base = 0xef260;
id1 = readl(MSM_SHARED_RAM_BASE + id_base + 0x0);
id2 = readl(MSM_SHARED_RAM_BASE + id_base + 0x4);
id3 = readl(MSM_SHARED_RAM_BASE + id_base + 0x8);
sid1 = crc32(~0, &id1, 4);
sid2 = crc32(~0, &id2, 4);
sid3 = crc32(~0, &id3, 4);
sprintf(nvs_mac_addr, "macaddr=00:23:76:%2x:%2x:%2x\n", sid1 % 0xff, sid2 % 0xff, sid3 % 0xff);
pr_info("Device Wifi Mac Address: %s\n", nvs_mac_addr);
return 0;
}
static int parse_tag_msm_wifi_from_spl(void)
{
uint32_t id1, id2, id3, id4, id5, id6;
uint32_t id_base = 0xFC028; //real mac offset found in spl for haret.exe on WM
id1 = readl(MSM_SPLHOOD_BASE + id_base + 0x0);
id2 = readl(MSM_SPLHOOD_BASE + id_base + 0x1);
id3 = readl(MSM_SPLHOOD_BASE + id_base + 0x2);
id4 = readl(MSM_SPLHOOD_BASE + id_base + 0x3);
id5 = readl(MSM_SPLHOOD_BASE + id_base + 0x4);
id6 = readl(MSM_SPLHOOD_BASE + id_base + 0x5);
sprintf(nvs_mac_addr, "macaddr=%2x:%2x:%2x:%2x:%2x:%2x\n", id1 & 0xff, id2 & 0xff, id3 & 0xff, id4 & 0xff, id5 & 0xff, id6 & 0xff);
pr_info("Device Real Wifi Mac Address: %s\n", nvs_mac_addr);
return 0;
}
static unsigned wifi_get_nvs_size( void )
{
@ -152,11 +127,6 @@ static int wifi_calibration_read_proc(char *page, char **start, off_t off,
static int __init wifi_nvs_init(void)
{
pr_info("%s\n", __func__);
if (htcleo_is_nand_boot()) {
parse_tag_msm_wifi();
} else {
parse_tag_msm_wifi_from_spl();
}
wifi_calibration = create_proc_entry("calibration", 0444, NULL);
if (wifi_calibration != NULL) {
wifi_calibration->size = wifi_get_nvs_size();

arch/arm/mach-msm/board-htcleo.c Normal file → Executable file
View File

@ -15,7 +15,6 @@
*
*/
#include <linux/crc32.h>
#include <linux/delay.h>
#include <linux/gpio.h>
#include <linux/i2c.h>
@ -55,10 +54,17 @@
#ifdef CONFIG_SERIAL_BCM_BT_LPM
#include <mach/bcm_bt_lpm.h>
#endif
#ifdef CONFIG_PERFLOCK
#include <mach/perflock.h>
#endif
#include <mach/htc_headset_mgr.h>
#include <mach/htc_headset_gpio.h>
#ifdef CONFIG_MSM_KGSL
#include <linux/msm_kgsl.h>
#endif
#include <mach/board-htcleo-mac.h>
#include <mach/board-htcleo-microp.h>
#include "board-htcleo.h"
@ -391,10 +397,9 @@ static uint32_t flashlight_gpio_table[] =
PCOM_GPIO_CFG(HTCLEO_GPIO_FLASHLIGHT_FLASH, 0, GPIO_OUTPUT, GPIO_NO_PULL, GPIO_2MA),
};
static int config_htcleo_flashlight_gpios(void)
static void config_htcleo_flashlight_gpios(void)
{
config_gpio_table(flashlight_gpio_table, ARRAY_SIZE(flashlight_gpio_table));
return 0;
}
static struct flashlight_platform_data htcleo_flashlight_data =
@ -526,87 +531,19 @@ static struct platform_device msm_camera_sensor_s5k3e2fx =
},
};
//-----PATCH for BT mac address
int is_valid_mac_address(char *mac)
{
int i =0;
while(i<17){
if( (i%3) == 2){
if ((mac[i] !=':') && (mac[i] = '-')) return 0;
if (mac[i] == '-') mac[i] = ':';
}else{
if ( !( ((mac[i] >= '0') && (mac[i] <= '9')) ||
((mac[i] >= 'a') && (mac[i] <= 'f')) ||
((mac[i] >= 'A') && (mac[i] <= 'F')))
) return 0;
}
i++;
}
if (mac[i] != '\0') return 0;
return 1;
}
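
The validator above (dropped by this hunk in favor of parsing the address in msm_nand_probe) both checks the 17-character layout and normalizes '-' separators to ':' in place. A usage sketch:

static char hypo_mac[] = "00-23-76-AB-CD-EF";

static void hypo_check_mac(void)
{
	if (is_valid_mac_address(hypo_mac))
		pr_info("bt mac ok: %s\n", hypo_mac);	/* now colon-separated */
	else
		pr_warn("bt mac invalid, keeping default\n");
}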
//-----------------------------
///////////////////////////////////////////////////////////////////////
// bluetooth
///////////////////////////////////////////////////////////////////////
/* AOSP style interface */
#define BDADDR_STR_SIZE 18
static char bdaddr[BDADDR_STR_SIZE];
/*
* bluetooth mac address will be parsed in msm_nand_probe
* see drivers/mtd/devices/htcleo_nand.c
*/
char bdaddr[BDADDR_STR_SIZE];
module_param_string(bdaddr, bdaddr, sizeof(bdaddr), 0400);
MODULE_PARM_DESC(bdaddr, "bluetooth address");
static int parse_tag_bdaddr(void)
{
uint32_t id1, id2, id3, sid1, sid2, sid3;
uint32_t id_base = 0xef260;
id1 = readl(MSM_SHARED_RAM_BASE + id_base + 0x0);
id2 = readl(MSM_SHARED_RAM_BASE + id_base + 0x4);
id3 = readl(MSM_SHARED_RAM_BASE + id_base + 0x8);
sid1 = crc32(~0, &id1, 4);
sid2 = crc32(~0, &id2, 4);
sid3 = crc32(~0, &id3, 4);
sprintf(bdaddr, "00:23:76:%2X:%2X:%2X", sid3 % 0xff, sid2 % 0xff, sid1 % 0xff);
pr_info("Device Bluetooth Mac Address: %s\n", bdaddr);
return 0;
}
/* end AOSP style interface */
/* for (sense roms) */
#define MAC_ADDRESS_SIZE_C 17
static char bdaddress[MAC_ADDRESS_SIZE_C+1] = "";
static void bt_export_bd_address(void)
{
unsigned char cTemp[6];
if (!is_valid_mac_address(bdaddress)){
memcpy(cTemp, get_bt_bd_ram(), 6);
sprintf(bdaddress, "%02x:%02x:%02x:%02x:%02x:%02x", cTemp[0], cTemp[1], cTemp[2], cTemp[3], cTemp[4], cTemp[5]);
pr_info("BD_ADDRESS=%s\n", bdaddress);
}
}
module_param_string(bdaddress, bdaddress, sizeof(bdaddress), S_IWUSR | S_IRUGO);
MODULE_PARM_DESC(bdaddress, "BT MAC ADDRESS");
#define MAX_BT_SIZE 0x6U
static unsigned char bt_bd_ram[MAX_BT_SIZE] = {0x50,0xC3,0x00,0x00,0x00,0x00};
unsigned char *get_bt_bd_ram(void)
{
return (bt_bd_ram);
}
//-----added alias for bt mac address parameter--------
static int __init htcleo_bt_macaddress_setup(char *bootconfig)
{
printk("%s: cmdline bt mac config=%s | %s\n",__FUNCTION__, bootconfig, __FILE__);
strncpy(bdaddress, bootconfig, MAC_ADDRESS_SIZE_C);
return 1;
}
__setup("bt.mac=", htcleo_bt_macaddress_setup);
//-----------------------------------------------------
/* end (sense) */
#ifdef CONFIG_SERIAL_MSM_HS
static struct msm_serial_hs_platform_data msm_uart_dm1_pdata = {
@ -744,28 +681,6 @@ static struct platform_device qsd_device_spi = {
///////////////////////////////////////////////////////////////////////
// KGSL (HW3D support)
///////////////////////////////////////////////////////////////////////
#include <linux/android_pmem.h>
static struct resource msm_kgsl_resources[] =
{
{
.name = "kgsl_reg_memory",
.start = MSM_GPU_REG_PHYS,
.end = MSM_GPU_REG_PHYS + MSM_GPU_REG_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.name = "kgsl_phys_memory",
.start = MSM_GPU_PHYS_BASE,
.end = MSM_GPU_PHYS_BASE + MSM_GPU_PHYS_SIZE - 1,
.flags = IORESOURCE_MEM,
},
{
.start = INT_GRAPHICS,
.end = INT_GRAPHICS,
.flags = IORESOURCE_IRQ,
},
};
static int htcleo_kgsl_power_rail_mode(int follow_clk)
{
int mode = follow_clk ? 0 : 1;
@ -782,28 +697,81 @@ static int htcleo_kgsl_power(bool on)
return msm_proc_comm(cmd, &rail_id, 0);
}
static struct platform_device msm_kgsl_device =
{
.name = "kgsl",
.id = -1,
.resource = msm_kgsl_resources,
.num_resources = ARRAY_SIZE(msm_kgsl_resources),
/* start kgsl-3d0 */
static struct resource kgsl_3d0_resources[] = {
{
.name = KGSL_3D0_REG_MEMORY,
.start = 0xA0000000,
.end = 0xA001ffff,
.flags = IORESOURCE_MEM,
},
{
.name = KGSL_3D0_IRQ,
.start = INT_GRAPHICS,
.end = INT_GRAPHICS,
.flags = IORESOURCE_IRQ,
},
};
static struct kgsl_device_platform_data kgsl_3d0_pdata = {
.pwr_data = {
.pwrlevel = {
{
.gpu_freq = 0,
.bus_freq = 128000000,
},
},
.init_level = 0,
.num_levels = 1,
.set_grp_async = NULL,
.idle_timeout = HZ/5,
},
.clk = {
.name = {
.clk = "grp_clk",
},
},
.imem_clk_name = {
.clk = "imem_clk",
},
};
struct platform_device msm_kgsl_3d0 = {
.name = "kgsl-3d0",
.id = 0,
.num_resources = ARRAY_SIZE(kgsl_3d0_resources),
.resource = kgsl_3d0_resources,
.dev = {
.platform_data = &kgsl_3d0_pdata,
},
};
/* end kgsl-3d0 */
///////////////////////////////////////////////////////////////////////
// Memory
///////////////////////////////////////////////////////////////////////
static struct android_pmem_platform_data mdp_pmem_pdata = {
.name = "pmem",
.start = MSM_PMEM_MDP_BASE,
.size = MSM_PMEM_MDP_SIZE,
#ifdef CONFIG_MSM_KGSL
.allocator_type = PMEM_ALLOCATORTYPE_ALLORNOTHING,
#else
.no_allocator = 0,
#endif
.cached = 1,
};
static struct android_pmem_platform_data android_pmem_adsp_pdata = {
.name = "pmem_adsp",
.start = MSM_PMEM_ADSP_BASE,
.size = MSM_PMEM_ADSP_SIZE,
#ifdef CONFIG_MSM_KGSL
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
#else
.no_allocator = 0,
.cached = 1,
#endif
.cached = 0,
};
@ -811,8 +779,12 @@ static struct android_pmem_platform_data android_pmem_venc_pdata = {
.name = "pmem_venc",
.start = MSM_PMEM_VENC_BASE,
.size = MSM_PMEM_VENC_SIZE,
#ifdef CONFIG_MSM_KGSL
.allocator_type = PMEM_ALLOCATORTYPE_BITMAP,
#else
.no_allocator = 0,
.cached = 1,
#endif
.cached = 0,
};
static struct platform_device android_pmem_mdp_device = {
@ -825,7 +797,7 @@ static struct platform_device android_pmem_mdp_device = {
static struct platform_device android_pmem_adsp_device = {
.name = "android_pmem",
.id = 4,
.id = 1, /* 4 */
.dev = {
.platform_data = &android_pmem_adsp_pdata,
},
@ -833,7 +805,7 @@ static struct platform_device android_pmem_adsp_device = {
static struct platform_device android_pmem_venc_device = {
.name = "android_pmem",
.id = 5,
.id = 3, /* 5 */
.dev = {
.platform_data = &android_pmem_venc_pdata,
},
@ -945,7 +917,11 @@ static struct platform_device *devices[] __initdata =
&msm_device_i2c,
&ds2746_battery_pdev,
&htc_battery_pdev,
#ifdef CONFIG_MSM_KGSL
&msm_kgsl_3d0,
#else
&msm_kgsl_device,
#endif
&msm_camera_sensor_s5k3e2fx,
&htcleo_flashlight_device,
&qsd_device_spi,
@ -1007,6 +983,7 @@ static struct msm_acpu_clock_platform_data htcleo_clock_data = {
// .wait_for_irq_khz = 19200, // TCXO
};
#ifdef CONFIG_PERFLOCK
static unsigned htcleo_perf_acpu_table[] = {
245000000,
576000000,
@ -1017,6 +994,8 @@ static struct perflock_platform_data htcleo_perflock_data = {
.perf_acpu_table = htcleo_perf_acpu_table,
.table_size = ARRAY_SIZE(htcleo_perf_acpu_table),
};
#endif
///////////////////////////////////////////////////////////////////////
// Reset
///////////////////////////////////////////////////////////////////////
@ -1063,7 +1042,9 @@ static void __init htcleo_init(void)
msm_acpu_clock_init(&htcleo_clock_data);
#ifdef CONFIG_PERFLOCK
perflock_init(&htcleo_perflock_data);
#endif
#if defined(CONFIG_MSM_SERIAL_DEBUGGER)
msm_serial_debug_init(MSM_UART1_PHYS, INT_UART1,
@ -1080,10 +1061,6 @@ static void __init htcleo_init(void)
config_gpio_table(bt_gpio_table, ARRAY_SIZE(bt_gpio_table));
parse_tag_bdaddr();
bt_export_bd_address();
htcleo_audio_init();
msm_device_i2c_init();

arch/arm/mach-msm/board-htcleo.h Normal file → Executable file
View File

@ -26,6 +26,7 @@
#define MSM_EBI1_BANK0_SIZE 0x1E7C0000 /* 488MB - 0x00040000 RAM CONSOLE*/
#endif
/* Don't change that */
#define MSM_SMI_BASE 0x00000000
#define MSM_SMI_SIZE 0x04000000
@ -38,6 +39,12 @@
#define MSM_FB_BASE MSM_PMEM_SMI_BASE
#define MSM_FB_SIZE 0x00600000
#define MSM_PMEM_MDP_BASE 0x3B700000
#define MSM_PMEM_MDP_SIZE 0x02000000
#define MSM_PMEM_ADSP_BASE 0x3D700000
#define MSM_PMEM_ADSP_SIZE 0x02200000
#define MSM_GPU_PHYS_BASE (MSM_PMEM_SMI_BASE + MSM_FB_SIZE)
#define MSM_GPU_PHYS_SIZE 0x00800000
/* #define MSM_GPU_PHYS_SIZE 0x00300000 */
@ -54,8 +61,6 @@
#define MSM_PMEM_SF_SIZE 0x02000000
#define MSM_PMEM_ADSP_SIZE 0x02196000
/* MSM_RAM_CONSOLE uses the last 0x00040000 of EBI memory, defined in msm_iomap.h
#define MSM_RAM_CONSOLE_SIZE 0x00040000
#define MSM_RAM_CONSOLE_BASE (MSM_EBI1_BANK0_BASE + MSM_EBI1_BANK0_SIZE - MSM_RAM_CONSOLE_SIZE) //0x2FFC0000
@ -174,7 +179,8 @@
/* Voltage driver */
#define HTCLEO_TPS65023_MIN_UV_MV (800)
#define HTCLEO_TPS65023_MAX_UV_MV (1350)
#define HTCLEO_TPS65023_MAX_UV_MV (1375)
#define HTCLEO_TPS65023_UV_STEP_MV (25)
/* LEDS */
#define LED_RGB (1 << 0)
@ -192,11 +198,11 @@ struct microp_led_platform_data {
int num_leds;
};
int htcleo_pm_set_vreg(int enable, unsigned id);
int __init htcleo_init_panel(void);
int htcleo_is_nand_boot(void);
unsigned htcleo_get_vbus_state(void);
void config_camera_on_gpios(void);
void config_camera_off_gpios(void);
#endif /* __ARCH_ARM_MACH_MSM_BOARD_HTCLEO_H */

View File

@ -34,6 +34,8 @@
//#define ENABLE_CLOCK_INFO 1
extern struct clk msm_clocks[];
static DEFINE_MUTEX(clocks_mutex);
static DEFINE_SPINLOCK(clocks_lock);
static LIST_HEAD(clocks);
@ -233,8 +235,16 @@ struct mdns_clock_params msm_clock_freq_parameters[] = {
MSM_CLOCK_REG(64000000,0x19, 0x60, 0x30, 0, 2, 4, 1, 245760000), /* BT, 4000000 (*16) */
};
int status_set_grp_clk = 0;
int i_set_grp_clk = 0;
int control_set_grp_clk;
static void set_grp_clk( int on )
{
int i = 0;
int status = 0;
int control;
if ( on != 0 )
{
//axi_reset
@ -274,8 +284,7 @@ static void set_grp_clk( int on )
writel(readl(MSM_CLK_CTL_BASE) |0x8, MSM_CLK_CTL_BASE);
//grp MD
writel(readl(MSM_CLK_CTL_BASE+0x80) |0x1, MSM_CLK_CTL_BASE+0x80); //PRPH_WEB_NS_REG
int i = 0;
int status = 0;
while ( status == 0 && i < 100) {
i++;
status = readl(MSM_CLK_CTL_BASE+0x84) & 0x1;
@ -297,7 +306,7 @@ static void set_grp_clk( int on )
writel(readl(MSM_CLK_CTL_BASE+0x290) |0x4, MSM_CLK_CTL_BASE+0x290); //MSM_RAIL_CLAMP_IO
writel( 0x11f, MSM_CLK_CTL_BASE+0x284); //VDD_GRP_GFS_CTL
int control = readl(MSM_CLK_CTL_BASE+0x288); //VDD_VDC_GFS_CTL
control = readl(MSM_CLK_CTL_BASE+0x288); //VDD_VDC_GFS_CTL
if ( control & 0x100 )
writel(readl(MSM_CLK_CTL_BASE) &(~(0x8)), MSM_CLK_CTL_BASE);
}
@ -1291,5 +1300,18 @@ static int __init clock_late_init(void)
//pr_info("reset imem_config\n");
return 0;
}
late_initcall(clock_late_init);
struct clk_ops clk_ops_pcom = {
.enable = pc_clk_enable,
.disable = pc_clk_disable,
.auto_off = pc_clk_disable,
// .reset = pc_clk_reset,
.set_rate = pc_clk_set_rate,
.set_min_rate = pc_clk_set_min_rate,
.set_max_rate = pc_clk_set_max_rate,
.set_flags = pc_clk_set_flags,
.get_rate = pc_clk_get_rate,
.is_enabled = pc_clk_is_enabled,
// .round_rate = pc_clk_round_rate,
};
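
With these ops registered, drivers reach the proc_comm clock services through the generic clk API. A hedged consumer sketch (clock name taken from the kgsl platform data earlier in this diff):

static int hypo_enable_grp_clk(struct device *dev)
{
	struct clk *grp = clk_get(dev, "grp_clk");
	int ret;

	if (IS_ERR(grp))
		return PTR_ERR(grp);
	clk_set_rate(grp, 128000000);	/* dispatched to pc_clk_set_rate */
	ret = clk_enable(grp);		/* dispatched to pc_clk_enable */
	if (ret)
		clk_put(grp);
	return ret;
}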

View File

@ -21,6 +21,7 @@
#include <linux/list.h>
#include <linux/err.h>
#include <linux/clk.h>
#include <mach/clk.h>
#include <linux/spinlock.h>
#include <linux/fs.h>
#include <linux/debugfs.h>

View File

@ -17,6 +17,13 @@
*
*/
#include <linux/workqueue.h>
#include <linux/completion.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/sched.h>
#include <linux/suspend.h>
#include <linux/cpufreq.h>
#include <linux/earlysuspend.h>
#include <linux/init.h>

View File

@ -138,13 +138,14 @@ dmov_exec_cmdptr_complete_func(struct msm_dmov_cmd *_cmd,
complete(&cmd->complete);
}
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr)
int msm_dmov_exec_cmd(unsigned id, unsigned int crci_mask, unsigned int cmdptr)
{
struct msm_dmov_exec_cmdptr_cmd cmd;
PRINT_FLOW("dmov_exec_cmdptr(%d, %x)\n", id, cmdptr);
cmd.dmov_cmd.cmdptr = cmdptr;
cmd.dmov_cmd.crci_mask = crci_mask;
cmd.dmov_cmd.complete_func = dmov_exec_cmdptr_complete_func;
cmd.dmov_cmd.execute_func = NULL;
cmd.id = id;

View File

@ -0,0 +1,29 @@
/* board-htcleo-audio.h
*
* Copyright (C) 2011 marc1706
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef HTCLEO_AUDIO_H
#define HTCLEO_AUDIO_H
void htcleo_headset_enable(int en);
void htcleo_speaker_enable(int en);
void htcleo_receiver_enable(int en);
void htcleo_bt_sco_enable(int en);
void htcleo_mic_enable(int en);
void htcleo_analog_init(void);
int htcleo_get_rx_vol(uint8_t hw, int level);
void __init htcleo_audio_init(void);
#endif // HTCLEO_AUDIO_H

View File

@ -0,0 +1,27 @@
/* arch/arm/mach-msm/include/mach/board-htcleo-mac.h
*
* Copyright (C) 2012 Marc Alexander.
* Author: Marc Alexander<admin@m-a-styles.de>
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __ARCH_ARM_MACH_MSM_BOARD_HTCLEO_MAC_H
#define __ARCH_ARM_MACH_MSM_BOARD_HTCLEO_MAC_H
#define NVS_MACADDR_SIZE 0x1AU
extern char nvs_mac_addr[NVS_MACADDR_SIZE];
#define BDADDR_STR_SIZE 18
extern char bdaddr[BDADDR_STR_SIZE]; /* AOSP style */
#endif

View File

@ -136,4 +136,14 @@ struct microp_i2c_client_data {
int microp_i2c_read(uint8_t addr, uint8_t *data, int length);
int microp_i2c_write(uint8_t addr, uint8_t *data, int length);
int capella_cm3602_power(int pwr_device, uint8_t enable);
int microp_read_gpo_status(uint16_t *status);
int microp_gpo_enable(uint16_t gpo_mask);
int microp_gpo_disable(uint16_t gpo_mask);
#ifdef CONFIG_HAS_EARLYSUSPEND
void microp_early_suspend(struct early_suspend *h);
void microp_early_resume(struct early_suspend *h);
#endif // CONFIG_HAS_EARLYSUSPEND
#endif
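
A sketch of board code driving a microP GPO through the newly exported helpers (the mask value is a made-up placeholder, not a real HD2 GPO assignment):

#define HYPO_GPO_AMBER_LED	(1 << 4)	/* placeholder mask */

static void hypo_set_amber_led(bool on)
{
	if (on)
		microp_gpo_enable(HYPO_GPO_AMBER_LED);
	else
		microp_gpo_disable(HYPO_GPO_AMBER_LED);
}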

View File

@ -0,0 +1,31 @@
/* board-htcleo-mmc.h
*
* Copyright (C) 2011 marc1706
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef HTCLEO_MMC_H
#define HTCLEO_MMC_H
static bool opt_disable_sdcard;
static void (*wifi_status_cb)(int card_present, void *dev_id);
static void *wifi_status_cb_devid;
static int htcleo_wifi_power_state;
static int htcleo_wifi_reset_state;
int htcleo_wifi_set_carddetect(int val);
int htcleo_wifi_power(int on);
int htcleo_wifi_reset(int on);
int __init htcleo_init_mmc(unsigned debug_uart);
#endif // HTCLEO_MMC_H

View File

@ -178,6 +178,18 @@ enum {
BOOTMODE_OFFMODE_CHARGING = 0x5,
};
void msm_hsusb_set_vbus_state(int online);
enum usb_connect_type {
CONNECT_TYPE_CLEAR = -2,
CONNECT_TYPE_UNKNOWN = -1,
CONNECT_TYPE_NONE = 0,
CONNECT_TYPE_USB,
CONNECT_TYPE_AC,
CONNECT_TYPE_9V_AC,
CONNECT_TYPE_WIRELESS,
CONNECT_TYPE_INTERNAL,
};
#define MSM_MAX_DEC_CNT 14
/* 7k target ADSP information */
/* Bit 23:0, for codec identification like mp3, wav etc *

View File

@ -32,6 +32,7 @@
#define NUM_AUTOFOCUS_MULTI_WINDOW_GRIDS 16
#define NUM_STAT_OUTPUT_BUFFERS 3
#define NUM_AF_STAT_OUTPUT_BUFFERS 3
#define max_control_command_size 150
enum msm_queue {
MSM_CAM_Q_CTRL, /* control command or control command status */

View File

@ -51,4 +51,7 @@ int clk_set_max_rate(struct clk *clk, unsigned long rate);
int clk_reset(struct clk *clk, enum clk_reset_action action);
int clk_set_flags(struct clk *clk, unsigned long flags);
unsigned long acpuclk_get_max_axi_rate(void);
#endif

View File

@ -0,0 +1,47 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*
*/
#ifndef __ARCH_ARM_MACH_MSM_DEBUG_MM_H_
#define __ARCH_ARM_MACH_MSM_DEBUG_MM_H_
/* The below macro removes the directory path name and retains only the
* file name, to avoid long path names in log messages that come as
* part of __FILE__ to the compiler.
*/
#define __MM_FILE__ strrchr(__FILE__, '/') ? (strrchr(__FILE__, '/')+1) : \
__FILE__
#define MM_DBG(fmt, args...) pr_debug("[%s] " fmt,\
__func__, ##args)
#define MM_INFO(fmt, args...) pr_info("[%s:%s] " fmt,\
__MM_FILE__, __func__, ##args)
#define MM_ERR(fmt, args...) pr_err("[%s:%s] " fmt,\
__MM_FILE__, __func__, ##args)
#endif /* __ARCH_ARM_MACH_MSM_DEBUG_MM_H_ */
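
What the macros buy at a call site, as a short sketch: MM_INFO()/MM_ERR() prefix the trimmed file name and function, while MM_DBG() compiles down to pr_debug() and stays silent unless debugging is enabled.

static void hypo_decoder_start(int fmt)
{
	MM_INFO("starting decoder, fmt %d\n", fmt);	/* "[file.c:func] ..." */
	if (fmt < 0)
		MM_ERR("bad format %d\n", fmt);
	MM_DBG("decoder state armed\n");		/* "[func] ..." */
}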

View File

@ -27,6 +27,7 @@ struct msm_dmov_errdata {
struct msm_dmov_cmd {
struct list_head list;
unsigned int cmdptr;
unsigned int crci_mask;
void (*complete_func)(struct msm_dmov_cmd *cmd,
unsigned int result,
struct msm_dmov_errdata *err);
@ -38,7 +39,7 @@ void msm_dmov_enqueue_cmd(unsigned id, struct msm_dmov_cmd *cmd);
void msm_dmov_enqueue_cmd_ext(unsigned id, struct msm_dmov_cmd *cmd);
void msm_dmov_stop_cmd(unsigned id, struct msm_dmov_cmd *cmd, int graceful);
void msm_dmov_flush(unsigned int id);
int msm_dmov_exec_cmd(unsigned id, unsigned int cmdptr);
int msm_dmov_exec_cmd(unsigned id, unsigned int crci_mask, unsigned int cmdptr);
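
Callers must now pass the CRCI mask alongside the command pointer. A hedged sketch of an updated call site (DMOV_CMD_PTR_LIST/DMOV_CMD_ADDR as in mach/dma.h; pass 0 when the channel uses no CRCI flow control):

static int hypo_run_cmd(unsigned id, dma_addr_t cmdptr_phys)
{
	return msm_dmov_exec_cmd(id, 0 /* crci_mask */,
				 DMOV_CMD_PTR_LIST | DMOV_CMD_ADDR(cmdptr_phys));
}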

View File

@ -0,0 +1,63 @@
/* Copyright (c) 2009, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are
* met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Code Aurora Forum, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED "AS IS" AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
* MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS
* BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
* WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
* OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN
* IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/
#ifndef _INTERNAL_POWER_RAIL_H
#define _INTERNAL_POWER_RAIL_H
/* Clock power rail IDs */
#define PWR_RAIL_GRP_CLK 8
#define PWR_RAIL_GRP_2D_CLK 58
#define PWR_RAIL_MDP_CLK 14
#define PWR_RAIL_MFC_CLK 68
#define PWR_RAIL_ROTATOR_CLK 90
#define PWR_RAIL_VDC_CLK 39
#define PWR_RAIL_VFE_CLK 41
#define PWR_RAIL_VPE_CLK 76
enum rail_ctl_mode {
PWR_RAIL_CTL_AUTO = 0,
PWR_RAIL_CTL_MANUAL,
};
static inline int __maybe_unused internal_pwr_rail_ctl(unsigned rail_id,
bool enable)
{
/* Not yet implemented. */
return 0;
}
static inline int __maybe_unused internal_pwr_rail_mode(unsigned rail_id,
enum rail_ctl_mode mode)
{
/* Not yet implemented. */
return 0;
}
int internal_pwr_rail_ctl_auto(unsigned rail_id, bool enable);
#endif /* _INTERNAL_POWER_RAIL_H */
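
A sketch of the expected call sequence for a rail consumer, mirroring the htcleo_kgsl_power_rail_mode()/htcleo_kgsl_power() pairing earlier in this diff: put the rail under manual control, then switch it:

static int hypo_grp_rail_up(void)
{
	int rc;

	rc = internal_pwr_rail_mode(PWR_RAIL_GRP_CLK, PWR_RAIL_CTL_MANUAL);
	if (rc)
		return rc;
	return internal_pwr_rail_ctl(PWR_RAIL_GRP_CLK, true);
}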

View File

@ -0,0 +1,23 @@
/**
*
* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#ifndef __MACH_ION_H_
#define __MACH_ION_H_
enum ion_memory_types {
ION_EBI_TYPE,
ION_SMI_TYPE,
};
#endif

View File

@ -311,6 +311,7 @@
#define INT_MDDI_CLIENT INT_MDC
#define INT_NAND_WR_ER_DONE INT_EBI2_WR_ER_DONE
#define INT_NAND_OP_DONE INT_EBI2_OP_DONE
#define INT_GRAPHICS INT_GRP_3D
#define NR_SIRC_IRQS 0

View File

@ -1,6 +1,7 @@
/* arch/arm/mach-msm/include/mach/memory.h
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -12,7 +13,6 @@
* GNU General Public License for more details.
*
*/
#ifndef __ASM_ARCH_MEMORY_H
#define __ASM_ARCH_MEMORY_H
@ -37,28 +37,41 @@
#define PHYS_OFFSET UL(0x10000000)
#endif
#define MAX_PHYSMEM_BITS 32
#define SECTION_SIZE_BITS 25
#define HAS_ARCH_IO_REMAP_PFN_RANGE
#define CONSISTENT_DMA_SIZE (4*SZ_1M)
#ifndef __ASSEMBLY__
void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment);
unsigned long allocate_contiguous_ebi_nomap(unsigned long, unsigned long);
void clean_and_invalidate_caches(unsigned long, unsigned long, unsigned long);
void clean_caches(unsigned long, unsigned long, unsigned long);
void invalidate_caches(unsigned long, unsigned long, unsigned long);
int platform_physical_remove_pages(unsigned long, unsigned long);
int platform_physical_add_pages(unsigned long, unsigned long);
int platform_physical_low_power_pages(unsigned long, unsigned long);
#ifdef CONFIG_ARCH_MSM_ARM11
void write_to_strongly_ordered_memory(void);
void map_zero_page_strongly_ordered(void);
#include <asm/mach-types.h>
#if defined(CONFIG_ARCH_MSM7227)
#ifdef CONFIG_ARCH_MSM7X27
#define arch_barrier_extra() do \
{ \
write_to_strongly_ordered_memory(); \
} while (0)
#else
#define arch_barrier_extra() do {} while (0)
#define arch_barrier_extra() do \
{ if (machine_is_msm7x27_surf() || machine_is_msm7x27_ffa()) \
write_to_strongly_ordered_memory(); \
} while (0)
#endif
#endif
#ifdef CONFIG_CACHE_L2X0
@ -67,12 +80,17 @@ extern void l2x0_cache_flush_all(void);
#define finish_arch_switch(prev) do { l2x0_cache_sync(); } while (0)
#endif
#endif
#endif
#ifdef CONFIG_ARCH_MSM_SCORPION
#define arch_has_speculative_dfetch() 1
#define arch_has_speculative_dfetch() 1
#endif
#endif
/* these correspond to values known by the modem */
#define MEMORY_DEEP_POWERDOWN 0
#define MEMORY_SELF_REFRESH 1
#define MEMORY_ACTIVE 2
#define NPA_MEMORY_NODE_NAME "/mem/ebi1/cs1"

View File

@ -0,0 +1,134 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* 1. Redistributions of source code must retain the above copyright
* notice, and the entire permission notice in its entirety,
* including the disclaimer of warranties.
* 2. Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* 3. The name of the author may not be used to endorse or promote
* products derived from this software without specific prior
* written permission.
*
* ALTERNATIVELY, this product may be distributed under the terms of
* the GNU General Public License, version 2, in which case the provisions
* of the GPL version 2 are required INSTEAD OF the BSD license.
*
* THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
* WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
* OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE, ALL OF
* WHICH ARE HEREBY DISCLAIMED. IN NO EVENT SHALL THE AUTHOR BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
* BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
* LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE
* USE OF THIS SOFTWARE, EVEN IF NOT ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#ifndef _ARCH_ARM_MACH_MSM_BUS_H
#define _ARCH_ARM_MACH_MSM_BUS_H
#include <linux/types.h>
#include <linux/input.h>
/*
* Macros for clients to convert their data to ib and ab
* Ws : Time window over which to transfer the data in SECONDS
* Bs : Size of the data block in bytes
* Per : Recurrence period
* Tb : Throughput bandwidth to prevent stalling
* R : Ratio of actual bandwidth used to Tb
* Ib : Instantaneous bandwidth
* Ab : Arbitrated bandwidth
*
* IB_RECURRBLOCK and AB_RECURRBLOCK:
* These are used if the requirement is to transfer a
* recurring block of data over a known time window.
*
* IB_THROUGHPUTBW and AB_THROUGHPUTBW:
* These are used for CPU style masters. Here the requirement
* is to have minimum throughput bandwidth available to avoid
* stalling.
*/
#define IB_RECURRBLOCK(Ws, Bs) ((Ws) == 0 ? 0 : ((Bs)/(Ws)))
#define AB_RECURRBLOCK(Ws, Bs, Per) ((Ws) == 0 ? 0 : ((Bs)/(Per)))
#define IB_THROUGHPUTBW(Tb) (Tb)
#define AB_THROUGHPUTBW(Tb, R) ((Tb) * (R))
struct msm_bus_vectors {
int src; /* Master */
int dst; /* Slave */
unsigned int ab; /* Arbitrated bandwidth */
unsigned int ib; /* Instantaneous bandwidth */
};
struct msm_bus_paths {
int num_paths;
struct msm_bus_vectors *vectors;
};
struct msm_bus_scale_pdata {
struct msm_bus_paths *usecase;
int num_usecases;
const char *name;
/*
* If the active_only flag is set to 1, the BW request is applied
* only when at least one CPU is active (powered on). If the flag
* is set to 0, then the BW request is always applied irrespective
* of the CPU state.
*/
unsigned int active_only;
};
/* Scaling APIs */
/*
* This function returns a handle to the client. This should be used to
* call msm_bus_scale_client_update_request.
* The function returns 0 if the bus driver is unable to register the client.
*/
#ifdef CONFIG_MSM_BUS_SCALING
uint32_t msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata);
int msm_bus_scale_client_update_request(uint32_t cl, unsigned int index);
void msm_bus_scale_unregister_client(uint32_t cl);
/* AXI Port configuration APIs */
int msm_bus_axi_porthalt(int master_port);
int msm_bus_axi_portunhalt(int master_port);
#else
static inline uint32_t
msm_bus_scale_register_client(struct msm_bus_scale_pdata *pdata)
{
return 1;
}
static inline int
msm_bus_scale_client_update_request(uint32_t cl, unsigned int index)
{
return 0;
}
static inline void
msm_bus_scale_unregister_client(uint32_t cl)
{
}
static inline int msm_bus_axi_porthalt(int master_port)
{
return 0;
}
static inline int msm_bus_axi_portunhalt(int master_port)
{
return 0;
}
#endif
#endif /*_ARCH_ARM_MACH_MSM_BUS_H*/
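
A hedged registration sketch (master/slave IDs and bandwidth numbers are placeholders): one idle and one active usecase, voted between at runtime:

static struct msm_bus_vectors hypo_idle_vec[] = {
	{ .src = 1, .dst = 512, .ab = 0, .ib = 0 },
};

static struct msm_bus_vectors hypo_active_vec[] = {
	{ .src = 1, .dst = 512, .ab = 200000000, .ib = 400000000 },
};

static struct msm_bus_paths hypo_usecases[] = {
	{ .num_paths = 1, .vectors = hypo_idle_vec },
	{ .num_paths = 1, .vectors = hypo_active_vec },
};

static struct msm_bus_scale_pdata hypo_bus_pdata = {
	.usecase = hypo_usecases,
	.num_usecases = ARRAY_SIZE(hypo_usecases),
	.name = "hypo_client",
};

static uint32_t hypo_bus_cl;

static void hypo_bus_vote(bool active)
{
	if (!hypo_bus_cl)
		hypo_bus_cl = msm_bus_scale_register_client(&hypo_bus_pdata);
	if (hypo_bus_cl)
		msm_bus_scale_client_update_request(hypo_bus_cl, active ? 1 : 0);
}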

View File

@ -27,10 +27,12 @@ struct mddi_info;
#define MSM_MDP_OUT_IF_FMT_RGB888 2
/* mdp override operations */
#define MSM_MDP_PANEL_IGNORE_PIXEL_DATA (1 << 0)
#define MSM_MDP_PANEL_IGNORE_PIXEL_DATA (1 << 0)
#define MSM_MDP_PANEL_FLIP_UD (1 << 1)
#define MSM_MDP_PANEL_FLIP_LR (1 << 2)
#define MSM_MDP4_MDDI_DMA_SWITCH (1 << 3)
#define MSM_MDP_DMA_PACK_ALIGN_LSB (1 << 4)
#define MSM_MDP_RGB_PANEL_SELF_REFRESH (1 << 5)
/* mddi type */
#define MSM_MDP_MDDI_TYPE_I 0
@ -190,6 +192,7 @@ struct msm_lcdc_panel_ops {
int (*uninit)(struct msm_lcdc_panel_ops *);
int (*blank)(struct msm_lcdc_panel_ops *);
int (*unblank)(struct msm_lcdc_panel_ops *);
int (*shutdown)(struct msm_lcdc_panel_ops *);
};
struct msm_lcdc_platform_data {
@ -211,6 +214,8 @@ struct msm_tvenc_platform_data {
struct mdp_blit_req;
struct fb_info;
struct mdp_overlay;
struct msmfb_overlay_data;
struct mdp_device {
struct device dev;
void (*dma)(struct mdp_device *mdp, uint32_t addr,
@ -227,14 +232,17 @@ struct mdp_device {
int (*overlay_unset)(struct mdp_device *mdp, struct fb_info *fb,
int ndx);
int (*overlay_play)(struct mdp_device *mdp, struct fb_info *fb,
struct msmfb_overlay_data *req, struct file *p_src_file);
struct msmfb_overlay_data *req, struct file **p_src_file);
#endif
void (*set_grp_disp)(struct mdp_device *mdp, uint32_t disp_id);
void (*configure_dma)(struct mdp_device *mdp);
int (*check_output_format)(struct mdp_device *mdp, int bpp);
int (*set_output_format)(struct mdp_device *mdp, int bpp);
void (*set_panel_size)(struct mdp_device *mdp, int width, int height);
unsigned color_format;
unsigned overrides;
uint32_t width; /*panel width*/
uint32_t height; /*panel height*/
};
struct class_interface;

View File

@ -47,8 +47,18 @@ struct msm_hsusb_platform_data {
/* 1 : uart, 0 : usb */
void (*usb_uart_switch)(int);
void (*config_usb_id_gpios)(bool enable);
/* val, reg pairs terminated by -1 */
int *phy_init_seq;
void (*usb_hub_enable)(bool);
void (*serial_debug_gpios)(int);
int (*china_ac_detect)(void);
void (*disable_usb_charger)(void);
/* val, reg pairs terminated by -1 */
int *phy_init_seq;
void (*change_phy_voltage)(int);
int (*ldo_init) (int init);
int (*ldo_enable) (int enable);
int (*rpc_connect)(int);
/* 1 : mhl, 0 : usb */
void (*usb_mhl_switch)(bool);
#ifdef CONFIG_USB_FUNCTION
/* USB device descriptor fields */
@ -74,10 +84,15 @@ struct msm_hsusb_platform_data {
int num_products;
struct msm_hsusb_product *products;
#endif
char *serial_number;
int usb_id_pin_gpio;
bool enable_car_kit_detect;
__u8 accessory_detect;
char *serial_number;
int usb_id_pin_gpio;
int dock_pin_gpio;
int id_pin_irq;
bool enable_car_kit_detect;
__u8 accessory_detect;
bool dock_detect;
int ac_9v_gpio;
};
int usb_get_connect_type(void);

View File

@ -37,11 +37,30 @@
do { } while (0)
#endif /* VERBOSE */
#ifndef __LINUX_USB_COMPOSITE_H
#define ERROR(fmt,args...) \
xprintk(KERN_ERR , fmt , ## args)
#define INFO(fmt,args...) \
xprintk(KERN_INFO , fmt , ## args)
#endif
#define USB_ERR(fmt, args...) \
printk(KERN_ERR "[USB:ERR] " fmt, ## args)
#define USB_WARNING(fmt, args...) \
printk(KERN_WARNING "[USB] " fmt, ## args)
#define USB_INFO(fmt, args...) \
printk(KERN_INFO "[USB] " fmt, ## args)
#define USB_DEBUG(fmt, args...) \
printk(KERN_DEBUG "[USB] " fmt, ## args)
#define USBH_ERR(fmt, args...) \
printk(KERN_ERR "[USBH:ERR] " fmt, ## args)
#define USBH_WARNING(fmt, args...) \
printk(KERN_WARNING "[USBH] " fmt, ## args)
#define USBH_INFO(fmt, args...) \
printk(KERN_INFO "[USBH] " fmt, ## args)
#define USBH_DEBUG(fmt, args...) \
printk(KERN_DEBUG "[USBH] " fmt, ## args)
/*-------------------------------------------------------------------------*/
@ -51,9 +70,12 @@
#define USB_HWDEVICE (MSM_USB_BASE + 0x000C)
#define USB_HWTXBUF (MSM_USB_BASE + 0x0010)
#define USB_HWRXBUF (MSM_USB_BASE + 0x0014)
#define USB_AHBBURST (MSM_USB_BASE + 0x0090)
#define USB_AHBMODE (MSM_USB_BASE + 0x0098)
#define USB_AHB_BURST (MSM_USB_BASE + 0x0090)
#define USB_AHB_MODE (MSM_USB_BASE + 0x0098)
#define USB_AHBBURST (USB_AHB_BURST)
#define USB_AHBMODE (USB_AHB_MODE)
#define USB_SBUSCFG (MSM_USB_BASE + 0x0090)
#define USB_ROC_AHB_MODE (MSM_USB_BASE + 0x0090)
#define USB_CAPLENGTH (MSM_USB_BASE + 0x0100) /* 8 bit */
#define USB_HCIVERSION (MSM_USB_BASE + 0x0102) /* 16 bit */
@ -82,12 +104,26 @@
#define USB_ENDPTCTRL(n) (MSM_USB_BASE + 0x01C0 + (4 * (n)))
#define USBCMD_RESET 2
#define USBCMD_ATTACH 1
#define USBCMD_ATDTW (1 << 14)
#define USBCMD_RESET 2
#define USBCMD_ATTACH 1
#define USBCMD_RS (1 << 0) /* run/stop bit */
#define USBCMD_ATDTW (1 << 14)
#define ASYNC_INTR_CTRL (1 << 29)
#define ULPI_STP_CTRL (1 << 30)
#define USBCMD_ITC(n) (n << 16)
#define USBCMD_ITC_MASK (0xFF << 16)
#define USBMODE_DEVICE 2
#define USBMODE_HOST 3
/* Redefine the SDIS bit, as it is defined incorrectly in ehci.h. */
#ifdef USBMODE_SDIS
#undef USBMODE_SDIS
#endif
#define USBMODE_SDIS (1 << 4) /* stream disable */
#define USBMODE_VBUS (1 << 5) /* vbus power select */
struct ept_queue_head {
unsigned config;
@ -138,7 +174,7 @@ struct ept_queue_item {
#define STS_NAKI (1 << 16) /* */
#define STS_SLI (1 << 8) /* R/WC - suspend state entered */
#define STS_SRI (1 << 7) /* R/WC - SOF recv'd */
#define STS_URI (1 << 6) /* R/WC - RESET recv'd - write to clear */
#define STS_URI (1 << 6) /* R/WC - RESET recv'd */
#define STS_FRI (1 << 3) /* R/WC - Frame List Rollover */
#define STS_PCI (1 << 2) /* R/WC - Port Change Detect */
#define STS_UEI (1 << 1) /* R/WC - USB Error */
@ -175,6 +211,38 @@ struct ept_queue_item {
#define CTRL_RXT_INT (3 << 2)
#define CTRL_RXT_EP_TYPE_SHIFT 2
#if defined(CONFIG_ARCH_MSM7X30) || defined(CONFIG_ARCH_MSM8X60)
#define ULPI_DIGOUT_CTRL 0X36
#define ULPI_CDR_AUTORESET (1 << 1)
#else
#define ULPI_DIGOUT_CTRL 0X31
#define ULPI_CDR_AUTORESET (1 << 5)
#endif
#define ULPI_FUNC_CTRL_CLR (0x06)
#define ULPI_IFC_CTRL_CLR (0x09)
#define ULPI_AMPLITUDE_MAX (0x0C)
#define ULPI_OTG_CTRL (0x0B)
#define ULPI_OTG_CTRL_CLR (0x0C)
#define ULPI_INT_RISE_CLR (0x0F)
#define ULPI_INT_FALL_CLR (0x12)
#define ULPI_DEBUG_REG (0x15)
#define ULPI_SCRATCH_REG (0x16)
#define ULPI_CONFIG_REG1 (0x30)
#define ULPI_CONFIG_REG2 (0X31)
#define ULPI_CONFIG_REG (0x31)
#define ULPI_CONFIG_REG3 (0X32)
#define ULPI_CHG_DETECT_REG (0x34)
#define ULPI_PRE_EMPHASIS_MASK (3 << 4)
#define ULPI_DRV_AMPL_MASK (3 << 2)
#define ULPI_ONCLOCK (1 << 6)
#define ULPI_FUNC_SUSPENDM (1 << 6)
#define ULPI_IDPU (1 << 0)
#define ULPI_HOST_DISCONNECT (1 << 0)
#define ULPI_VBUS_VALID (1 << 1)
#define ULPI_SE1_GATE (1 << 2)
#define ULPI_SESS_END (1 << 3)
#define ULPI_ID_GND (1 << 4)
#define ULPI_WAKEUP (1 << 31)
#define ULPI_RUN (1 << 30)
#define ULPI_WRITE (1 << 29)
@ -184,12 +252,17 @@ struct ept_queue_item {
#define ULPI_DATA(n) ((n) & 255)
#define ULPI_DATA_READ(n) (((n) >> 8) & 255)
#define ULPI_DEBUG_REG (0x15)
#define ULPI_SCRATCH_REG (0x16)
/* control charger detection by ULPI or externally */
#define ULPI_EXTCHGCTRL_65NM (1 << 2)
#define ULPI_EXTCHGCTRL_180NM (1 << 3)
#define ULPI_FUNC_CTRL_CLR (0x06)
#define ULPI_FUNC_SUSPENDM (1 << 6)
/* charger detection power on control */
#define ULPI_CHGDETON (1 << 1)
/* enable charger detection */
#define ULPI_CHGDETEN (1 << 0)
#define ULPI_CHGTYPE_65NM (1 << 3)
#define ULPI_CHGTYPE_180NM (1 << 4)
/* USB_PORTSC bits for determining port speed */
#define PORTSC_PSPD_FS (0 << 26)
@ -218,6 +291,30 @@ struct ept_queue_item {
#define PORTSC_FPR (1 << 6) /* R/W - State normal => suspend */
#define PORTSC_SUSP (1 << 7) /* Read - Port in suspend state */
#define PORTSC_LS (3 << 10) /* Read - Port's Line status */
#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
#define PORTSC_CCS (1 << 0) /* current connect status */
#define PORTSC_PTS (3 << 30)
#define PORTSC_PTS_ULPI (2 << 30)
#define PORTSC_PTS_SERIAL (3 << 30)
#define PORTSC_PORT_SPEED_FULL 0x00000000
#define PORTSC_PORT_SPEED_LOW 0x04000000
#define PORTSC_PORT_SPEED_HIGH 0x08000000
#define PORTSC_PORT_SPEED_MASK 0x0c000000
#define SBUSCFG_AHBBRST_INCR4 0x01
#define ULPI_USBINTR_ENABLE_FALLING_S 0x11
#define ULPI_USBINTR_ENABLE_FALLING_C 0x12
#define ULPI_USBINTR_STATUS 0x13
#define ULPI_USBINTR_ENABLE_RASING_S 0x0E
#define ULPI_USBINTR_ENABLE_RASING_C 0x0F
#define ULPI_SESSION_END_RAISE (1 << 3)
#define ULPI_SESSION_END_FALL (1 << 3)
#define ULPI_SESSION_VALID_RAISE (1 << 2)
#define ULPI_SESSION_VALID_FALL (1 << 2)
#define ULPI_VBUS_VALID_RAISE (1 << 1)
#define ULPI_VBUS_VALID_FALL (1 << 1)
#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
#define PORTSC_CCS (1 << 0) /* current connect status */
#define PORTSC_PTS (3 << 30)
@ -238,6 +335,9 @@ struct ept_queue_item {
#define PORTSC_PTC_SE0_NAK (0x03 << 16)
#define PORTSC_PTC_TST_PKT (0x04 << 16)
#define USBH (1 << 15)
#define USB_PHY (1 << 18)
#define PORTSC_PTS_MASK (3 << 30)
#define PORTSC_PTS_ULPI (2 << 30)
#define PORTSC_PTS_SERIAL (3 << 30)
@ -250,5 +350,9 @@ struct ept_queue_item {
#define PORTSC_PHCD (1 << 23) /* phy suspend mode */
#define ULPI_DEBUG 0x15
#define ULPI_CLOCK_SUSPENDM (1 << 3)
#define ULPI_SUSPENDM (1 << 6)
#endif /* _USB_FUNCTION_MSM_HSUSB_HW_H */
#define ULPI_CALIB_STS (1 << 7)
#define ULPI_CALIB_VAL(x) (x & 0x7C)
#endif /* __LINUX_USB_GADGET_MSM72K_UDC_H__ */


@ -0,0 +1,64 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
/* The MSM Hardware supports multiple flavors of physical memory.
* This file captures hardware specific information of these types.
*/
#ifndef __ASM_ARCH_MSM_MEMTYPES_H
#define __ASM_ARCH_MSM_MEMTYPES_H
#include <mach/memory.h>
#include <linux/init.h>
int __init meminfo_init(unsigned int, unsigned int);
/* Redundant check to prevent this from being included outside of 7x30 */
#if defined(CONFIG_ARCH_MSM7X30)
unsigned int get_num_populated_chipselects(void);
#endif
unsigned int get_num_memory_banks(void);
unsigned int get_memory_bank_size(unsigned int);
unsigned int get_memory_bank_start(unsigned int);
int soc_change_memory_power(u64, u64, int);
enum {
MEMTYPE_NONE = -1,
MEMTYPE_SMI_KERNEL = 0,
MEMTYPE_SMI,
MEMTYPE_EBI0,
MEMTYPE_EBI1,
MEMTYPE_MAX,
};
void msm_reserve(void);
#define MEMTYPE_FLAGS_FIXED 0x1
#define MEMTYPE_FLAGS_1M_ALIGN 0x2
struct memtype_reserve {
unsigned long start;
unsigned long size;
unsigned long limit;
int flags;
};
struct reserve_info {
struct memtype_reserve *memtype_reserve_table;
void (*calculate_reserve_sizes)(void);
int (*paddr_to_memtype)(unsigned int);
unsigned long low_unstable_address;
unsigned long max_unstable_size;
unsigned long bank_size;
};
extern struct reserve_info *reserve_info;
#endif
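To show how a board file consumes the interface above, here is a minimal sketch, with entirely hypothetical names and sizes and a single-bank assumption: populate a memtype_reserve table, point reserve_info at it, and let msm_reserve() carve the memory out during boot.
/* Hypothetical board-file sketch; table contents are illustrative only. */
static struct memtype_reserve board_reserve_table[MEMTYPE_MAX];

static void board_calculate_reserve_sizes(void)
{
	/* e.g. set aside 2 MB of EBI0 for an imaginary PMEM region */
	board_reserve_table[MEMTYPE_EBI0].size += 2 * SZ_1M;
}

static int board_paddr_to_memtype(unsigned int paddr)
{
	return MEMTYPE_EBI0;	/* single memory bank assumed */
}

static struct reserve_info board_reserve_info = {
	.memtype_reserve_table = board_reserve_table,
	.calculate_reserve_sizes = board_calculate_reserve_sizes,
	.paddr_to_memtype = board_paddr_to_memtype,
};

/* in machine init: reserve_info = &board_reserve_info; then msm_reserve(); */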


@ -132,6 +132,8 @@ uint32_t msm_rpc_get_vers(struct msm_rpc_endpoint *ept);
/* check if server version can handle client requested version */
int msm_rpc_is_compatible_version(uint32_t server_version,
uint32_t client_version);
struct msm_rpc_endpoint *msm_rpc_connect_compatible(uint32_t prog,
uint32_t vers, unsigned flags);
int msm_rpc_close(struct msm_rpc_endpoint *ept);
int msm_rpc_write(struct msm_rpc_endpoint *ept,
@ -164,7 +166,7 @@ struct msm_rpc_xdr {
void *in_buf;
uint32_t in_size;
uint32_t in_index;
struct mutex in_lock;
wait_queue_head_t in_buf_wait_q;
void *out_buf;
uint32_t out_size;
@ -174,6 +176,22 @@ struct msm_rpc_xdr {
struct msm_rpc_endpoint *ept;
};
int xdr_send_int8(struct msm_rpc_xdr *xdr, const int8_t *value);
int xdr_send_uint8(struct msm_rpc_xdr *xdr, const uint8_t *value);
int xdr_send_int16(struct msm_rpc_xdr *xdr, const int16_t *value);
int xdr_send_uint16(struct msm_rpc_xdr *xdr, const uint16_t *value);
int xdr_send_int32(struct msm_rpc_xdr *xdr, const int32_t *value);
int xdr_send_uint32(struct msm_rpc_xdr *xdr, const uint32_t *value);
int xdr_send_bytes(struct msm_rpc_xdr *xdr, const void **data, uint32_t *size);
int xdr_recv_int8(struct msm_rpc_xdr *xdr, int8_t *value);
int xdr_recv_uint8(struct msm_rpc_xdr *xdr, uint8_t *value);
int xdr_recv_int16(struct msm_rpc_xdr *xdr, int16_t *value);
int xdr_recv_uint16(struct msm_rpc_xdr *xdr, uint16_t *value);
int xdr_recv_int32(struct msm_rpc_xdr *xdr, int32_t *value);
int xdr_recv_uint32(struct msm_rpc_xdr *xdr, uint32_t *value);
int xdr_recv_bytes(struct msm_rpc_xdr *xdr, void **data, uint32_t *size);
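The xdr_send_*/xdr_recv_* helpers above marshal RPC arguments to and from the wire encoding one field at a time. A short sketch of encoding a two-argument call follows; the field names are invented, and binding the xdr to an endpoint and transmitting it (xdr_send_msg() appears later in this document) is outside this fragment.
/* Illustrative only: argument names are hypothetical. */
static int example_encode_args(struct msm_rpc_xdr *xdr,
			       uint32_t handle, uint16_t mode)
{
	int rc;

	rc = xdr_send_uint32(xdr, &handle);	/* first argument */
	if (rc)
		return rc;

	return xdr_send_uint16(xdr, &mode);	/* second argument */
}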
struct msm_rpc_server
{
struct list_head list;


@ -63,6 +63,8 @@ int smd_wait_until_writable(smd_channel_t *ch, int bytes);
#endif
int smd_wait_until_opened(smd_channel_t *ch, int timeout_us);
int smd_total_fifo_size(smd_channel_t *ch);
typedef enum
{
SMD_PORT_DS = 0,


@ -43,38 +43,19 @@ static int msm_irq_debug_mask;
module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IWGRP);
#define VIC_REG(off) (MSM_VIC_BASE + (off))
#if defined(CONFIG_ARCH_MSM7X30)
#define VIC_INT_TO_REG_ADDR(base, irq) (base + (irq / 32) * 4)
#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 3)
#else
#define VIC_INT_TO_REG_ADDR(base, irq) (base + ((irq & 32) ? 4 : 0))
#define VIC_INT_TO_REG_INDEX(irq) ((irq >> 5) & 1)
#endif
#define VIC_INT_SELECT0 VIC_REG(0x0000) /* 1: FIQ, 0: IRQ */
#define VIC_INT_SELECT1 VIC_REG(0x0004) /* 1: FIQ, 0: IRQ */
#define VIC_INT_SELECT2 VIC_REG(0x0008) /* 1: FIQ, 0: IRQ */
#define VIC_INT_SELECT3 VIC_REG(0x000C) /* 1: FIQ, 0: IRQ */
#define VIC_INT_EN0 VIC_REG(0x0010)
#define VIC_INT_EN1 VIC_REG(0x0014)
#define VIC_INT_EN2 VIC_REG(0x0018)
#define VIC_INT_EN3 VIC_REG(0x001C)
#define VIC_INT_ENCLEAR0 VIC_REG(0x0020)
#define VIC_INT_ENCLEAR1 VIC_REG(0x0024)
#define VIC_INT_ENCLEAR2 VIC_REG(0x0028)
#define VIC_INT_ENCLEAR3 VIC_REG(0x002C)
#define VIC_INT_ENSET0 VIC_REG(0x0030)
#define VIC_INT_ENSET1 VIC_REG(0x0034)
#define VIC_INT_ENSET2 VIC_REG(0x0038)
#define VIC_INT_ENSET3 VIC_REG(0x003C)
#define VIC_INT_TYPE0 VIC_REG(0x0040) /* 1: EDGE, 0: LEVEL */
#define VIC_INT_TYPE1 VIC_REG(0x0044) /* 1: EDGE, 0: LEVEL */
#define VIC_INT_TYPE2 VIC_REG(0x0048) /* 1: EDGE, 0: LEVEL */
#define VIC_INT_TYPE3 VIC_REG(0x004C) /* 1: EDGE, 0: LEVEL */
#define VIC_INT_POLARITY0 VIC_REG(0x0050) /* 1: NEG, 0: POS */
#define VIC_INT_POLARITY1 VIC_REG(0x0054) /* 1: NEG, 0: POS */
#define VIC_INT_POLARITY2 VIC_REG(0x0058) /* 1: NEG, 0: POS */
#define VIC_INT_POLARITY3 VIC_REG(0x005C) /* 1: NEG, 0: POS */
#define VIC_NO_PEND_VAL VIC_REG(0x0060)
#if defined(CONFIG_ARCH_MSM_SCORPION)
@ -88,24 +69,14 @@ module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IW
#endif
#define VIC_IRQ_STATUS0 VIC_REG(0x0080)
#define VIC_IRQ_STATUS1 VIC_REG(0x0084)
#define VIC_IRQ_STATUS2 VIC_REG(0x0088)
#define VIC_IRQ_STATUS3 VIC_REG(0x008C)
#define VIC_FIQ_STATUS0 VIC_REG(0x0090)
#define VIC_FIQ_STATUS1 VIC_REG(0x0094)
#define VIC_FIQ_STATUS2 VIC_REG(0x0098)
#define VIC_FIQ_STATUS3 VIC_REG(0x009C)
#define VIC_RAW_STATUS0 VIC_REG(0x00A0)
#define VIC_RAW_STATUS1 VIC_REG(0x00A4)
#define VIC_RAW_STATUS2 VIC_REG(0x00A8)
#define VIC_RAW_STATUS3 VIC_REG(0x00AC)
#define VIC_INT_CLEAR0 VIC_REG(0x00B0)
#define VIC_INT_CLEAR1 VIC_REG(0x00B4)
#define VIC_INT_CLEAR2 VIC_REG(0x00B8)
#define VIC_INT_CLEAR3 VIC_REG(0x00BC)
#define VIC_SOFTINT0 VIC_REG(0x00C0)
#define VIC_SOFTINT1 VIC_REG(0x00C4)
#define VIC_SOFTINT2 VIC_REG(0x00C8)
#define VIC_SOFTINT3 VIC_REG(0x00CC)
#define VIC_IRQ_VEC_RD VIC_REG(0x00D0) /* pending int # */
#define VIC_IRQ_VEC_PEND_RD VIC_REG(0x00D4) /* pending vector addr */
#define VIC_IRQ_VEC_WR VIC_REG(0x00D8)
@ -129,40 +100,14 @@ module_param_named(debug_mask, msm_irq_debug_mask, int, S_IRUGO | S_IWUSR | S_IW
#define VIC_VECTPRIORITY(n) VIC_REG(0x0200+((n) * 4))
#define VIC_VECTADDR(n) VIC_REG(0x0400+((n) * 4))
#if defined(CONFIG_ARCH_MSM7X30)
#define VIC_NUM_REGS 4
#else
#define VIC_NUM_REGS 2
#endif
#if VIC_NUM_REGS == 2
#define DPRINT_REGS(base_reg, format, ...) \
printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
readl(base_reg ## 0), readl(base_reg ## 1))
#define DPRINT_ARRAY(array, format, ...) \
printk(KERN_INFO format " %x %x\n", ##__VA_ARGS__, \
array[0], array[1])
#elif VIC_NUM_REGS == 4
#define DPRINT_REGS(base_reg, format, ...) \
printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
readl(base_reg ## 0), readl(base_reg ## 1), \
readl(base_reg ## 2), readl(base_reg ## 3))
#define DPRINT_ARRAY(array, format, ...) \
printk(KERN_INFO format " %x %x %x %x\n", ##__VA_ARGS__, \
array[0], array[1], \
array[2], array[3])
#else
#error "VIC_NUM_REGS set to illegal value"
#endif
static uint32_t msm_irq_smsm_wake_enable[2];
static struct {
uint32_t int_en[2];
uint32_t int_type;
uint32_t int_polarity;
uint32_t int_select;
} msm_irq_shadow_reg[VIC_NUM_REGS];
static uint32_t msm_irq_idle_disable[VIC_NUM_REGS];
} msm_irq_shadow_reg[2];
static uint32_t msm_irq_idle_disable[2];
#if defined(CONFIG_MSM_N_WAY_SMD)
#define INT_INFO_SMSM_ID SMEM_APPS_DEM_SLAVE_DATA
@ -198,9 +143,7 @@ static uint8_t msm_irq_to_smsm[NR_MSM_IRQS + NR_SIRC_IRQS] = {
[INT_UART1DM_IRQ] = 17,
[INT_UART1DM_RX] = 18,
[INT_KEYSENSE] = 19,
#if !defined(CONFIG_ARCH_MSM7X30)
[INT_AD_HSSD] = 20,
#endif
[INT_NAND_WR_ER_DONE] = 21,
[INT_NAND_OP_DONE] = 22,
@ -226,31 +169,23 @@ static uint8_t msm_irq_to_smsm[NR_MSM_IRQS + NR_SIRC_IRQS] = {
[INT_GP_TIMER_EXP] = SMSM_FAKE_IRQ,
[INT_DEBUG_TIMER_EXP] = SMSM_FAKE_IRQ,
[INT_ADSP_A11] = SMSM_FAKE_IRQ,
#ifdef CONFIG_ARCH_QSD8X50
#ifdef CONFIG_ARCH_MSM_SCORPION
[INT_SIRC_0] = SMSM_FAKE_IRQ,
[INT_SIRC_1] = SMSM_FAKE_IRQ,
#endif
};
static inline void msm_irq_write_all_regs(void __iomem *base, unsigned int val)
{
int i;
/* the register addresses must be contiguous */
for (i = 0; i < VIC_NUM_REGS; i++)
writel(val, base + (i * 4));
}
static void msm_irq_ack(unsigned int irq)
{
void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_CLEAR0, irq);
void __iomem *reg = VIC_INT_CLEAR0 + ((irq & 32) ? 4 : 0);
irq = 1 << (irq & 31);
writel(irq, reg);
}
static void msm_irq_mask(unsigned int irq)
{
void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENCLEAR0, irq);
unsigned index = VIC_INT_TO_REG_INDEX(irq);
void __iomem *reg = VIC_INT_ENCLEAR0 + ((irq & 32) ? 4 : 0);
unsigned index = (irq >> 5) & 1;
uint32_t mask = 1UL << (irq & 31);
int smsm_irq = msm_irq_to_smsm[irq];
@ -266,8 +201,8 @@ static void msm_irq_mask(unsigned int irq)
static void msm_irq_unmask(unsigned int irq)
{
void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_ENSET0, irq);
unsigned index = VIC_INT_TO_REG_INDEX(irq);
void __iomem *reg = VIC_INT_ENSET0 + ((irq & 32) ? 4 : 0);
unsigned index = (irq >> 5) & 1;
uint32_t mask = 1UL << (irq & 31);
int smsm_irq = msm_irq_to_smsm[irq];
@ -284,7 +219,7 @@ static void msm_irq_unmask(unsigned int irq)
static int msm_irq_set_wake(unsigned int irq, unsigned int on)
{
unsigned index = VIC_INT_TO_REG_INDEX(irq);
unsigned index = (irq >> 5) & 1;
uint32_t mask = 1UL << (irq & 31);
int smsm_irq = msm_irq_to_smsm[irq];
@ -310,9 +245,9 @@ static int msm_irq_set_wake(unsigned int irq, unsigned int on)
static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
{
void __iomem *treg = VIC_INT_TO_REG_ADDR(VIC_INT_TYPE0, irq);
void __iomem *preg = VIC_INT_TO_REG_ADDR(VIC_INT_POLARITY0, irq);
unsigned index = VIC_INT_TO_REG_INDEX(irq);
void __iomem *treg = VIC_INT_TYPE0 + ((irq & 32) ? 4 : 0);
void __iomem *preg = VIC_INT_POLARITY0 + ((irq & 32) ? 4 : 0);
unsigned index = (irq >> 5) & 1;
int b = 1 << (irq & 31);
uint32_t polarity;
uint32_t type;
@ -341,24 +276,16 @@ static int msm_irq_set_type(unsigned int irq, unsigned int flow_type)
int msm_irq_pending(void)
{
int i, pending = 0;
/* the register addresses must be contiguous */
for (i = 0; (i < VIC_NUM_REGS) && !pending; i++)
pending |= readl(VIC_IRQ_STATUS0 + (i * 4));
return pending;
return readl(VIC_IRQ_STATUS0) || readl(VIC_IRQ_STATUS1);
}
int msm_irq_idle_sleep_allowed(void)
{
int i, disable = 0;
if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_REQUEST)
DPRINT_ARRAY(msm_irq_idle_disable,
"msm_irq_idle_sleep_allowed: disable");
for (i = 0; i < VIC_NUM_REGS; i++)
disable |= msm_irq_idle_disable[i];
return !(disable || !smsm_int_info);
printk(KERN_INFO "msm_irq_idle_sleep_allowed: disable %x %x\n",
msm_irq_idle_disable[0], msm_irq_idle_disable[1]);
return !(msm_irq_idle_disable[0] || msm_irq_idle_disable[1] ||
!smsm_int_info);
}
/* If arm9_wake is set: pass control to the other core.
@ -374,8 +301,8 @@ void msm_irq_enter_sleep1(bool arm9_wake, int from_idle)
int msm_irq_enter_sleep2(bool arm9_wake, int from_idle)
{
int i, limit = 10;
uint32_t pending[VIC_NUM_REGS];
int limit = 10;
uint32_t pending0, pending1;
if (from_idle && !arm9_wake)
return 0;
@ -384,25 +311,23 @@ int msm_irq_enter_sleep2(bool arm9_wake, int from_idle)
WARN_ON_ONCE(!arm9_wake && !from_idle);
if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP)
DPRINT_REGS(VIC_IRQ_STATUS, "%s change irq, pend", __func__);
for (i = 0; i < VIC_NUM_REGS; i++) {
pending[i] = readl(VIC_IRQ_STATUS0 + (i * 4));
pending[i] &= msm_irq_shadow_reg[i].int_en[!from_idle];
}
printk(KERN_INFO "msm_irq_enter_sleep change irq, pend %x %x\n",
readl(VIC_IRQ_STATUS0), readl(VIC_IRQ_STATUS1));
pending0 = readl(VIC_IRQ_STATUS0);
pending1 = readl(VIC_IRQ_STATUS1);
pending0 &= msm_irq_shadow_reg[0].int_en[!from_idle];
/* Clear INT_A9_M2A_5 since requesting sleep triggers it */
pending[0] &= ~(1U << INT_A9_M2A_5);
for (i = 0; i < VIC_NUM_REGS; i++) {
if (pending[i]) {
if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT)
DPRINT_ARRAY(pending, "%s abort",
__func__);
return -EAGAIN;
}
pending0 &= ~(1U << INT_A9_M2A_5);
pending1 &= msm_irq_shadow_reg[1].int_en[!from_idle];
if (pending0 || pending1) {
if (msm_irq_debug_mask & IRQ_DEBUG_SLEEP_ABORT)
printk(KERN_INFO "msm_irq_enter_sleep2 abort %x %x\n",
pending0, pending1);
return -EAGAIN;
}
msm_irq_write_all_regs(VIC_INT_EN0, 0);
writel(0, VIC_INT_EN0);
writel(0, VIC_INT_EN1);
while (limit-- > 0) {
int pend_irq;
@ -420,9 +345,8 @@ int msm_irq_enter_sleep2(bool arm9_wake, int from_idle)
msm_irq_ack(INT_A9_M2A_6);
writel(1U << INT_A9_M2A_6, VIC_INT_ENSET0);
} else {
for (i = 0; i < VIC_NUM_REGS; i++)
writel(msm_irq_shadow_reg[i].int_en[1],
VIC_INT_ENSET0 + (i * 4));
writel(msm_irq_shadow_reg[0].int_en[1], VIC_INT_ENSET0);
writel(msm_irq_shadow_reg[1].int_en[1], VIC_INT_ENSET1);
}
return 0;
}
@ -433,7 +357,7 @@ void msm_irq_exit_sleep1(void)
msm_irq_ack(INT_A9_M2A_6);
msm_irq_ack(INT_PWB_I2C);
for (i = 0; i < VIC_NUM_REGS; i++) {
for (i = 0; i < 2; i++) {
writel(msm_irq_shadow_reg[i].int_type, VIC_INT_TYPE0 + i * 4);
writel(msm_irq_shadow_reg[i].int_polarity, VIC_INT_POLARITY0 + i * 4);
writel(msm_irq_shadow_reg[i].int_en[0], VIC_INT_EN0 + i * 4);
@ -527,16 +451,20 @@ void __init msm_init_irq(void)
unsigned n;
/* select level interrupts */
msm_irq_write_all_regs(VIC_INT_TYPE0, 0);
writel(0, VIC_INT_TYPE0);
writel(0, VIC_INT_TYPE1);
/* select highlevel interrupts */
msm_irq_write_all_regs(VIC_INT_POLARITY0, 0);
writel(0, VIC_INT_POLARITY0);
writel(0, VIC_INT_POLARITY1);
/* select IRQ for all INTs */
msm_irq_write_all_regs(VIC_INT_SELECT0, 0);
writel(0, VIC_INT_SELECT0);
writel(0, VIC_INT_SELECT1);
/* disable all INTs */
msm_irq_write_all_regs(VIC_INT_EN0, 0);
writel(0, VIC_INT_EN0);
writel(0, VIC_INT_EN1);
/* don't use 1136 vic */
writel(0, VIC_CONFIG);
@ -565,7 +493,7 @@ late_initcall(msm_init_irq_late);
#if defined(CONFIG_MSM_FIQ_SUPPORT)
void msm_trigger_irq(int irq)
{
void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_SOFTINT0, irq);
void __iomem *reg = VIC_SOFTINT0 + ((irq & 32) ? 4 : 0);
uint32_t mask = 1UL << (irq & 31);
writel(mask, reg);
}
@ -588,8 +516,8 @@ void msm_fiq_disable(int irq)
static void _msm_fiq_select(int irq)
{
void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq);
unsigned index = VIC_INT_TO_REG_INDEX(irq);
void __iomem *reg = VIC_INT_SELECT0 + ((irq & 32) ? 4 : 0);
unsigned index = (irq >> 5) & 1;
uint32_t mask = 1UL << (irq & 31);
unsigned long flags;
@ -601,8 +529,8 @@ static void _msm_fiq_select(int irq)
static void _msm_fiq_unselect(int irq)
{
void __iomem *reg = VIC_INT_TO_REG_ADDR(VIC_INT_SELECT0, irq);
unsigned index = VIC_INT_TO_REG_INDEX(irq);
void __iomem *reg = VIC_INT_SELECT0 + ((irq & 32) ? 4 : 0);
unsigned index = (irq >> 5) & 1;
uint32_t mask = 1UL << (irq & 31);
unsigned long flags;


@ -16,10 +16,19 @@
#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/memory_alloc.h>
#include <linux/module.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
unsigned long pfn, unsigned long size, pgprot_t prot)
@ -34,7 +43,7 @@ int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
void *zero_page_strongly_ordered;
static void map_zero_page_strongly_ordered(void)
void map_zero_page_strongly_ordered(void)
{
if (zero_page_strongly_ordered)
return;
@ -43,12 +52,15 @@ static void map_zero_page_strongly_ordered(void)
ioremap_strongly_ordered(page_to_pfn(empty_zero_page)
<< PAGE_SHIFT, PAGE_SIZE);
}
EXPORT_SYMBOL(map_zero_page_strongly_ordered);
void write_to_strongly_ordered_memory(void)
{
map_zero_page_strongly_ordered();
*(int *)zero_page_strongly_ordered = 0;
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
void flush_axi_bus_buffer(void)
{
__asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
@ -109,3 +121,57 @@ void invalidate_caches(unsigned long vstart,
flush_axi_bus_buffer();
}
void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
void *unused_addr = NULL;
unsigned long addr, tmp_size, unused_size;
/* Allocate maximum size needed, see where it ends up.
* Then free it -- in this path there are no other allocators
* so we can depend on getting the same address back
* when we allocate a smaller piece that is aligned
* at the end (if necessary) and the piece we really want,
* then free the unused first piece.
*/
tmp_size = size + alignment - PAGE_SIZE;
addr = (unsigned long)alloc_bootmem(tmp_size);
free_bootmem(__pa(addr), tmp_size);
unused_size = alignment - (addr % alignment);
if (unused_size)
unused_addr = alloc_bootmem(unused_size);
addr = (unsigned long)alloc_bootmem(size);
if (unused_size)
free_bootmem(__pa(unused_addr), unused_size);
return (void *)addr;
}
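Since the comment above compresses the trick considerably, a worked example with hypothetical numbers may help: request 1 MB aligned to 1 MB, and suppose the probe allocation lands at 0x00234000.
/* Worked example (addresses hypothetical):
 *   probe:  alloc_bootmem(1 MB + 1 MB - PAGE_SIZE) returns 0x00234000
 *           and is freed straight back.
 *   head:   unused_size = 1 MB - (0x00234000 % 1 MB) = 0xCC000; this
 *           re-allocation soaks up the unaligned prefix.
 *   final:  alloc_bootmem(1 MB) now starts at 0x00300000, which is
 *           1 MB aligned; the 0xCC000 head piece is freed again.
 */
static void __init example_aligned_reserve(void)
{
	void *buf = alloc_bootmem_aligned(SZ_1M, SZ_1M);	/* usage sketch */
	(void)buf;
}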
int platform_physical_remove_pages(unsigned long start_pfn,
unsigned long nr_pages)
{
return 0;
}
int platform_physical_add_pages(unsigned long start_pfn,
unsigned long nr_pages)
{
return 0;
}
int platform_physical_low_power_pages(unsigned long start_pfn,
unsigned long nr_pages)
{
return 0;
}
unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
unsigned long align)
{
return _allocate_contiguous_memory_nomap(size, MEMTYPE_EBI0,
align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);


@ -4,6 +4,7 @@
* bootloader.
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2008-2009, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@ -22,7 +23,7 @@
#include <linux/platform_device.h>
#include <asm/mach/flash.h>
#include <asm/io.h>
#include <linux/io.h>
#include <asm/setup.h>
@ -38,47 +39,26 @@
#define ATAG_MSM_PARTITION 0x4d534D70 /* MSMp */
struct msm_ptbl_entry
{
struct msm_ptbl_entry {
char name[16];
__u32 offset;
__u32 size;
__u32 flags;
};
#define MSM_MAX_PARTITIONS 11
#define MSM_MAX_PARTITIONS 8
static struct mtd_partition msm_nand_partitions[MSM_MAX_PARTITIONS];
static char msm_nand_names[MSM_MAX_PARTITIONS * 16];
extern struct flash_platform_data msm_nand_data;
int emmc_partition_read_proc(char *page, char **start, off_t off,
int count, int *eof, void *data)
{
struct mtd_partition *ptn = msm_nand_partitions;
char *p = page;
int i;
uint64_t offset;
uint64_t size;
p += sprintf(p, "dev: size erasesize name\n");
for (i = 0; i < MSM_MAX_PARTITIONS && ptn->name; i++, ptn++) {
offset = ptn->offset;
size = ptn->size;
p += sprintf(p, "mmcblk0p%llu: %08llx %08x \"%s\"\n", offset, size * 512, 512, ptn->name);
}
return p - page;
}
static int __init parse_tag_msm_partition(const struct tag *tag)
{
struct mtd_partition *ptn = msm_nand_partitions;
char *name = msm_nand_names;
struct msm_ptbl_entry *entry = (void *) &tag->u;
unsigned count, n;
unsigned have_kpanic = 0;
count = (tag->hdr.size - 2) /
(sizeof(struct msm_ptbl_entry) / sizeof(__u32));
@ -90,9 +70,6 @@ static int __init parse_tag_msm_partition(const struct tag *tag)
memcpy(name, entry->name, 15);
name[15] = 0;
if (!strcmp(name, "kpanic"))
have_kpanic = 1;
ptn->name = name;
ptn->offset = entry->offset;
ptn->size = entry->size;
@ -102,42 +79,6 @@ static int __init parse_tag_msm_partition(const struct tag *tag)
ptn++;
}
#ifdef CONFIG_VIRTUAL_KPANIC_PARTITION
if (!have_kpanic) {
int i;
uint64_t kpanic_off = 0;
if (count == MSM_MAX_PARTITIONS) {
printk("Cannot create virtual 'kpanic' partition\n");
goto out;
}
for (i = 0; i < count; i++) {
ptn = &msm_nand_partitions[i];
if (!strcmp(ptn->name, CONFIG_VIRTUAL_KPANIC_SRC)) {
ptn->size -= CONFIG_VIRTUAL_KPANIC_PSIZE;
kpanic_off = ptn->offset + ptn->size;
break;
}
}
if (i == count) {
printk(KERN_ERR "Partition %s not found\n",
CONFIG_VIRTUAL_KPANIC_SRC);
goto out;
}
ptn = &msm_nand_partitions[count];
ptn->name ="kpanic";
ptn->offset = kpanic_off;
ptn->size = CONFIG_VIRTUAL_KPANIC_PSIZE;
printk("Virtual mtd partition '%s' created @%llx (%llu)\n",
ptn->name, ptn->offset, ptn->size);
count++;
}
out:
#endif /* CONFIG_VIRTUAL_KPANIC_PARTITION */
msm_nand_data.nr_parts = count;
msm_nand_data.parts = msm_nand_partitions;


@ -142,13 +142,15 @@ int msm_irq_idle_sleep_allowed(void);
int msm_irq_pending(void);
int clks_allow_tcxo_locked_debug(void);
extern int board_mfg_mode(void);
extern char * board_get_mfg_sleep_gpio_table(void);
extern unsigned long * board_get_mfg_sleep_gpio_table(void);
extern void gpio_set_diag_gpio_table(unsigned long * dwMFG_gpio_table);
extern void wait_rmt_final_call_back(int timeout);
#ifdef CONFIG_AXI_SCREEN_POLICY
static int axi_rate;
static int sleep_axi_rate;
static struct clk *axi_clk;
#endif
static uint32_t *msm_pm_reset_vector;
static uint32_t msm_pm_max_sleep_time;
@ -654,8 +656,8 @@ static int msm_wakeup_after; /* default, no wakeup by alarm */
static int msm_power_wakeup_after(const char *val, struct kernel_param *kp)
{
int ret;
struct uart_port *port;
struct msm_port *msm_port;
//struct uart_port *port;
//struct msm_port *msm_port;
ret = param_set_int(val, kp);
printk(KERN_INFO "+msm_power_wakeup_after, ret=%d\r\n", ret);
@ -681,7 +683,7 @@ static void msm_pm_power_off(void)
pmic_glb_power_down();
#if CONFIG_MSM_RMT_STORAGE_SERVER
#ifdef CONFIG_MSM_RMT_STORAGE_SERVER
printk(KERN_INFO "from %s\r\n", __func__);
wait_rmt_final_call_back(10);
printk(KERN_INFO "back %s\r\n", __func__);
@ -715,7 +717,7 @@ void msm_pm_flush_console(void)
}
#if defined(CONFIG_MACH_HTCLEO)
static void htcleo_save_reset_reason()
static void htcleo_save_reset_reason(void)
{
/* save restart_reason so it is accessible to the bootloader @ ramconsole - 0x1000 */
uint32_t *bootloader_reset_reason = ioremap(0x2FFB0000, PAGE_SIZE);
@ -728,7 +730,7 @@ static void htcleo_save_reset_reason()
}
#endif
static void msm_pm_restart(char str)
static void msm_pm_restart(char str, const char *cmd)
{
msm_pm_flush_console();
@ -742,7 +744,7 @@ static void msm_pm_restart(char str)
else
msm_proc_comm(PCOM_RESET_CHIP, &restart_reason, 0);
#if CONFIG_MSM_RMT_STORAGE_SERVER
#ifdef CONFIG_MSM_RMT_STORAGE_SERVER
printk(KERN_INFO "from %s\r\n", __func__);
wait_rmt_final_call_back(10);
printk(KERN_INFO "back %s\r\n", __func__);
@ -775,6 +777,9 @@ static int msm_reboot_call(struct notifier_block *this, unsigned long code, void
} else if (!strncmp(cmd, "oem-", 4)) {
unsigned code = simple_strtoul(cmd + 4, 0, 16) & 0xff;
restart_reason = 0x6f656d00 | code;
} else if (!strncmp(cmd, "S", 1)) {
unsigned code = simple_strtoul(cmd + 1, 0, 16) & 0x00ffffff;
restart_reason = 0x53000000 | code;
} else if (!strcmp(cmd, "force-hard")) {
restart_reason = 0x776655AA;
} else {
@ -858,6 +863,7 @@ void msm_pm_set_max_sleep_time(int64_t max_sleep_time_ns)
EXPORT_SYMBOL(msm_pm_set_max_sleep_time);
#ifdef CONFIG_EARLYSUSPEND
#ifdef CONFIG_AXI_SCREEN_POLICY
/* axi 128 screen on, 61mhz screen off */
static void axi_early_suspend(struct early_suspend *handler)
{
@ -877,7 +883,9 @@ static struct early_suspend axi_screen_suspend = {
.resume = axi_late_resume,
};
#endif
#endif
#ifdef CONFIG_AXI_SCREEN_POLICY
static void __init msm_pm_axi_init(void)
{
#ifdef CONFIG_EARLYSUSPEND
@ -895,19 +903,18 @@ static void __init msm_pm_axi_init(void)
axi_rate = 0;
#endif
}
#endif
static int __init msm_pm_init(void)
{
pm_power_off = msm_pm_power_off;
arm_pm_restart = msm_pm_restart;
msm_pm_max_sleep_time = 0;
#if defined(CONFIG_ARCH_MSM_SCORPION)
#ifdef CONFIG_AXI_SCREEN_POLICY
msm_pm_axi_init();
#endif
#endif
register_reboot_notifier(&msm_reboot_notifier);
msm_pm_reset_vector = ioremap(0x0, PAGE_SIZE);
#if defined(CONFIG_MACH_HTCLEO)


@ -1,6 +1,7 @@
/* arch/arm/mach-msm/proc_comm.c
*
* Copyright (C) 2007-2008 Google, Inc.
* Copyright (c) 2009-2010, Code Aurora Forum. All rights reserved.
* Author: Brian Swetland <swetland@google.com>
*
* This software is licensed under the terms of the GNU General Public
@ -18,24 +19,24 @@
#include <linux/errno.h>
#include <linux/io.h>
#include <linux/spinlock.h>
#include <linux/module.h>
#include <mach/msm_iomap.h>
#include <mach/system.h>
#include "proc_comm.h"
#include "smd_private.h"
#if defined(CONFIG_ARCH_MSM7X30)
#define MSM_TRIG_A2M_INT(n) (writel(1 << n, MSM_GCC_BASE + 0x8))
#define MSM_TRIG_A2M_PC_INT (writel(1 << 6, MSM_GCC_BASE + 0x8))
#elif defined(CONFIG_ARCH_MSM8X60)
#define MSM_TRIG_A2M_PC_INT (writel(1 << 5, MSM_GCC_BASE + 0x8))
#else
#define MSM_TRIG_A2M_PC_INT (writel(1, MSM_CSR_BASE + 0x400 + (6) * 4))
#endif
#define MSM_A2M_INT(n) (MSM_CSR_BASE + 0x400 + (n) * 4)
static inline void notify_other_proc_comm(void)
{
#if defined(CONFIG_ARCH_MSM7X30)
MSM_TRIG_A2M_INT(6);
#else
writel(1, MSM_A2M_INT(6));
#endif
MSM_TRIG_A2M_PC_INT;
}
#define APP_COMMAND 0x00
@ -50,69 +51,84 @@ static inline void notify_other_proc_comm(void)
static DEFINE_SPINLOCK(proc_comm_lock);
/* The higher level SMD support will install this to
* provide a way to check for and handle modem restart.
*/
int (*msm_check_for_modem_crash)(void);
/* Poll for a state change, checking for possible
* modem crashes along the way (so we don't wait
* forever while the ARM9 is blowing up).
* forever while the ARM9 is blowing up).
*
* Return an error in the event of a modem crash and
* restart so the msm_proc_comm() routine can restart
* the operation from the beginning.
*/
static int proc_comm_wait_for(void __iomem *addr, unsigned value)
static int proc_comm_wait_for(unsigned addr, unsigned value)
{
for (;;) {
while (1) {
if (readl(addr) == value)
return 0;
if (msm_check_for_modem_crash)
if (msm_check_for_modem_crash())
return -EAGAIN;
if (smsm_check_for_modem_crash())
return -EAGAIN;
udelay(5);
}
}
void msm_proc_comm_reset_modem_now(void)
{
unsigned base = (unsigned)MSM_SHARED_RAM_BASE;
unsigned long flags;
spin_lock_irqsave(&proc_comm_lock, flags);
again:
if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
goto again;
writel(PCOM_RESET_MODEM, base + APP_COMMAND);
writel(0, base + APP_DATA1);
writel(0, base + APP_DATA2);
spin_unlock_irqrestore(&proc_comm_lock, flags);
notify_other_proc_comm();
return;
}
EXPORT_SYMBOL(msm_proc_comm_reset_modem_now);
int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2)
{
void __iomem *base = MSM_SHARED_RAM_BASE;
unsigned base = (unsigned)MSM_SHARED_RAM_BASE;
unsigned long flags;
int ret;
spin_lock_irqsave(&proc_comm_lock, flags);
for (;;) {
if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
continue;
again:
if (proc_comm_wait_for(base + MDM_STATUS, PCOM_READY))
goto again;
writel(cmd, base + APP_COMMAND);
writel(data1 ? *data1 : 0, base + APP_DATA1);
writel(data2 ? *data2 : 0, base + APP_DATA2);
writel(cmd, base + APP_COMMAND);
writel(data1 ? *data1 : 0, base + APP_DATA1);
writel(data2 ? *data2 : 0, base + APP_DATA2);
notify_other_proc_comm();
notify_other_proc_comm();
if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE))
continue;
if (proc_comm_wait_for(base + APP_COMMAND, PCOM_CMD_DONE))
goto again;
if (readl(base + APP_STATUS) != PCOM_CMD_FAIL) {
if (data1)
*data1 = readl(base + APP_DATA1);
if (data2)
*data2 = readl(base + APP_DATA2);
ret = 0;
} else {
ret = -EIO;
}
break;
if (readl(base + APP_STATUS) == PCOM_CMD_SUCCESS) {
if (data1)
*data1 = readl(base + APP_DATA1);
if (data2)
*data2 = readl(base + APP_DATA2);
ret = 0;
} else {
ret = -EIO;
}
writel(PCOM_CMD_IDLE, base + APP_COMMAND);
spin_unlock_irqrestore(&proc_comm_lock, flags);
return ret;
}
EXPORT_SYMBOL(msm_proc_comm);
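As a usage note: msm_proc_comm() is the synchronous mailbox into the modem, and data1/data2 are in/out parameters the modem may overwrite with results. A short sketch, mirroring the PCOM_RESET_CHIP call made from the power-management code elsewhere in this series:
/* Sketch: request a chip reset with an explicit reason code; the value
 * shown is the "force-hard" code used by msm_pm_restart in this series. */
static void example_reset_chip(void)
{
	unsigned reason = 0x776655AA;

	if (msm_proc_comm(PCOM_RESET_CHIP, &reason, 0))
		pr_err("proc_comm: reset request failed\n");
}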


@ -1,6 +1,6 @@
/* arch/arm/mach-msm/proc_comm.h
*
* Copyright (c) 2007 QUALCOMM Incorporated
* Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -179,7 +179,18 @@ enum {
PCOM_CLKCTL_RPC_RAIL_DISABLE,
PCOM_CLKCTL_RPC_RAIL_CONTROL,
PCOM_CLKCTL_RPC_MIN_MSMC1,
PCOM_NUM_CMDS,
PCOM_CLKCTL_RPC_SRC_REQUEST,
PCOM_NPA_INIT,
PCOM_NPA_ISSUE_REQUIRED_REQUEST,
};
enum {
PCOM_OEM_FIRST_CMD = 0x10000000,
PCOM_OEM_TEST_CMD = PCOM_OEM_FIRST_CMD,
/* add OEM PROC COMM commands here */
PCOM_OEM_LAST = PCOM_OEM_TEST_CMD,
};
enum {
@ -199,7 +210,6 @@ enum {
PCOM_CMD_FAIL_SMSM_NOT_INIT,
PCOM_CMD_FAIL_PROC_COMM_BUSY,
PCOM_CMD_FAIL_PROC_COMM_NOT_INIT,
};
/* List of VREGs that support the Pull Down Resistor setting. */
@ -294,6 +304,7 @@ enum {
(((pull) & 0x3) << 15) | \
(((drvstr) & 0xF) << 17))
void msm_proc_comm_reset_modem_now(void);
int msm_proc_comm(unsigned cmd, unsigned *data1, unsigned *data2);
#endif


@ -14,6 +14,7 @@
*
*/
#include <linux/gpio.h>
#include <linux/module.h>
#include "devices.h"
#include "proc_comm.h"

arch/arm/mach-msm/qdsp6_1550/msm_q6vdec.c Normal file → Executable file

@ -61,7 +61,7 @@
#define VDEC_GET_MAJOR_VERSION(version) (((version)&MAJOR_MASK)>>16)
#define VDEC_GET_MINOR_VERSION(version) ((version)&MINOR_MASK)
//#define DEBUG_TRACE_VDEC
#ifdef DEBUG_TRACE_VDEC
#define TRACE(fmt,x...) \
do { pr_debug("%s:%d " fmt, __func__, __LINE__, ##x); } while (0)
@ -69,6 +69,8 @@
#define TRACE(fmt,x...) do { } while (0)
#endif
/* the version check causes the vdec to hang, so it is disabled */
#define VERSION_CHECK 0
static DEFINE_MUTEX(idlecount_lock);
static int idlecount;
@ -696,7 +698,7 @@ static long vdec_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
break;
default:
pr_err("%s: invalid ioctl!\n", __func__);
pr_err("%s: invalid ioctl! cmd= %08x \n", __func__,cmd);
ret = -EINVAL;
break;
}
@ -799,8 +801,9 @@ static int vdec_open(struct inode *inode, struct file *file)
int i;
struct vdec_msg_list *l;
struct vdec_data *vd;
#if VERSION_CHECK
struct dal_info version_info;
#endif
pr_info("q6vdec_open()\n");
mutex_lock(&vdec_ref_lock);
if (ref_cnt >= MAX_SUPPORTED_INSTANCES) {
@ -845,6 +848,7 @@ static int vdec_open(struct inode *inode, struct file *file)
ret = -EIO;
goto vdec_open_err_handle_list;
}
#if VERSION_CHECK
ret = dal_call_f9(vd->vdec_handle, DAL_OP_INFO,
&version_info, sizeof(struct dal_info));
@ -859,12 +863,15 @@ static int vdec_open(struct inode *inode, struct file *file)
pr_err("%s: driver version mismatch !\n", __func__);
goto vdec_open_err_handle_version;
}
#endif
vd->running = 1;
prevent_sleep();
return 0;
#if VERSION_CHECK
vdec_open_err_handle_version:
dal_detach(vd->vdec_handle);
#endif
vdec_open_err_handle_list:
{
struct vdec_msg_list *l, *n;


@ -1,6 +1,6 @@
/* linux/arch/arm/mach-msm/irq.c
*
* Copyright (c) 2009 QUALCOMM Incorporated.
* Copyright (c) 2009-2010 Code Aurora Forum. All rights reserved.
* Copyright (C) 2009 Google, Inc.
*
* This software is licensed under the terms of the GNU General Public
@ -188,6 +188,12 @@ static void sirc_irq_handler(unsigned int irq, struct irq_desc *desc)
(sirc_reg_table[reg].cascade_irq != irq))
reg++;
if (reg == ARRAY_SIZE(sirc_reg_table)) {
printk(KERN_ERR "%s: incorrect irq %d called\n",
__func__, irq);
return;
}
status = readl(sirc_reg_table[reg].int_status);
status &= SIRC_MASK;
if (status == 0)


@ -16,12 +16,20 @@
#ifndef _ARCH_ARM_MACH_MSM_SIRC_H
#define _ARCH_ARM_MACH_MSM_SIRC_H
#ifdef CONFIG_ARCH_QSD8X50
#ifdef CONFIG_ARCH_MSM_SCORPION
void sirc_fiq_select(int irq, bool enable);
void __init msm_init_sirc(void);
#else
static inline void sirc_fiq_select(int irq, bool enable) {}
#endif
#ifdef CONFIG_ARCH_QSD8X50
void __init msm_init_sirc(void);
void msm_sirc_enter_sleep(void);
void msm_sirc_exit_sleep(void);
#else
static inline void __init msm_init_sirc(void) {}
static inline void msm_sirc_enter_sleep(void) { }
static inline void msm_sirc_exit_sleep(void) { }
#endif
#endif


@ -140,16 +140,18 @@ static void handle_modem_crash(void)
;
}
extern int (*msm_check_for_modem_crash)(void);
uint32_t raw_smsm_get_state(enum smsm_state_item item)
{
return readl(smd_info.state + item * 4);
}
static int check_for_modem_crash(void)
int smsm_check_for_modem_crash(void)
{
if (raw_smsm_get_state(SMSM_STATE_MODEM) & SMSM_RESET) {
/* if the modem's not ready yet, we have to hope for the best */
if (!smd_info.state)
return 0;
if (raw_smsm_get_state(SMSM_MODEM_STATE) & SMSM_RESET) {
handle_modem_crash();
return -1;
}
@ -381,17 +383,18 @@ static void update_packet_state(struct smd_channel *ch)
int r;
/* can't do anything if we're in the middle of a packet */
if (ch->current_packet != 0)
return;
while (ch->current_packet == 0) {
/* discard 0 length packets if any */
/* don't bother unless we can get the full header */
if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
return;
/* don't bother unless we can get the full header */
if (smd_stream_read_avail(ch) < SMD_HEADER_SIZE)
return;
r = ch_read(ch, hdr, SMD_HEADER_SIZE);
BUG_ON(r != SMD_HEADER_SIZE);
r = ch_read(ch, hdr, SMD_HEADER_SIZE);
BUG_ON(r != SMD_HEADER_SIZE);
ch->current_packet = hdr[0];
ch->current_packet = hdr[0];
}
}
/* provide a pointer and length to next free space in the fifo */
@ -490,7 +493,7 @@ static void handle_smd_irq(struct list_head *list, void (*notify)(void))
#ifdef CONFIG_BUILD_CIQ
/* put here to make sure we got the disable/enable index */
if (!msm_smd_ciq_info)
msm_smd_ciq_info = (*(volatile uint32_t *)(MSM_SHARED_RAM_BASE + 0xFC11C));
msm_smd_ciq_info = (*(volatile uint32_t *)(MSM_SHARED_RAM_BASE + SMD_CIQ_BASE));
#endif
spin_lock_irqsave(&smd_lock, flags);
list_for_each_entry(ch, list, ch_list) {
@ -641,6 +644,8 @@ static int smd_stream_write(smd_channel_t *ch, const void *_data, int len)
if (len < 0)
return -EINVAL;
else if (len == 0)
return 0;
while ((xfer = ch_write_buffer(ch, &ptr)) != 0) {
if (!ch_is_open(ch))
@ -911,6 +916,7 @@ int smd_close(smd_channel_t *ch)
return 0;
}
EXPORT_SYMBOL(smd_close);
int smd_read(smd_channel_t *ch, void *data, int len)
{
@ -922,6 +928,7 @@ int smd_write(smd_channel_t *ch, const void *data, int len)
{
return ch->write(ch, data, len);
}
EXPORT_SYMBOL(smd_write);
int smd_write_atomic(smd_channel_t *ch, const void *data, int len)
{
@ -944,6 +951,7 @@ int smd_write_avail(smd_channel_t *ch)
{
return ch->write_avail(ch);
}
EXPORT_SYMBOL_GPL(smd_write_avail);
int smd_wait_until_readable(smd_channel_t *ch, int bytes)
{
@ -981,6 +989,11 @@ int smd_cur_packet_size(smd_channel_t *ch)
}
EXPORT_SYMBOL(smd_cur_packet_size);
/* Returns SMD buffer size */
int smd_total_fifo_size(smd_channel_t *ch)
{
return ch->fifo_size;
}
/* ------------------------------------------------------------------------- */
@ -1227,8 +1240,6 @@ static int __init msm_smd_probe(struct platform_device *pdev)
do_smd_probe();
msm_check_for_modem_crash = check_for_modem_crash;
msm_init_last_radio_log(THIS_MODULE);
smd_initialized = 1;


@ -1,7 +1,7 @@
/* arch/arm/mach-msm/smd_private.h
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2007 QUALCOMM Incorporated
* Copyright (c) 2007-2010, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
@ -16,6 +16,9 @@
#ifndef _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
#define _ARCH_ARM_MACH_MSM_MSM_SMD_PRIVATE_H_
#include <linux/types.h>
#include <linux/spinlock.h>
struct smem_heap_info {
unsigned initialized;
unsigned free_offset;
@ -46,12 +49,15 @@ struct smem_proc_comm {
#define VERSION_MODEM_SBL 7
#define VERSION_APPS 8
#define VERSION_MODEM 9
#define VERSION_DSPS 10
#define SMD_HEAP_SIZE 512
struct smem_shared {
struct smem_proc_comm proc_comm[4];
unsigned version[32];
struct smem_heap_info heap_info;
struct smem_heap_entry heap_toc[512];
struct smem_heap_entry heap_toc[SMD_HEAP_SIZE];
};
#define SMSM_V1_SIZE (sizeof(unsigned) * 8)
@ -89,41 +95,70 @@ struct smsm_interrupt_info {
};
#endif
#if defined(CONFIG_MSM_N_WAY_SMSM)
enum {
SMSM_APPS_STATE,
SMSM_MODEM_STATE,
SMSM_Q6_STATE,
SMSM_APPS_DEM,
SMSM_MODEM_DEM,
SMSM_Q6_DEM,
SMSM_POWER_MASTER_DEM,
SMSM_TIME_MASTER_DEM,
SMSM_NUM_ENTRIES,
};
#else
enum {
SMSM_APPS_STATE = 1,
SMSM_MODEM_STATE = 3,
SMSM_NUM_ENTRIES,
};
#endif
enum {
SMSM_APPS,
SMSM_MODEM,
SMSM_Q6,
SMSM_NUM_HOSTS,
};
#define SZ_DIAG_ERR_MSG 0xC8
#define ID_DIAG_ERR_MSG SMEM_DIAG_ERR_MESSAGE
#define ID_SMD_CHANNELS SMEM_SMD_BASE_ID
#define ID_SHARED_STATE SMEM_SMSM_SHARED_STATE
#define ID_CH_ALLOC_TBL SMEM_CHANNEL_ALLOC_TBL
#define SMSM_INIT 0x00000001
#define SMSM_OSENTERED 0x00000002
#define SMSM_SMDWAIT 0x00000004
#define SMSM_SMDINIT 0x00000008
#define SMSM_RPCWAIT 0x00000010
#define SMSM_RPCINIT 0x00000020
#define SMSM_RESET 0x00000040
#define SMSM_RSA 0x00000080
#define SMSM_RUN 0x00000100
#define SMSM_PWRC 0x00000200
#define SMSM_TIMEWAIT 0x00000400
#define SMSM_TIMEINIT 0x00000800
#define SMSM_PWRC_EARLY_EXIT 0x00001000
#define SMSM_WFPI 0x00002000
#define SMSM_SLEEP 0x00004000
#define SMSM_SLEEPEXIT 0x00008000
#define SMSM_OEMSBL_RELEASE 0x00010000
#define SMSM_APPS_REBOOT 0x00020000
#define SMSM_SYSTEM_POWER_DOWN 0x00040000
#define SMSM_SYSTEM_REBOOT 0x00080000
#define SMSM_SYSTEM_DOWNLOAD 0x00100000
#define SMSM_PWRC_SUSPEND 0x00200000
#define SMSM_APPS_SHUTDOWN 0x00400000
#define SMSM_SMD_LOOPBACK 0x00800000
#define SMSM_RUN_QUIET 0x01000000
#define SMSM_MODEM_WAIT 0x02000000
#define SMSM_MODEM_BREAK 0x04000000
#define SMSM_MODEM_CONTINUE 0x08000000
#define SMSM_UNKNOWN 0x80000000
#define SMSM_INIT 0x00000001
#define SMSM_OSENTERED 0x00000002
#define SMSM_SMDWAIT 0x00000004
#define SMSM_SMDINIT 0x00000008
#define SMSM_RPCWAIT 0x00000010
#define SMSM_RPCINIT 0x00000020
#define SMSM_RESET 0x00000040
#define SMSM_RSA 0x00000080
#define SMSM_RUN 0x00000100
#define SMSM_PWRC 0x00000200
#define SMSM_TIMEWAIT 0x00000400
#define SMSM_TIMEINIT 0x00000800
#define SMSM_PWRC_EARLY_EXIT 0x00001000
#define SMSM_WFPI 0x00002000
#define SMSM_SLEEP 0x00004000
#define SMSM_SLEEPEXIT 0x00008000
#define SMSM_OEMSBL_RELEASE 0x00010000
#define SMSM_APPS_REBOOT 0x00020000
#define SMSM_SYSTEM_POWER_DOWN 0x00040000
#define SMSM_SYSTEM_REBOOT 0x00080000
#define SMSM_SYSTEM_DOWNLOAD 0x00100000
#define SMSM_PWRC_SUSPEND 0x00200000
#define SMSM_APPS_SHUTDOWN 0x00400000
#define SMSM_SMD_LOOPBACK 0x00800000
#define SMSM_RUN_QUIET 0x01000000
#define SMSM_MODEM_WAIT 0x02000000
#define SMSM_MODEM_BREAK 0x04000000
#define SMSM_MODEM_CONTINUE 0x08000000
#define SMSM_SYSTEM_REBOOT_USR 0x20000000
#define SMSM_SYSTEM_PWRDWN_USR 0x40000000
#define SMSM_UNKNOWN 0x80000000
#define SMSM_WKUP_REASON_RPC 0x00000001
#define SMSM_WKUP_REASON_INT 0x00000002
@ -251,18 +286,17 @@ typedef enum {
} smem_mem_type;
#define SMD_SS_CLOSED 0x00000000
#define SMD_SS_OPENING 0x00000001
#define SMD_SS_OPENED 0x00000002
#define SMD_SS_FLUSHING 0x00000003
#define SMD_SS_CLOSING 0x00000004
#define SMD_SS_RESET 0x00000005
#define SMD_SS_RESET_OPENING 0x00000006
#define SMD_SS_CLOSED 0x00000000
#define SMD_SS_OPENING 0x00000001
#define SMD_SS_OPENED 0x00000002
#define SMD_SS_FLUSHING 0x00000003
#define SMD_SS_CLOSING 0x00000004
#define SMD_SS_RESET 0x00000005
#define SMD_SS_RESET_OPENING 0x00000006
#define SMD_BUF_SIZE 8192
#define SMD_CHANNELS 64
#define SMD_HEADER_SIZE 20
#define SMD_BUF_SIZE 8192
#define SMD_CHANNELS 64
#define SMD_HEADER_SIZE 20
#define SMD_TYPE_MASK 0x0FF
#define SMD_TYPE_APPS_MODEM 0x000
@ -274,6 +308,8 @@ typedef enum {
#define SMD_KIND_STREAM 0x100
#define SMD_KIND_PACKET 0x200
int smsm_check_for_modem_crash(void);
#define msm_check_for_modem_crash smsm_check_for_modem_crash
void *smem_find(unsigned id, unsigned size);
void *smem_item(unsigned id, unsigned *size);
uint32_t raw_smsm_get_state(enum smsm_state_item item);


@ -22,6 +22,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cdev.h>
@ -105,7 +106,7 @@ static struct wake_lock rpcrouter_wake_lock;
static int rpcrouter_need_len;
static atomic_t next_xid = ATOMIC_INIT(1);
static uint8_t next_pacmarkid;
static atomic_t next_mid = ATOMIC_INIT(0);
static void do_read_data(struct work_struct *work);
static void do_create_pdevs(struct work_struct *work);
@ -114,12 +115,16 @@ static void do_create_rpcrouter_pdev(struct work_struct *work);
static DECLARE_WORK(work_read_data, do_read_data);
static DECLARE_WORK(work_create_pdevs, do_create_pdevs);
static DECLARE_WORK(work_create_rpcrouter_pdev, do_create_rpcrouter_pdev);
static atomic_t rpcrouter_pdev_created = ATOMIC_INIT(0);
#define RR_STATE_IDLE 0
#define RR_STATE_HEADER 1
#define RR_STATE_BODY 2
#define RR_STATE_ERROR 3
#define RMT_STORAGE_APIPROG_BE32 0xa7000030
#define RMT_STORAGE_SRV_APIPROG_BE32 0x9c000030
struct rr_context {
struct rr_packet *pkt;
uint8_t *ptr;
@ -262,6 +267,7 @@ struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
{
struct msm_rpc_endpoint *ept;
unsigned long flags;
int i;
ept = kmalloc(sizeof(struct msm_rpc_endpoint), GFP_KERNEL);
if (!ept)
@ -269,7 +275,9 @@ struct msm_rpc_endpoint *msm_rpcrouter_create_local_endpoint(dev_t dev)
memset(ept, 0, sizeof(struct msm_rpc_endpoint));
/* mark no reply outstanding */
ept->reply_pid = 0xffffffff;
ept->next_rroute = 0;
for (i = 0; i < MAX_REPLY_ROUTE; i++)
ept->rroute[i].pid = 0xffffffff;
ept->cid = (uint32_t) ept;
ept->pid = RPCROUTER_PID_LOCAL;
@ -530,7 +538,8 @@ static int process_control_msg(union rr_control_msg *msg, int len)
static void do_create_rpcrouter_pdev(struct work_struct *work)
{
platform_device_register(&rpcrouter_pdev);
if (atomic_cmpxchg(&rpcrouter_pdev_created, 0, 1) == 0)
platform_device_register(&rpcrouter_pdev);
}
static void do_create_pdevs(struct work_struct *work)
@ -652,11 +661,13 @@ static void do_read_data(struct work_struct *work)
hdr.size -= sizeof(pm);
frag = rr_malloc(hdr.size + sizeof(*frag));
frag = rr_malloc(sizeof(*frag));
frag->next = NULL;
frag->length = hdr.size;
if (rr_read(frag->data, hdr.size))
if (rr_read(frag->data, hdr.size)) {
kfree(frag);
goto fail_io;
}
ept = rpcrouter_lookup_local_endpoint(hdr.dst_cid);
if (!ept) {
@ -758,19 +769,77 @@ int msm_rpc_close(struct msm_rpc_endpoint *ept)
}
EXPORT_SYMBOL(msm_rpc_close);
static int msm_rpc_write_pkt(struct msm_rpc_endpoint *ept,
struct rr_remote_endpoint *r_ept,
struct rr_header *hdr,
uint32_t pacmark,
void *buffer, int count)
{
DEFINE_WAIT(__wait);
unsigned long flags;
int needed;
for (;;) {
prepare_to_wait(&r_ept->quota_wait, &__wait,
TASK_INTERRUPTIBLE);
spin_lock_irqsave(&r_ept->quota_lock, flags);
if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
break;
if (signal_pending(current) &&
(!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
break;
spin_unlock_irqrestore(&r_ept->quota_lock, flags);
schedule();
}
finish_wait(&r_ept->quota_wait, &__wait);
if (signal_pending(current) &&
(!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
spin_unlock_irqrestore(&r_ept->quota_lock, flags);
return -ERESTARTSYS;
}
r_ept->tx_quota_cntr++;
if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
hdr->confirm_rx = 1;
spin_unlock_irqrestore(&r_ept->quota_lock, flags);
spin_lock_irqsave(&smd_lock, flags);
needed = sizeof(*hdr) + hdr->size;
while (smd_write_avail(smd_channel) < needed) {
spin_unlock_irqrestore(&smd_lock, flags);
msleep(250);
spin_lock_irqsave(&smd_lock, flags);
}
/* TODO: deal with full fifo */
smd_write(smd_channel, hdr, sizeof(*hdr));
smd_write(smd_channel, &pacmark, sizeof(pacmark));
smd_write(smd_channel, buffer, count);
spin_unlock_irqrestore(&smd_lock, flags);
return 0;
}
int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
{
struct rr_header hdr;
uint32_t pacmark;
uint32_t mid;
struct rpc_request_hdr *rq = buffer;
struct rr_remote_endpoint *r_ept;
unsigned long flags;
int needed;
DEFINE_WAIT(__wait);
int ret;
int total;
/* TODO: fragmentation for large outbound packets */
if (count > (RPCROUTER_MSGSIZE_MAX - sizeof(uint32_t)) || !count)
return -EINVAL;
if (((rq->prog&0xFFFFFFF0) == RMT_STORAGE_APIPROG_BE32) ||
((rq->prog&0xFFFFFFF0) == RMT_STORAGE_SRV_APIPROG_BE32)) {
printk(KERN_DEBUG
"rpc_write: prog = %x , procedure = %d, type = %d, xid = %d\n"
, be32_to_cpu(rq->prog), be32_to_cpu(rq->procedure)
, be32_to_cpu(rq->type), be32_to_cpu(rq->xid));
}
/* snoop the RPC packet and enforce permissions */
@ -818,23 +887,21 @@ int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
} else {
/* RPC REPLY */
/* TODO: locking */
if (ept->reply_pid == 0xffffffff) {
printk(KERN_ERR
"rr_write: rejecting unexpected reply\n");
return -EINVAL;
}
if (ept->reply_xid != rq->xid) {
printk(KERN_ERR
"rr_write: rejecting packet w/ bad xid\n");
return -EINVAL;
for (ret = 0; ret < MAX_REPLY_ROUTE; ret++)
if (ept->rroute[ret].xid == rq->xid) {
if (ept->rroute[ret].pid == 0xffffffff)
continue;
hdr.dst_pid = ept->rroute[ret].pid;
hdr.dst_cid = ept->rroute[ret].cid;
/* consume this reply */
ept->rroute[ret].pid = 0xffffffff;
goto found_rroute;
}
hdr.dst_pid = ept->reply_pid;
hdr.dst_cid = ept->reply_cid;
/* consume this reply */
ept->reply_pid = 0xffffffff;
printk(KERN_ERR "rr_write: rejecting packet w/ bad xid\n");
return -EINVAL;
found_rroute:
IO("REPLY on ept %p to xid=%d @ %d:%08x (%d bytes)\n",
ept,
be32_to_cpu(rq->xid), hdr.dst_pid, hdr.dst_cid, count);
@ -854,56 +921,36 @@ int msm_rpc_write(struct msm_rpc_endpoint *ept, void *buffer, int count)
hdr.version = RPCROUTER_VERSION;
hdr.src_pid = ept->pid;
hdr.src_cid = ept->cid;
hdr.confirm_rx = 0;
hdr.size = count + sizeof(uint32_t);
for (;;) {
prepare_to_wait(&r_ept->quota_wait, &__wait,
TASK_INTERRUPTIBLE);
spin_lock_irqsave(&r_ept->quota_lock, flags);
if (r_ept->tx_quota_cntr < RPCROUTER_DEFAULT_RX_QUOTA)
break;
if (signal_pending(current) &&
(!(ept->flags & MSM_RPC_UNINTERRUPTIBLE)))
break;
spin_unlock_irqrestore(&r_ept->quota_lock, flags);
schedule();
}
finish_wait(&r_ept->quota_wait, &__wait);
total = count;
if (signal_pending(current) &&
(!(ept->flags & MSM_RPC_UNINTERRUPTIBLE))) {
spin_unlock_irqrestore(&r_ept->quota_lock, flags);
return -ERESTARTSYS;
}
r_ept->tx_quota_cntr++;
if (r_ept->tx_quota_cntr == RPCROUTER_DEFAULT_RX_QUOTA)
hdr.confirm_rx = 1;
mid = atomic_add_return(1, &next_mid) & 0xFF;
/* bump pacmark while interrupts disabled to avoid race
* probably should be atomic op instead
*/
pacmark = PACMARK(count, ++next_pacmarkid, 0, 1);
while (count > 0) {
unsigned xfer;
spin_unlock_irqrestore(&r_ept->quota_lock, flags);
if (count > RPCROUTER_DATASIZE_MAX)
xfer = RPCROUTER_DATASIZE_MAX;
else
xfer = count;
spin_lock_irqsave(&smd_lock, flags);
hdr.confirm_rx = 0;
hdr.size = xfer + sizeof(uint32_t);
needed = sizeof(hdr) + hdr.size;
while (smd_write_avail(smd_channel) < needed) {
spin_unlock_irqrestore(&smd_lock, flags);
msleep(250);
spin_lock_irqsave(&smd_lock, flags);
/* total == count -> must be first packet
* xfer == count -> must be last packet
*/
pacmark = PACMARK(xfer, mid, (total == count), (xfer == count));
ret = msm_rpc_write_pkt(ept, r_ept, &hdr, pacmark, buffer, xfer);
if (ret < 0)
return ret;
buffer += xfer;
count -= xfer;
}
/* TODO: deal with full fifo */
smd_write(smd_channel, &hdr, sizeof(hdr));
smd_write(smd_channel, &pacmark, sizeof(pacmark));
smd_write(smd_channel, buffer, count);
spin_unlock_irqrestore(&smd_lock, flags);
return count;
return total;
}
EXPORT_SYMBOL(msm_rpc_write);
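To make the fragmentation loop above concrete, here is a worked trace for a single 1200-byte msm_rpc_write(); the 500-byte fragment limit comes from RPCROUTER_DATASIZE_MAX, defined later in this diff.
/* Worked trace, count = 1200, RPCROUTER_DATASIZE_MAX = 500; all three
 * fragments share one mid drawn from next_mid:
 *   fragment 1: xfer = 500, first = 1 (total == count), last = 0
 *   fragment 2: xfer = 500, first = 0,                  last = 0
 *   fragment 3: xfer = 200, first = 0, last = 1 (xfer == count)
 * The receiver reassembles in order from the pacmark first/last bits,
 * and msm_rpc_write() returns total = 1200. */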
@ -1104,20 +1151,30 @@ int __msm_rpc_read(struct msm_rpc_endpoint *ept,
*frag_ret = pkt->first;
rq = (void*) pkt->first->data;
if (((rq->prog&0xFFFFFFF0) == RMT_STORAGE_APIPROG_BE32) ||
((rq->prog&0xFFFFFFF0) == RMT_STORAGE_SRV_APIPROG_BE32)) {
printk(KERN_DEBUG
"rpc_read: prog = %x , procedure = %d, type = %d, xid = %d\n"
, be32_to_cpu(rq->prog), be32_to_cpu(rq->procedure)
, be32_to_cpu(rq->type), be32_to_cpu(rq->xid));
}
if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 0)) {
IO("READ on ept %p is a CALL on %08x:%08x proc %d xid %d\n",
ept, be32_to_cpu(rq->prog), be32_to_cpu(rq->vers),
be32_to_cpu(rq->procedure),
be32_to_cpu(rq->xid));
/* RPC CALL */
if (ept->reply_pid != 0xffffffff) {
if (ept->rroute[ept->next_rroute].pid != 0xffffffff) {
printk(KERN_WARNING
"rr_read: lost previous reply xid...\n");
}
/* TODO: locking? */
ept->reply_pid = pkt->hdr.src_pid;
ept->reply_cid = pkt->hdr.src_cid;
ept->reply_xid = rq->xid;
ept->rroute[ept->next_rroute].pid = pkt->hdr.src_pid;
ept->rroute[ept->next_rroute].cid = pkt->hdr.src_cid;
ept->rroute[ept->next_rroute].xid = rq->xid;
ept->next_rroute = (ept->next_rroute + 1) & (MAX_REPLY_ROUTE - 1);
}
#if TRACE_RPC_MSG
else if ((rc >= (sizeof(uint32_t) * 3)) && (rq->type == 1))


@ -32,6 +32,7 @@
#define RPCROUTER_VERSION 1
#define RPCROUTER_PROCESSORS_MAX 4
#define RPCROUTER_MSGSIZE_MAX 512
#define RPCROUTER_DATASIZE_MAX 500
#if defined(CONFIG_ARCH_MSM7X30)
#define RPCROUTER_PEND_REPLIES_MAX 32
#endif
@ -50,6 +51,7 @@
#define RPCROUTER_CTRL_CMD_REMOVE_CLIENT 6
#define RPCROUTER_CTRL_CMD_RESUME_TX 7
#define RPCROUTER_CTRL_CMD_EXIT 8
#define RPCROUTER_CTRL_CMD_PING 9
#define RPCROUTER_DEFAULT_RX_QUOTA 5
@ -141,6 +143,15 @@ struct rr_remote_endpoint {
struct list_head list;
};
struct msm_reply_route {
uint32_t xid;
uint32_t pid;
uint32_t cid;
uint32_t unused;
};
#define MAX_REPLY_ROUTE 4
#if defined(CONFIG_ARCH_MSM7X30)
struct msm_rpc_reply {
struct list_head list;
@ -183,15 +194,12 @@ struct msm_rpc_endpoint {
uint32_t dst_prog; /* be32 */
uint32_t dst_vers; /* be32 */
/* reply remote address
* if reply_pid == 0xffffffff, none available
* RPC_REPLY writes may only go to the pid/cid/xid of the
* last RPC_CALL we received.
/* RPC_REPLY writes must be routed to the pid/cid of the
* RPC_CALL they are in reply to. Keep a cache of valid
* xid/pid/cid groups. pid 0xffffffff -> not valid.
*/
uint32_t reply_pid;
uint32_t reply_cid;
uint32_t reply_xid; /* be32 */
uint32_t next_pm; /* Pacmark sequence */
unsigned next_rroute;
struct msm_reply_route rroute[MAX_REPLY_ROUTE];
#if defined(CONFIG_ARCH_MSM7X30)
/* reply queue for inbound messages */
@ -224,6 +232,7 @@ void msm_rpcrouter_exit_devices(void);
#if defined(CONFIG_ARCH_MSM7X30)
void get_requesting_client(struct msm_rpc_endpoint *ept, uint32_t xid,
struct msm_rpc_client_info *clnt_info);
int msm_rpc_clear_netreset(struct msm_rpc_endpoint *ept);
#endif
extern dev_t msm_rpcrouter_devno;


@ -26,6 +26,7 @@
#include <linux/fs.h>
#include <linux/err.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/poll.h>
#include <linux/platform_device.h>
#include <linux/msm_rpcrouter.h>


@ -16,6 +16,7 @@
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/errno.h>
#include <linux/cdev.h>


@ -78,6 +78,8 @@
#include <linux/string.h>
#include <linux/kernel.h>
#include <linux/delay.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <mach/msm_rpcrouter.h>
@ -421,7 +423,7 @@ int xdr_send_msg(struct msm_rpc_xdr *xdr)
void xdr_init(struct msm_rpc_xdr *xdr)
{
mutex_init(&xdr->out_lock);
mutex_init(&xdr->in_lock);
init_waitqueue_head(&xdr->in_buf_wait_q);
xdr->in_buf = NULL;
xdr->in_size = 0;
@ -434,7 +436,7 @@ void xdr_init(struct msm_rpc_xdr *xdr)
void xdr_init_input(struct msm_rpc_xdr *xdr, void *buf, uint32_t size)
{
mutex_lock(&xdr->in_lock);
wait_event(xdr->in_buf_wait_q, !(xdr->in_buf));
xdr->in_buf = buf;
xdr->in_size = size;
@ -455,7 +457,7 @@ void xdr_clean_input(struct msm_rpc_xdr *xdr)
xdr->in_size = 0;
xdr->in_index = 0;
mutex_unlock(&xdr->in_lock);
wake_up(&xdr->in_buf_wait_q);
}
void xdr_clean_output(struct msm_rpc_xdr *xdr)


@ -30,6 +30,7 @@
#include "board-htcleo.h"
#define MAX_SMD_TTYS 32
#define MAX_TTY_BUF_SIZE 2048
static DEFINE_MUTEX(smd_tty_lock);
@ -75,6 +76,9 @@ static void smd_tty_work_func(struct work_struct *work)
tty->low_latency = 0;
tty_flip_buffer_push(tty);
break;
if (avail > MAX_TTY_BUF_SIZE)
avail = MAX_TTY_BUF_SIZE;
}
ptr = NULL;


@ -169,18 +169,10 @@ static int msm_timer_set_next_event(unsigned long cycles,
clock->last_set = now;
clock->alarm_vtime = alarm + clock->offset;
late = now - alarm;
if (late >= (int)(-clock->write_delay << clock->shift) && late < DGT_HZ*5) {
static int print_limit = 10;
if (print_limit > 0) {
print_limit--;
printk(KERN_NOTICE "msm_timer_set_next_event(%lu) "
"clock %s, alarm already expired, now %x, "
"alarm %x, late %d%s\n",
cycles, clock->clockevent.name, now, alarm, late,
print_limit ? "" : " stop printing");
}
if (late >= (int)(-clock->write_delay << clock->shift) &&
late < clock->freq*5)
return -ETIME;
}
return 0;
}
@ -582,9 +574,12 @@ static struct msm_clock msm_clocks[] = {
#endif
.freq = GPT_HZ,
.flags =
#ifdef CONFIG_ARCH_MSM_ARM11
MSM_CLOCK_FLAGS_UNSTABLE_COUNT |
MSM_CLOCK_FLAGS_ODD_MATCH_WRITE |
MSM_CLOCK_FLAGS_DELAYED_WRITE_POST,
MSM_CLOCK_FLAGS_DELAYED_WRITE_POST |
#endif
0,
.write_delay = 9,
},
[MSM_CLOCK_DGT] = {


@ -17,10 +17,10 @@
/*
* See Documentation/block/deadline-iosched.txt
*/
static const int read_expire = HZ / 2; /* max time before a read is submitted. */
static const int read_expire = HZ / 4; /* max time before a read is submitted. */
static const int write_expire = 5 * HZ; /* ditto for writes, these limits are SOFT! */
static const int writes_starved = 2; /* max times reads can starve a write */
static const int fifo_batch = 16; /* # of sequential requests treated as one
static const int writes_starved = 4; /* max times reads can starve a write */
static const int fifo_batch = 1; /* # of sequential requests treated as one
by the above parameters. For throughput. */
struct deadline_data {
@ -362,7 +362,7 @@ static void *deadline_init_queue(struct request_queue *q)
dd->fifo_expire[READ] = read_expire;
dd->fifo_expire[WRITE] = write_expire;
dd->writes_starved = writes_starved;
dd->front_merges = 1;
dd->front_merges = 0;
dd->fifo_batch = fifo_batch;
return dd;
}

build.sh Executable file

@ -0,0 +1,36 @@
#!/bin/sh
export KERNELBASEDIR=$PWD/../JB_Kernel_update-zip-files
#export TOOLCHAIN=$HOME/CodeSourcery/Sourcery_G++_Lite/bin/arm-none-eabi-
export TOOLCHAIN=$HOME/arm-2010q1/bin/arm-none-eabi-
export KERNEL_FILE=HTCLEO-Kernel_2.6.32_tytung_jellybean
rm arch/arm/boot/zImage
make htcleo_defconfig
make ARCH=arm CROSS_COMPILE=$TOOLCHAIN zImage -j8 && make ARCH=arm CROSS_COMPILE=$TOOLCHAIN modules -j8
if [ -f arch/arm/boot/zImage ]; then
mkdir -p $KERNELBASEDIR/
rm -rf $KERNELBASEDIR/boot/*
rm -rf $KERNELBASEDIR/system/lib/modules/*
mkdir -p $KERNELBASEDIR/boot
mkdir -p $KERNELBASEDIR/system/
mkdir -p $KERNELBASEDIR/system/lib/
mkdir -p $KERNELBASEDIR/system/lib/modules
cp -a arch/arm/boot/zImage $KERNELBASEDIR/boot/zImage
make ARCH=arm CROSS_COMPILE=$TOOLCHAIN INSTALL_MOD_PATH=$KERNELBASEDIR/system/lib/modules modules_install -j8
cd $KERNELBASEDIR/system/lib/modules
find . -iname '*.ko' | xargs -i -t cp {} .
rm -rf $KERNELBASEDIR/system/lib/modules/lib
stat $KERNELBASEDIR/boot/zImage
cd ../../../
zip -r ${KERNEL_FILE}_`date +"%Y%m%d_%H_%M"`.zip boot system META-INF work
else
echo "Kernel STUCK in BUILD! no zImage exist"
fi


@ -151,4 +151,18 @@ config SYS_HYPERVISOR
bool
default n
config GENLOCK
bool "Enable a generic cross-process locking mechanism"
depends on ANON_INODES
help
Enable a generic cross-process locking API to provide protection
for shared memory objects such as graphics buffers.
config GENLOCK_MISCDEVICE
bool "Enable a misc-device for userspace to access the genlock engine"
depends on GENLOCK
help
Create a miscdevice for the purposes of allowing userspace to create
and interact with locks created using genlock.
endmenu
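For orientation, a kernel-side sketch of creating a lock with the genlock API enabled by the options above; genlock_get_handle() and the locking calls are assumed from the full driver, since only genlock_create_lock() appears in the genlock.c excerpt below.
/* Sketch only: genlock_get_handle() is an assumption not shown below. */
static struct genlock *example_make_lock(void)
{
	struct genlock_handle *handle = genlock_get_handle();
	struct genlock *lock;

	if (IS_ERR_OR_NULL(handle))
		return ERR_PTR(-EINVAL);

	lock = genlock_create_lock(handle);	/* shown in genlock.c below */
	if (IS_ERR_OR_NULL(lock))
		return lock;

	/* actual locking would go through e.g. genlock_lock(), also assumed */
	return lock;
}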


@ -8,6 +8,7 @@ obj-$(CONFIG_DEVTMPFS) += devtmpfs.o
obj-y += power/
obj-$(CONFIG_HAS_DMA) += dma-mapping.o
obj-$(CONFIG_HAVE_GENERIC_DMA_COHERENT) += dma-coherent.o
obj-$(CONFIG_GENLOCK) += genlock.o
obj-$(CONFIG_ISA) += isa.o
obj-$(CONFIG_FW_LOADER) += firmware_class.o
obj-$(CONFIG_NUMA) += node.o

drivers/base/genlock.c Normal file

@ -0,0 +1,852 @@
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*/
#include <linux/fb.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/list.h>
#include <linux/file.h>
#include <linux/sched.h>
#include <linux/fs.h>
#include <linux/wait.h>
#include <linux/uaccess.h>
#include <linux/anon_inodes.h>
#include <linux/miscdevice.h>
#include <linux/genlock.h>
#include <linux/interrupt.h>
/* Lock states - can either be unlocked, held as an exclusive write lock or a
* shared read lock
*/
#define _UNLOCKED 0
#define _RDLOCK GENLOCK_RDLOCK
#define _WRLOCK GENLOCK_WRLOCK
#define GENLOCK_LOG_ERR(fmt, args...) \
pr_err("genlock: %s: " fmt, __func__, ##args)
/* The genlock magic stored in the kernel private data is used to protect
* against the possibility of user space passing a valid fd to a
* non-genlock file for genlock_attach_lock()
*/
#define GENLOCK_MAGIC_OK 0xD2EAD10C
#define GENLOCK_MAGIC_BAD 0xD2EADBAD
struct genlock {
unsigned int magic; /* Magic for attach verification */
struct list_head active; /* List of handles holding lock */
spinlock_t lock; /* Spinlock to protect the lock internals */
wait_queue_head_t queue; /* Holding pen for processes pending lock */
struct file *file; /* File structure for exported lock */
int state; /* Current state of the lock */
struct kref refcount;
};
struct genlock_handle {
struct genlock *lock; /* Lock currently attached to the handle */
struct list_head entry; /* List node for attaching to a lock */
struct file *file; /* File structure associated with handle */
int active; /* Number of times the active lock has been
taken */
};
/*
* Create a spinlock to protect against a race condition when a lock gets
* released while another process tries to attach it
*/
static DEFINE_SPINLOCK(genlock_ref_lock);
static void genlock_destroy(struct kref *kref)
{
struct genlock *lock = container_of(kref, struct genlock,
refcount);
/*
* Clear the private data for the file descriptor in case the fd is
* still active after the lock gets released
*/
if (lock->file)
lock->file->private_data = NULL;
lock->magic = GENLOCK_MAGIC_BAD;
kfree(lock);
}
/*
* Release the genlock object. Called when all the references to
* the genlock file descriptor are released
*/
static int genlock_release(struct inode *inodep, struct file *file)
{
struct genlock *lock = file->private_data;
/*
* Clear the reference back to this file structure to avoid
* somehow reusing the lock after the file has been destroyed
*/
if (lock)
lock->file = NULL;
return 0;
}
static const struct file_operations genlock_fops = {
.release = genlock_release,
};
/**
* genlock_create_lock - Create a new lock
* @handle - genlock handle to attach the lock to
*
* Returns: a pointer to the genlock
*/
struct genlock *genlock_create_lock(struct genlock_handle *handle)
{
struct genlock *lock;
void *ret;
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
return ERR_PTR(-EINVAL);
}
if (handle->lock != NULL) {
GENLOCK_LOG_ERR("Handle already has a lock attached\n");
return ERR_PTR(-EINVAL);
}
lock = kzalloc(sizeof(*lock), GFP_KERNEL);
if (lock == NULL) {
GENLOCK_LOG_ERR("Unable to allocate memory for a lock\n");
return ERR_PTR(-ENOMEM);
}
INIT_LIST_HEAD(&lock->active);
init_waitqueue_head(&lock->queue);
spin_lock_init(&lock->lock);
lock->magic = GENLOCK_MAGIC_OK;
lock->state = _UNLOCKED;
/*
* Create an anonymous inode for the object that can be exported to
* other processes
*/
ret = anon_inode_getfile("genlock", &genlock_fops, lock, O_RDWR);
if (IS_ERR_OR_NULL(ret)) {
GENLOCK_LOG_ERR("Unable to create lock inode\n");
kfree(lock);
return ret;
}
lock->file = ret;
/* Attach the new lock to the handle */
handle->lock = lock;
kref_init(&lock->refcount);
return lock;
}
EXPORT_SYMBOL(genlock_create_lock);
/*
* Get a file descriptor reference to a lock suitable for sharing with
* other processes
*/
static int genlock_get_fd(struct genlock *lock)
{
int ret;
if (!lock->file) {
GENLOCK_LOG_ERR("No file attached to the lock\n");
return -EINVAL;
}
ret = get_unused_fd_flags(0);
if (ret < 0)
return ret;
fd_install(ret, lock->file);
return ret;
}
/**
* genlock_attach_lock - Attach an existing lock to a handle
* @handle - Pointer to a genlock handle to attach the lock to
* @fd - file descriptor for the exported lock
*
* Returns: A pointer to the attached lock structure
*/
struct genlock *genlock_attach_lock(struct genlock_handle *handle, int fd)
{
struct file *file;
struct genlock *lock;
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
return ERR_PTR(-EINVAL);
}
if (handle->lock != NULL) {
GENLOCK_LOG_ERR("Handle already has a lock attached\n");
return ERR_PTR(-EINVAL);
}
file = fget(fd);
if (file == NULL) {
GENLOCK_LOG_ERR("Bad file descriptor\n");
return ERR_PTR(-EBADF);
}
/*
* take a spinlock to avoid a race condition if the lock is
* released and then attached
*/
spin_lock(&genlock_ref_lock);
lock = file->private_data;
fput(file);
if (lock == NULL) {
GENLOCK_LOG_ERR("File descriptor is invalid\n");
goto fail_invalid;
}
if (lock->magic != GENLOCK_MAGIC_OK) {
GENLOCK_LOG_ERR("Magic is invalid - 0x%X\n", lock->magic);
goto fail_invalid;
}
handle->lock = lock;
kref_get(&lock->refcount);
spin_unlock(&genlock_ref_lock);
return lock;
fail_invalid:
spin_unlock(&genlock_ref_lock);
return ERR_PTR(-EINVAL);
}
EXPORT_SYMBOL(genlock_attach_lock);
/* Helper function that returns 1 if the specified handle holds the lock */
static int handle_has_lock(struct genlock *lock, struct genlock_handle *handle)
{
struct genlock_handle *h;
list_for_each_entry(h, &lock->active, entry) {
if (h == handle)
return 1;
}
return 0;
}
/* If the lock just became available, signal the next entity waiting for it */
static void _genlock_signal(struct genlock *lock)
{
if (list_empty(&lock->active)) {
/* If the list is empty, then the lock is free */
lock->state = _UNLOCKED;
/* Wake up the first process sitting in the queue */
wake_up(&lock->queue);
}
}
/* Attempt to release the handle's ownership of the lock */
static int _genlock_unlock(struct genlock *lock, struct genlock_handle *handle)
{
int ret = -EINVAL;
unsigned long irqflags;
spin_lock_irqsave(&lock->lock, irqflags);
if (lock->state == _UNLOCKED) {
GENLOCK_LOG_ERR("Trying to unlock an unlocked handle\n");
goto done;
}
/* Make sure this handle is an owner of the lock */
if (!handle_has_lock(lock, handle)) {
GENLOCK_LOG_ERR("handle does not have lock attached to it\n");
goto done;
}
/* If the handle holds no more references to the lock then
release it (maybe) */
if (--handle->active == 0) {
list_del(&handle->entry);
_genlock_signal(lock);
}
ret = 0;
done:
spin_unlock_irqrestore(&lock->lock, irqflags);
return ret;
}
/* Attempt to acquire the lock for the handle */
static int _genlock_lock(struct genlock *lock, struct genlock_handle *handle,
int op, int flags, uint32_t timeout)
{
unsigned long irqflags;
int ret = 0;
unsigned long ticks = msecs_to_jiffies(timeout);
spin_lock_irqsave(&lock->lock, irqflags);
/* Sanity check - no blocking locks in a debug context. Even if it
* managed not to block, the mere idea is too dangerous to continue
*/
if (in_interrupt() && !(flags & GENLOCK_NOBLOCK))
BUG();
/* Fast path - the lock is unlocked, so go do the needful */
if (lock->state == _UNLOCKED)
goto dolock;
if (handle_has_lock(lock, handle)) {
/*
* If the handle already holds the lock and the lock type is
* a read lock then just increment the active pointer. This
* allows the handle to do recursive read locks. Recursive
* write locks are not allowed in order to support
* synchronization within a process using a single gralloc
* handle.
*/
if (lock->state == _RDLOCK && op == _RDLOCK) {
handle->active++;
goto done;
}
/*
* If the handle holds a write lock then the owner can switch
* to a read lock if they want. Do the transition atomically
* then wake up any pending waiters in case they want a read
* lock too. In order to support synchronization within a
* process the caller must explicitly request to convert the
* lock type with the GENLOCK_WRITE_TO_READ flag.
*/
if (flags & GENLOCK_WRITE_TO_READ) {
if (lock->state == _WRLOCK && op == _RDLOCK) {
lock->state = _RDLOCK;
wake_up(&lock->queue);
goto done;
} else {
GENLOCK_LOG_ERR("Invalid state to convert"
"write to read\n");
ret = -EINVAL;
goto done;
}
}
} else {
/*
* Check to ensure the caller has not attempted to convert a
* write to a read without holding the lock.
*/
if (flags & GENLOCK_WRITE_TO_READ) {
GENLOCK_LOG_ERR("Handle must have lock to convert"
"write to read\n");
ret = -EINVAL;
goto done;
}
/*
* If we request a read and the lock is held by a read, then go
* ahead and share the lock
*/
if (op == GENLOCK_RDLOCK && lock->state == _RDLOCK)
goto dolock;
}
/* Treat timeout 0 just like a NOBLOCK flag and return if the
lock cannot be acquired without blocking */
if (flags & GENLOCK_NOBLOCK || timeout == 0) {
ret = -EAGAIN;
goto done;
}
/*
* Wait while the lock remains in an incompatible state
* state op wait
* -------------------
* unlocked n/a no
* read read no
* read write yes
* write n/a yes
*/
while ((lock->state == _RDLOCK && op == _WRLOCK) ||
lock->state == _WRLOCK) {
signed long elapsed;
spin_unlock_irqrestore(&lock->lock, irqflags);
elapsed = wait_event_interruptible_timeout(lock->queue,
lock->state == _UNLOCKED ||
(lock->state == _RDLOCK && op == _RDLOCK),
ticks);
spin_lock_irqsave(&lock->lock, irqflags);
if (elapsed <= 0) {
ret = (elapsed < 0) ? elapsed : -ETIMEDOUT;
goto done;
}
ticks = (unsigned long) elapsed;
}
dolock:
/* We can now get the lock, add ourselves to the list of owners */
list_add_tail(&handle->entry, &lock->active);
lock->state = op;
handle->active++;
done:
spin_unlock_irqrestore(&lock->lock, irqflags);
return ret;
}
/**
* genlock_lock - Acquire or release a lock (deprecated)
* @handle - pointer to the genlock handle that is requesting the lock
* @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK)
* @flags - flags to control the operation
* @timeout - optional timeout to wait for the lock to come free
*
* Returns: 0 on success or error code on failure
*/
int genlock_lock(struct genlock_handle *handle, int op, int flags,
uint32_t timeout)
{
struct genlock *lock;
unsigned long irqflags;
int ret = 0;
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
return -EINVAL;
}
lock = handle->lock;
if (lock == NULL) {
GENLOCK_LOG_ERR("Handle does not have a lock attached\n");
return -EINVAL;
}
switch (op) {
case GENLOCK_UNLOCK:
ret = _genlock_unlock(lock, handle);
break;
case GENLOCK_RDLOCK:
spin_lock_irqsave(&lock->lock, irqflags);
if (handle_has_lock(lock, handle)) {
/* request the WRITE_TO_READ flag for compatibility */
flags |= GENLOCK_WRITE_TO_READ;
}
spin_unlock_irqrestore(&lock->lock, irqflags);
/* fall through to take lock */
case GENLOCK_WRLOCK:
ret = _genlock_lock(lock, handle, op, flags, timeout);
break;
default:
GENLOCK_LOG_ERR("Invalid lock operation\n");
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL(genlock_lock);
/**
* genlock_dreadlock - Acquire or release a lock
* @handle - pointer to the genlock handle that is requesting the lock
* @op - the operation to perform (RDLOCK, WRLOCK, UNLOCK)
* @flags - flags to control the operation
* @timeout - optional timeout to wait for the lock to come free
*
* Returns: 0 on success or error code on failure
*/
int genlock_dreadlock(struct genlock_handle *handle, int op, int flags,
uint32_t timeout)
{
struct genlock *lock;
int ret = 0;
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
return -EINVAL;
}
lock = handle->lock;
if (lock == NULL) {
GENLOCK_LOG_ERR("Handle does not have a lock attached\n");
return -EINVAL;
}
switch (op) {
case GENLOCK_UNLOCK:
ret = _genlock_unlock(lock, handle);
break;
case GENLOCK_RDLOCK:
case GENLOCK_WRLOCK:
ret = _genlock_lock(lock, handle, op, flags, timeout);
break;
default:
GENLOCK_LOG_ERR("Invalid lock operation\n");
ret = -EINVAL;
break;
}
return ret;
}
EXPORT_SYMBOL(genlock_dreadlock);
/**
* genlock_wait - Wait for the lock to be released
* @handle - pointer to the genlock handle that is waiting for the lock
* @timeout - optional timeout to wait for the lock to get released
*/
int genlock_wait(struct genlock_handle *handle, uint32_t timeout)
{
struct genlock *lock;
unsigned long irqflags;
int ret = 0;
unsigned long ticks = msecs_to_jiffies(timeout);
if (IS_ERR_OR_NULL(handle)) {
GENLOCK_LOG_ERR("Invalid handle\n");
return -EINVAL;
}
lock = handle->lock;
if (lock == NULL) {
GENLOCK_LOG_ERR("Handle does not have a lock attached\n");
return -EINVAL;
}
spin_lock_irqsave(&lock->lock, irqflags);
/*
* If the timeout is 0, succeed if the lock is already unlocked;
* otherwise return -EAGAIN
*/
if (timeout == 0) {
ret = (lock->state == _UNLOCKED) ? 0 : -EAGAIN;
goto done;
}
while (lock->state != _UNLOCKED) {
signed long elapsed;
spin_unlock_irqrestore(&lock->lock, irqflags);
elapsed = wait_event_interruptible_timeout(lock->queue,
lock->state == _UNLOCKED, ticks);
spin_lock_irqsave(&lock->lock, irqflags);
if (elapsed <= 0) {
ret = (elapsed < 0) ? elapsed : -ETIMEDOUT;
break;
}
ticks = (unsigned long) elapsed;
}
done:
spin_unlock_irqrestore(&lock->lock, irqflags);
return ret;
}
static void genlock_release_lock(struct genlock_handle *handle)
{
unsigned long flags;
if (handle == NULL || handle->lock == NULL)
return;
spin_lock_irqsave(&handle->lock->lock, flags);
/* If the handle is holding the lock, then force it closed */
if (handle_has_lock(handle->lock, handle)) {
list_del(&handle->entry);
_genlock_signal(handle->lock);
}
spin_unlock_irqrestore(&handle->lock->lock, flags);
spin_lock(&genlock_ref_lock);
kref_put(&handle->lock->refcount, genlock_destroy);
spin_unlock(&genlock_ref_lock);
handle->lock = NULL;
handle->active = 0;
}
/*
* Release function called when all references to a handle are released
*/
static int genlock_handle_release(struct inode *inodep, struct file *file)
{
struct genlock_handle *handle = file->private_data;
genlock_release_lock(handle);
kfree(handle);
return 0;
}
static const struct file_operations genlock_handle_fops = {
.release = genlock_handle_release
};
/*
* Allocate a new genlock handle
*/
static struct genlock_handle *_genlock_get_handle(void)
{
struct genlock_handle *handle = kzalloc(sizeof(*handle), GFP_KERNEL);
if (handle == NULL) {
GENLOCK_LOG_ERR("Unable to allocate memory for the handle\n");
return ERR_PTR(-ENOMEM);
}
return handle;
}
/**
* genlock_get_handle - Create a new genlock handle
*
* Returns: A pointer to a new genlock handle
*/
struct genlock_handle *genlock_get_handle(void)
{
void *ret;
struct genlock_handle *handle = _genlock_get_handle();
if (IS_ERR(handle))
return handle;
ret = anon_inode_getfile("genlock-handle",
&genlock_handle_fops, handle, O_RDWR);
if (IS_ERR_OR_NULL(ret)) {
GENLOCK_LOG_ERR("Unable to create handle inode\n");
kfree(handle);
return ret;
}
handle->file = ret;
return handle;
}
EXPORT_SYMBOL(genlock_get_handle);
/**
* genlock_put_handle - release a reference to a genlock handle
* @handle - A pointer to the handle to release
*/
void genlock_put_handle(struct genlock_handle *handle)
{
if (handle)
fput(handle->file);
}
EXPORT_SYMBOL(genlock_put_handle);
/**
* genlock_get_handle_fd - Get a handle reference from a file descriptor
* @fd - The file descriptor for a genlock handle
*/
struct genlock_handle *genlock_get_handle_fd(int fd)
{
struct file *file = fget(fd);
if (file == NULL)
return ERR_PTR(-EINVAL);
return file->private_data;
}
EXPORT_SYMBOL(genlock_get_handle_fd);
#ifdef CONFIG_GENLOCK_MISCDEVICE
static long genlock_dev_ioctl(struct file *filep, unsigned int cmd,
unsigned long arg)
{
struct genlock_lock param;
struct genlock_handle *handle = filep->private_data;
struct genlock *lock;
int ret;
if (IS_ERR_OR_NULL(handle))
return -EINVAL;
switch (cmd) {
case GENLOCK_IOC_NEW: {
lock = genlock_create_lock(handle);
if (IS_ERR(lock))
return PTR_ERR(lock);
return 0;
}
case GENLOCK_IOC_EXPORT: {
if (handle->lock == NULL) {
GENLOCK_LOG_ERR("Handle does not have a lock"
"attached\n");
return -EINVAL;
}
ret = genlock_get_fd(handle->lock);
if (ret < 0)
return ret;
param.fd = ret;
if (copy_to_user((void __user *) arg, &param,
sizeof(param)))
return -EFAULT;
return 0;
}
case GENLOCK_IOC_ATTACH: {
if (copy_from_user(&param, (void __user *) arg,
sizeof(param)))
return -EFAULT;
lock = genlock_attach_lock(handle, param.fd);
if (IS_ERR(lock))
return PTR_ERR(lock);
return 0;
}
case GENLOCK_IOC_LOCK: {
if (copy_from_user(&param, (void __user *) arg,
sizeof(param)))
return -EFAULT;
return genlock_lock(handle, param.op, param.flags,
param.timeout);
}
case GENLOCK_IOC_DREADLOCK: {
if (copy_from_user(&param, (void __user *) arg,
sizeof(param)))
return -EFAULT;
return genlock_dreadlock(handle, param.op, param.flags,
param.timeout);
}
case GENLOCK_IOC_WAIT: {
if (copy_from_user(&param, (void __user *) arg,
sizeof(param)))
return -EFAULT;
return genlock_wait(handle, param.timeout);
}
case GENLOCK_IOC_RELEASE: {
/*
* Return error - this ioctl has been deprecated.
* Locks should only be released when the handle is
* destroyed
*/
GENLOCK_LOG_ERR("Deprecated RELEASE ioctl called\n");
return -EINVAL;
}
default:
GENLOCK_LOG_ERR("Invalid ioctl\n");
return -EINVAL;
}
}
static int genlock_dev_release(struct inode *inodep, struct file *file)
{
struct genlock_handle *handle = file->private_data;
genlock_release_lock(handle);
kfree(handle);
return 0;
}
static int genlock_dev_open(struct inode *inodep, struct file *file)
{
struct genlock_handle *handle = _genlock_get_handle();
if (IS_ERR(handle))
return PTR_ERR(handle);
handle->file = file;
file->private_data = handle;
return 0;
}
static const struct file_operations genlock_dev_fops = {
.open = genlock_dev_open,
.release = genlock_dev_release,
.unlocked_ioctl = genlock_dev_ioctl,
};
static struct miscdevice genlock_dev;
static int genlock_dev_init(void)
{
genlock_dev.minor = MISC_DYNAMIC_MINOR;
genlock_dev.name = "genlock";
genlock_dev.fops = &genlock_dev_fops;
genlock_dev.parent = NULL;
return misc_register(&genlock_dev);
}
static void genlock_dev_close(void)
{
misc_deregister(&genlock_dev);
}
module_init(genlock_dev_init);
module_exit(genlock_dev_close);
#endif
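Taken together, the miscdevice path gives userspace the full lock lifecycle: open() creates a handle, GENLOCK_IOC_NEW attaches a fresh lock, GENLOCK_IOC_EXPORT yields an fd that another process can pass to GENLOCK_IOC_ATTACH, and GENLOCK_IOC_LOCK takes or drops the lock. A hedged sketch of one side of that exchange, assuming a userspace copy of <linux/genlock.h> for struct genlock_lock and the ioctl numbers:

/* Illustrative sketch: create, export, and write-lock a genlock. */
#include <fcntl.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/genlock.h>

int main(void)
{
	struct genlock_lock param = { 0 };
	int dev = open("/dev/genlock", O_RDWR);

	if (dev < 0)
		return 1;
	/* Each open() allocates a handle; NEW attaches a new lock to it. */
	if (ioctl(dev, GENLOCK_IOC_NEW) < 0)
		goto out;
	/* EXPORT returns an fd to hand to another process, which would
	 * call GENLOCK_IOC_ATTACH with it on its own handle. */
	if (ioctl(dev, GENLOCK_IOC_EXPORT, &param) < 0)
		goto out;
	printf("sharable lock fd: %d\n", param.fd);
	/* Take the lock for writing, waiting up to 100 ms, then drop it.
	 * Unlock goes through the same ioctl with op = GENLOCK_UNLOCK. */
	param.op = GENLOCK_WRLOCK;
	param.flags = 0;
	param.timeout = 100;
	if (ioctl(dev, GENLOCK_IOC_LOCK, &param) == 0) {
		param.op = GENLOCK_UNLOCK;
		ioctl(dev, GENLOCK_IOC_LOCK, &param);
	}
out:
	close(dev);
	return 0;
}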

drivers/gpu/Makefile

@@ -1 +1,2 @@
obj-y += drm/ vga/
obj-$(CONFIG_MSM_KGSL) += msm/

drivers/gpu/msm/Kconfig Normal file

@@ -0,0 +1,113 @@
config MSM_KGSL
tristate "MSM 3D Graphics driver"
default n
depends on ARCH_MSM && !ARCH_MSM7X00A && !ARCH_MSM7X25
select GENERIC_ALLOCATOR
select FW_LOADER
---help---
3D graphics driver. Required to use hardware accelerated
OpenGL ES 2.0 and 1.1.
config MSM_KGSL_CFF_DUMP
bool "Enable KGSL Common File Format (CFF) Dump Feature [Use with caution]"
default n
depends on MSM_KGSL
select RELAY
---help---
This is an analysis and diagnostic feature only, and should only be
turned on during KGSL GPU diagnostics; it will slow down KGSL
performance significantly, hence *do not use in production builds*.
When enabled, CFF Dump is on at boot. It can be turned off at runtime
via 'echo 0 > /d/kgsl/cff_dump'. The log can be captured via
/d/kgsl-cff/cpu[0|1].
config MSM_KGSL_CFF_DUMP_NO_CONTEXT_MEM_DUMP
bool "When selected will disable KGSL CFF Dump for context switches"
default n
depends on MSM_KGSL_CFF_DUMP
---help---
Dumping all the memory for every context switch can produce very
large log files; to reduce this, turn this feature on.
config MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
bool "Disable human readable CP_STAT fields in post-mortem dump"
default n
depends on MSM_KGSL
---help---
For a more compact kernel log the human readable output of
CP_STAT can be turned off with this option.
config MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
bool "Disable dumping current IB1 and IB2 in post-mortem dump"
default n
depends on MSM_KGSL
---help---
For a more compact kernel log the IB1 and IB2 embedded dump
can be turned off with this option. Some IB dumps take up
so much space that vital other information gets cut from the
post-mortem dump.
config MSM_KGSL_PSTMRTMDMP_RB_HEX
bool "Use hex version for ring-buffer in post-mortem dump"
default n
depends on MSM_KGSL
---help---
Use hex version for the ring-buffer in the post-mortem dump, instead
of the human readable version.
config MSM_KGSL_2D
tristate "MSM 2D graphics driver. Required for OpenVG"
default y
depends on MSM_KGSL && !ARCH_MSM7X27 && !ARCH_MSM7X27A && !(ARCH_QSD8X50 && !MSM_SOC_REV_A)
config MSM_KGSL_DRM
bool "Build a DRM interface for the MSM_KGSL driver"
depends on MSM_KGSL && DRM
config MSM_KGSL_GPUMMU
bool "Enable the GPU MMU in the MSM_KGSL driver"
depends on MSM_KGSL && !MSM_KGSL_CFF_DUMP
default y
config MSM_KGSL_IOMMU
bool "Enable the use of IOMMU in the MSM_KGSL driver"
depends on MSM_KGSL && MSM_IOMMU && !MSM_KGSL_GPUMMU && !MSM_KGSL_CFF_DUMP
config MSM_KGSL_MMU
bool
depends on MSM_KGSL_GPUMMU || MSM_KGSL_IOMMU
default y
config KGSL_PER_PROCESS_PAGE_TABLE
bool "Enable Per Process page tables for the KGSL driver"
default n
depends on MSM_KGSL_GPUMMU && !MSM_KGSL_DRM
---help---
The MMU will use per process pagetables when enabled.
config MSM_KGSL_PAGE_TABLE_SIZE
hex "Size of pagetables"
default 0xFFF0000
---help---
Sets the pagetable size used by the MMU. The max value
is 0xFFF0000 or (256M - 64K).
config MSM_KGSL_PAGE_TABLE_COUNT
int "Minimum of concurrent pagetables to support"
default 8
depends on KGSL_PER_PROCESS_PAGE_TABLE
---help---
Specify the number of pagetables to allocate at init time
This is the number of concurrent processes that are guaranteed to
run at any time. Additional processes can be created dynamically
assuming there is enough contiguous memory to allocate the pagetable.
config MSM_KGSL_MMU_PAGE_FAULT
bool "Force the GPU MMU to page fault for unmapped regions"
default y
depends on MSM_KGSL_GPUMMU
config MSM_KGSL_DISABLE_SHADOW_WRITES
bool "Disable register shadow writes for context switches"
default n
depends on MSM_KGSL
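For reference, an illustrative defconfig selection for this driver on an Adreno 200-class part (not a copy of htcleo_defconfig; CFF dump stays off because of the performance warning above):

CONFIG_MSM_KGSL=y
CONFIG_MSM_KGSL_GPUMMU=y
CONFIG_MSM_KGSL_MMU_PAGE_FAULT=y
# CONFIG_MSM_KGSL_CFF_DUMP is not set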

drivers/gpu/msm/Makefile Executable file

@@ -0,0 +1,35 @@
ccflags-y := -Iinclude/drm
msm_kgsl_core-y = \
kgsl.o \
kgsl_sharedmem.o \
kgsl_pwrctrl.o \
kgsl_pwrscale.o \
kgsl_mmu.o \
kgsl_gpummu.o
msm_kgsl_core-$(CONFIG_DEBUG_FS) += kgsl_debugfs.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_CFF_DUMP) += kgsl_cffdump.o
msm_kgsl_core-$(CONFIG_MSM_KGSL_DRM) += kgsl_drm.o
msm_kgsl_core-$(CONFIG_MSM_SCM) += kgsl_pwrscale_trustzone.o
msm_kgsl_core-$(CONFIG_MSM_SLEEP_STATS) += kgsl_pwrscale_idlestats.o
msm_adreno-y += \
adreno_ringbuffer.o \
adreno_drawctxt.o \
adreno_postmortem.o \
adreno_a2xx.o \
adreno_a3xx.o \
adreno.o
msm_adreno-$(CONFIG_DEBUG_FS) += adreno_debugfs.o
msm_z180-y += z180.o
msm_kgsl_core-objs = $(msm_kgsl_core-y)
msm_adreno-objs = $(msm_adreno-y)
msm_z180-objs = $(msm_z180-y)
obj-$(CONFIG_MSM_KGSL) += msm_kgsl_core.o
obj-$(CONFIG_MSM_KGSL) += msm_adreno.o
obj-$(CONFIG_MSM_KGSL_2D) += msm_z180.o

drivers/gpu/msm/a200_reg.h

@@ -1,146 +1,149 @@
/*
* (C) Copyright Advanced Micro Devices, Inc. 2002, 2007
* Copyright (c) 2008-2009 QUALCOMM USA, INC.
*
* All source code in this file is licensed under the following license
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
* version 2 as published by the Free Software Foundation.
*
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
* See the GNU General Public License for more details.
*
* You should have received a copy of the GNU General Public License
* along with this program; if not, you can find it at http://www.fsf.org
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _YAMATO_REG_H
#define _YAMATO_REG_H
#ifndef __A200_REG_H
#define __A200_REG_H
enum VGT_EVENT_TYPE {
VS_DEALLOC = 0,
PS_DEALLOC = 1,
VS_DONE_TS = 2,
PS_DONE_TS = 3,
CACHE_FLUSH_TS = 4,
CONTEXT_DONE = 5,
CACHE_FLUSH = 6,
VIZQUERY_START = 7,
VIZQUERY_END = 8,
SC_WAIT_WC = 9,
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
PERFCOUNTER_START = 23,
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
VS_DEALLOC = 0,
PS_DEALLOC = 1,
VS_DONE_TS = 2,
PS_DONE_TS = 3,
CACHE_FLUSH_TS = 4,
CONTEXT_DONE = 5,
CACHE_FLUSH = 6,
VIZQUERY_START = 7,
VIZQUERY_END = 8,
SC_WAIT_WC = 9,
RST_PIX_CNT = 13,
RST_VTX_CNT = 14,
TILE_FLUSH = 15,
CACHE_FLUSH_AND_INV_TS_EVENT = 20,
ZPASS_DONE = 21,
CACHE_FLUSH_AND_INV_EVENT = 22,
PERFCOUNTER_START = 23,
PERFCOUNTER_STOP = 24,
VS_FETCH_DONE = 27,
FACENESS_FLUSH = 28,
};
enum COLORFORMATX {
COLORX_4_4_4_4 = 0,
COLORX_1_5_5_5 = 1,
COLORX_5_6_5 = 2,
COLORX_8 = 3,
COLORX_8_8 = 4,
COLORX_8_8_8_8 = 5,
COLORX_S8_8_8_8 = 6,
COLORX_16_FLOAT = 7,
COLORX_16_16_FLOAT = 8,
COLORX_16_16_16_16_FLOAT = 9,
COLORX_32_FLOAT = 10,
COLORX_32_32_FLOAT = 11,
COLORX_32_32_32_32_FLOAT = 12,
COLORX_2_3_3 = 13,
COLORX_8_8_8 = 14,
COLORX_4_4_4_4 = 0,
COLORX_1_5_5_5 = 1,
COLORX_5_6_5 = 2,
COLORX_8 = 3,
COLORX_8_8 = 4,
COLORX_8_8_8_8 = 5,
COLORX_S8_8_8_8 = 6,
COLORX_16_FLOAT = 7,
COLORX_16_16_FLOAT = 8,
COLORX_16_16_16_16_FLOAT = 9,
COLORX_32_FLOAT = 10,
COLORX_32_32_FLOAT = 11,
COLORX_32_32_32_32_FLOAT = 12,
COLORX_2_3_3 = 13,
COLORX_8_8_8 = 14,
};
enum SURFACEFORMAT {
FMT_1_REVERSE = 0,
FMT_1 = 1,
FMT_8 = 2,
FMT_1_5_5_5 = 3,
FMT_5_6_5 = 4,
FMT_6_5_5 = 5,
FMT_8_8_8_8 = 6,
FMT_2_10_10_10 = 7,
FMT_8_A = 8,
FMT_8_B = 9,
FMT_8_8 = 10,
FMT_Cr_Y1_Cb_Y0 = 11,
FMT_Y1_Cr_Y0_Cb = 12,
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
FMT_10_11_11 = 16,
FMT_11_11_10 = 17,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
FMT_24_8 = 22,
FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
FMT_16_EXPAND = 27,
FMT_16_16_EXPAND = 28,
FMT_16_16_16_16_EXPAND = 29,
FMT_16_FLOAT = 30,
FMT_16_16_FLOAT = 31,
FMT_16_16_16_16_FLOAT = 32,
FMT_32 = 33,
FMT_32_32 = 34,
FMT_32_32_32_32 = 35,
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
FMT_32_AS_8 = 39,
FMT_32_AS_8_8 = 40,
FMT_16_MPEG = 41,
FMT_16_16_MPEG = 42,
FMT_8_INTERLACED = 43,
FMT_32_AS_8_INTERLACED = 44,
FMT_32_AS_8_8_INTERLACED = 45,
FMT_16_INTERLACED = 46,
FMT_16_MPEG_INTERLACED = 47,
FMT_16_16_MPEG_INTERLACED = 48,
FMT_DXN = 49,
FMT_8_8_8_8_AS_16_16_16_16 = 50,
FMT_1_REVERSE = 0,
FMT_1 = 1,
FMT_8 = 2,
FMT_1_5_5_5 = 3,
FMT_5_6_5 = 4,
FMT_6_5_5 = 5,
FMT_8_8_8_8 = 6,
FMT_2_10_10_10 = 7,
FMT_8_A = 8,
FMT_8_B = 9,
FMT_8_8 = 10,
FMT_Cr_Y1_Cb_Y0 = 11,
FMT_Y1_Cr_Y0_Cb = 12,
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
FMT_10_11_11 = 16,
FMT_11_11_10 = 17,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
FMT_24_8 = 22,
FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
FMT_16_EXPAND = 27,
FMT_16_16_EXPAND = 28,
FMT_16_16_16_16_EXPAND = 29,
FMT_16_FLOAT = 30,
FMT_16_16_FLOAT = 31,
FMT_16_16_16_16_FLOAT = 32,
FMT_32 = 33,
FMT_32_32 = 34,
FMT_32_32_32_32 = 35,
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
FMT_32_AS_8 = 39,
FMT_32_AS_8_8 = 40,
FMT_16_MPEG = 41,
FMT_16_16_MPEG = 42,
FMT_8_INTERLACED = 43,
FMT_32_AS_8_INTERLACED = 44,
FMT_32_AS_8_8_INTERLACED = 45,
FMT_16_INTERLACED = 46,
FMT_16_MPEG_INTERLACED = 47,
FMT_16_16_MPEG_INTERLACED = 48,
FMT_DXN = 49,
FMT_8_8_8_8_AS_16_16_16_16 = 50,
FMT_DXT1_AS_16_16_16_16 = 51,
FMT_DXT2_3_AS_16_16_16_16 = 52,
FMT_DXT4_5_AS_16_16_16_16 = 53,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
FMT_10_11_11_AS_16_16_16_16 = 55,
FMT_11_11_10_AS_16_16_16_16 = 56,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
FMT_DXT3A_AS_1_1_1_1 = 61
FMT_DXT2_3_AS_16_16_16_16 = 52,
FMT_DXT4_5_AS_16_16_16_16 = 53,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
FMT_10_11_11_AS_16_16_16_16 = 55,
FMT_11_11_10_AS_16_16_16_16 = 56,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
FMT_DXT3A_AS_1_1_1_1 = 61
};
#define REG_PERF_MODE_CNT 0x0
#define REG_PERF_STATE_RESET 0x0
#define REG_PERF_STATE_ENABLE 0x1
#define REG_PERF_STATE_FREEZE 0x2
#define RB_EDRAM_INFO_EDRAM_SIZE_SIZE 4
#define RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE 2
#define RB_EDRAM_INFO_UNUSED0_SIZE 8
#define RB_EDRAM_INFO_EDRAM_RANGE_SIZE 18
struct rb_edram_info_t {
unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
unsigned int edram_size:RB_EDRAM_INFO_EDRAM_SIZE_SIZE;
unsigned int edram_mapping_mode:RB_EDRAM_INFO_EDRAM_MAPPING_MODE_SIZE;
unsigned int unused0:RB_EDRAM_INFO_UNUSED0_SIZE;
unsigned int edram_range:RB_EDRAM_INFO_EDRAM_RANGE_SIZE;
};
union reg_rb_edram_info {
unsigned int val:32;
struct rb_edram_info_t f;
unsigned int val;
struct rb_edram_info_t f;
};
#define RBBM_READ_ERROR_ADDRESS_MASK 0x0001fffc
#define RBBM_READ_ERROR_REQUESTER (1<<30)
#define RBBM_READ_ERROR_ERROR (1<<31)
#define CP_RB_CNTL_RB_BUFSZ_SIZE 6
#define CP_RB_CNTL_UNUSED0_SIZE 2
#define CP_RB_CNTL_RB_BLKSZ_SIZE 6
@@ -154,22 +157,22 @@ union reg_rb_edram_info {
#define CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE 1
struct cp_rb_cntl_t {
unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
unsigned int rb_bufsz:CP_RB_CNTL_RB_BUFSZ_SIZE;
unsigned int unused0:CP_RB_CNTL_UNUSED0_SIZE;
unsigned int rb_blksz:CP_RB_CNTL_RB_BLKSZ_SIZE;
unsigned int unused1:CP_RB_CNTL_UNUSED1_SIZE;
unsigned int buf_swap:CP_RB_CNTL_BUF_SWAP_SIZE;
unsigned int unused2:CP_RB_CNTL_UNUSED2_SIZE;
unsigned int rb_poll_en:CP_RB_CNTL_RB_POLL_EN_SIZE;
unsigned int unused3:CP_RB_CNTL_UNUSED3_SIZE;
unsigned int rb_no_update:CP_RB_CNTL_RB_NO_UPDATE_SIZE;
unsigned int unused4:CP_RB_CNTL_UNUSED4_SIZE;
unsigned int rb_rptr_wr_ena:CP_RB_CNTL_RB_RPTR_WR_ENA_SIZE;
};
union reg_cp_rb_cntl {
unsigned int val:32;
struct cp_rb_cntl_t f;
unsigned int val:32;
struct cp_rb_cntl_t f;
};
#define RB_COLOR_INFO__COLOR_FORMAT_MASK 0x0000000fL
@@ -179,10 +182,6 @@ union reg_cp_rb_cntl {
#define SQ_INT_CNTL__PS_WATCHDOG_MASK 0x00000001L
#define SQ_INT_CNTL__VS_WATCHDOG_MASK 0x00000002L
#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
#define RBBM_INT_CNTL__RDERR_INT_MASK 0x00000001L
#define RBBM_INT_CNTL__DISPLAY_UPDATE_INT_MASK 0x00000002L
#define RBBM_INT_CNTL__GUI_IDLE_INT_MASK 0x00080000L
@@ -226,41 +225,29 @@ union reg_cp_rb_cntl {
#define RB_EDRAM_INFO__EDRAM_SIZE_MASK 0x0000000fL
#define RB_EDRAM_INFO__EDRAM_RANGE_MASK 0xffffc000L
#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define MH_ARBITER_CONFIG__SAME_PAGE_GRANULARITY__SHIFT 0x00000006
#define MH_ARBITER_CONFIG__L1_ARB_ENABLE__SHIFT 0x00000007
#define MH_ARBITER_CONFIG__L1_ARB_HOLD_ENABLE__SHIFT 0x00000008
#define MH_ARBITER_CONFIG__L2_ARB_CONTROL__SHIFT 0x00000009
#define MH_ARBITER_CONFIG__PAGE_SIZE__SHIFT 0x0000000a
#define MH_ARBITER_CONFIG__TC_REORDER_ENABLE__SHIFT 0x0000000d
#define MH_ARBITER_CONFIG__TC_ARB_HOLD_ENABLE__SHIFT 0x0000000e
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT_ENABLE__SHIFT 0x0000000f
#define MH_ARBITER_CONFIG__IN_FLIGHT_LIMIT__SHIFT 0x00000010
#define MH_ARBITER_CONFIG__CP_CLNT_ENABLE__SHIFT 0x00000016
#define MH_ARBITER_CONFIG__VGT_CLNT_ENABLE__SHIFT 0x00000017
#define MH_ARBITER_CONFIG__TC_CLNT_ENABLE__SHIFT 0x00000018
#define MH_ARBITER_CONFIG__RB_CLNT_ENABLE__SHIFT 0x00000019
#define MH_ARBITER_CONFIG__PA_CLNT_ENABLE__SHIFT 0x0000001a
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
#define CP_RB_CNTL__RB_BUFSZ__SHIFT 0x00000000
#define CP_RB_CNTL__RB_BLKSZ__SHIFT 0x00000008
#define CP_RB_CNTL__RB_POLL_EN__SHIFT 0x00000014
#define CP_RB_CNTL__RB_NO_UPDATE__SHIFT 0x0000001b
#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
#define RB_COLOR_INFO__COLOR_FORMAT__SHIFT 0x00000000
#define RB_EDRAM_INFO__EDRAM_MAPPING_MODE__SHIFT 0x00000004
#define RB_EDRAM_INFO__EDRAM_RANGE__SHIFT 0x0000000e
#define REG_CP_CSQ_IB1_STAT 0x01FE
#define REG_CP_CSQ_IB2_STAT 0x01FF
@@ -276,6 +263,7 @@ union reg_cp_rb_cntl {
#define REG_CP_ME_CNTL 0x01F6
#define REG_CP_ME_RAM_DATA 0x01FA
#define REG_CP_ME_RAM_WADDR 0x01F8
#define REG_CP_ME_RAM_RADDR 0x01F9
#define REG_CP_ME_STATUS 0x01F7
#define REG_CP_PFP_UCODE_ADDR 0x00C0
#define REG_CP_PFP_UCODE_DATA 0x00C1
@@ -294,24 +282,21 @@ union reg_cp_rb_cntl {
#define REG_CP_ST_BASE 0x044D
#define REG_CP_ST_BUFSZ 0x044E
#define REG_MASTER_INT_SIGNAL 0x03B7
#define REG_CP_PERFMON_CNTL 0x0444
#define REG_CP_PERFCOUNTER_SELECT 0x0445
#define REG_CP_PERFCOUNTER_LO 0x0446
#define REG_CP_PERFCOUNTER_HI 0x0447
#define REG_MH_ARBITER_CONFIG 0x0A40
#define REG_MH_INTERRUPT_CLEAR 0x0A44
#define REG_MH_INTERRUPT_MASK 0x0A42
#define REG_MH_INTERRUPT_STATUS 0x0A43
#define REG_MH_MMU_CONFIG 0x0040
#define REG_MH_MMU_INVALIDATE 0x0045
#define REG_MH_MMU_MPU_BASE 0x0046
#define REG_MH_MMU_MPU_END 0x0047
#define REG_MH_MMU_PAGE_FAULT 0x0043
#define REG_MH_MMU_PT_BASE 0x0042
#define REG_MH_MMU_TRAN_ERROR 0x0044
#define REG_MH_MMU_VA_RANGE 0x0041
#define REG_RBBM_PERFCOUNTER1_SELECT 0x0395
#define REG_RBBM_PERFCOUNTER1_HI 0x0398
#define REG_RBBM_PERFCOUNTER1_LO 0x0397
#define REG_MASTER_INT_SIGNAL 0x03B7
#define REG_PA_CL_VPORT_XSCALE 0x210F
#define REG_PA_CL_VPORT_ZOFFSET 0x2114
#define REG_PA_CL_VPORT_ZSCALE 0x2113
#define REG_PA_CL_CLIP_CNTL 0x2204
#define REG_PA_CL_VTE_CNTL 0x2206
#define REG_PA_SC_AA_MASK 0x2312
#define REG_PA_SC_LINE_CNTL 0x2300
@@ -324,10 +309,13 @@ union reg_cp_rb_cntl {
#define REG_PA_SC_WINDOW_SCISSOR_TL 0x2081
#define REG_PA_SU_FACE_DATA 0x0C86
#define REG_PA_SU_POINT_SIZE 0x2280
#define REG_PA_SU_LINE_CNTL 0x2282
#define REG_PA_SU_POLY_OFFSET_BACK_OFFSET 0x2383
#define REG_PA_SU_POLY_OFFSET_FRONT_SCALE 0x2380
#define REG_PA_SU_SC_MODE_CNTL 0x2205
#define REG_PC_INDEX_OFFSET 0x2102
#define REG_RBBM_CNTL 0x003B
#define REG_RBBM_INT_ACK 0x03B6
#define REG_RBBM_INT_CNTL 0x03B4
@@ -336,6 +324,8 @@ union reg_cp_rb_cntl {
#define REG_RBBM_PERIPHID1 0x03F9
#define REG_RBBM_PERIPHID2 0x03FA
#define REG_RBBM_DEBUG 0x039B
#define REG_RBBM_DEBUG_OUT 0x03A0
#define REG_RBBM_DEBUG_CNTL 0x03A1
#define REG_RBBM_PM_OVERRIDE1 0x039C
#define REG_RBBM_PM_OVERRIDE2 0x039D
#define REG_RBBM_READ_ERROR 0x03B3
@@ -350,6 +340,7 @@ union reg_cp_rb_cntl {
#define REG_RB_EDRAM_INFO 0x0F02
#define REG_RB_MODECONTROL 0x2208
#define REG_RB_SURFACE_INFO 0x2000
#define REG_RB_SAMPLE_POS 0x220a
#define REG_SCRATCH_ADDR 0x01DD
#define REG_SCRATCH_REG0 0x0578
@@ -359,6 +350,7 @@ union reg_cp_rb_cntl {
#define REG_SQ_CF_BOOLEANS 0x4900
#define REG_SQ_CF_LOOP 0x4908
#define REG_SQ_GPR_MANAGEMENT 0x0D00
#define REG_SQ_FLOW_CONTROL 0x0D01
#define REG_SQ_INST_STORE_MANAGMENT 0x0D02
#define REG_SQ_INT_ACK 0x0D36
#define REG_SQ_INT_CNTL 0x0D34
@@ -374,27 +366,39 @@ union reg_cp_rb_cntl {
#define REG_VGT_MAX_VTX_INDX 0x2100
#define REG_VGT_MIN_VTX_INDX 0x2101
#define REG_TP0_CHICKEN 0x0E1E
#define REG_TC_CNTL_STATUS 0x0E00
#define REG_PA_SC_AA_CONFIG 0x2301
#define REG_TP0_CHICKEN 0x0E1E
#define REG_TC_CNTL_STATUS 0x0E00
#define REG_PA_SC_AA_CONFIG 0x2301
#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
#define REG_SQ_INTERPOLATOR_CNTL 0x2182
#define REG_RB_DEPTH_INFO 0x2002
#define REG_COHER_DEST_BASE_0 0x2006
#define REG_PA_SC_SCREEN_SCISSOR_BR 0x200F
#define REG_RB_FOG_COLOR 0x2109
#define REG_RB_STENCILREFMASK_BF 0x210C
#define REG_PA_SC_LINE_STIPPLE 0x2283
#define REG_SQ_PS_CONST 0x2308
#define REG_VGT_VERTEX_REUSE_BLOCK_CNTL 0x2316
#define REG_RB_DEPTH_CLEAR 0x231D
#define REG_RB_SAMPLE_COUNT_CTL 0x2324
#define REG_SQ_CONSTANT_0 0x4000
#define REG_SQ_FETCH_0 0x4800
#define REG_MH_AXI_ERROR 0xA45
#define REG_COHER_BASE_PM4 0xA2A
#define REG_COHER_STATUS_PM4 0xA2B
#define REG_COHER_SIZE_PM4 0xA29
#define REG_COHER_BASE_PM4 0xA2A
#define REG_COHER_STATUS_PM4 0xA2B
#define REG_COHER_SIZE_PM4 0xA29
#endif /* _YAMATO_REG_H */
/*registers added in adreno220*/
#define REG_A220_PC_INDX_OFFSET REG_VGT_INDX_OFFSET
#define REG_A220_PC_VERTEX_REUSE_BLOCK_CNTL REG_VGT_VERTEX_REUSE_BLOCK_CNTL
#define REG_A220_PC_MAX_VTX_INDX REG_VGT_MAX_VTX_INDX
#define REG_A220_RB_LRZ_VSC_CONTROL 0x2209
#define REG_A220_GRAS_CONTROL 0x2210
#define REG_A220_VSC_BIN_SIZE 0x0C01
#define REG_A220_VSC_PIPE_DATA_LENGTH_7 0x0C1D
/*registers added in adreno225*/
#define REG_A225_RB_COLOR_INFO3 0x2005
#define REG_A225_PC_MULTI_PRIM_IB_RESET_INDX 0x2103
#define REG_A225_GRAS_UCP0X 0x2340
#define REG_A225_GRAS_UCP_ENABLED 0x2360
#endif /* __A200_REG_H */

drivers/gpu/msm/a3xx_reg.h Executable file

@@ -0,0 +1,453 @@
/* Copyright (c) 2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _A300_REG_H
#define _A300_REG_H
/* Interrupt bit positions within RBBM_INT_0 */
#define A3XX_INT_RBBM_GPU_IDLE 0
#define A3XX_INT_RBBM_AHB_ERROR 1
#define A3XX_INT_RBBM_REG_TIMEOUT 2
#define A3XX_INT_RBBM_ME_MS_TIMEOUT 3
#define A3XX_INT_RBBM_PFP_MS_TIMEOUT 4
#define A3XX_INT_RBBM_ATB_BUS_OVERFLOW 5
#define A3XX_INT_VFD_ERROR 6
#define A3XX_INT_CP_SW_INT 7
#define A3XX_INT_CP_T0_PACKET_IN_IB 8
#define A3XX_INT_CP_OPCODE_ERROR 9
#define A3XX_INT_CP_RESERVED_BIT_ERROR 10
#define A3XX_INT_CP_HW_FAULT 11
#define A3XX_INT_CP_DMA 12
#define A3XX_INT_CP_IB2_INT 13
#define A3XX_INT_CP_IB1_INT 14
#define A3XX_INT_CP_RB_INT 15
#define A3XX_INT_CP_REG_PROTECT_FAULT 16
#define A3XX_INT_CP_RB_DONE_TS 17
#define A3XX_INT_CP_VS_DONE_TS 18
#define A3XX_INT_CP_PS_DONE_TS 19
#define A3XX_INT_CACHE_FLUSH_TS 20
#define A3XX_INT_CP_AHB_ERROR_HALT 21
#define A3XX_INT_MISC_HANG_DETECT 24
#define A3XX_INT_UCHE_OOB_ACCESS 25
/* Register definitions */
#define A3XX_RBBM_HW_VERSION 0x000
#define A3XX_RBBM_HW_RELEASE 0x001
#define A3XX_RBBM_HW_CONFIGURATION 0x002
#define A3XX_RBBM_SW_RESET_CMD 0x018
#define A3XX_RBBM_AHB_CTL0 0x020
#define A3XX_RBBM_AHB_CTL1 0x021
#define A3XX_RBBM_AHB_CMD 0x022
#define A3XX_RBBM_AHB_ERROR_STATUS 0x027
#define A3XX_RBBM_GPR0_CTL 0x02E
/* This is the same register as on A2XX, just in a different place */
#define A3XX_RBBM_STATUS 0x030
#define A3XX_RBBM_INTERFACE_HANG_INT_CTL 0x50
#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL0 0x51
#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL1 0x54
#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL2 0x57
#define A3XX_RBBM_INTERFACE_HANG_MASK_CTL3 0x5A
#define A3XX_RBBM_INT_CLEAR_CMD 0x061
#define A3XX_RBBM_INT_0_MASK 0x063
#define A3XX_RBBM_INT_0_STATUS 0x064
#define A3XX_RBBM_GPU_BUSY_MASKED 0x88
#define A3XX_RBBM_RBBM_CTL 0x100
#define A3XX_RBBM_PERFCTR_PWR_1_LO 0x0EC
#define A3XX_RBBM_PERFCTR_PWR_1_HI 0x0ED
/* The following two are the same as on A2XX, just in a different place */
#define A3XX_CP_PFP_UCODE_ADDR 0x1C9
#define A3XX_CP_PFP_UCODE_DATA 0x1CA
#define A3XX_CP_HW_FAULT 0x45C
#define A3XX_CP_AHB_FAULT 0x54D
#define A3XX_CP_PROTECT_CTRL 0x45E
#define A3XX_CP_PROTECT_STATUS 0x45F
#define A3XX_CP_PROTECT_REG_0 0x460
#define A3XX_CP_PROTECT_REG_1 0x461
#define A3XX_CP_PROTECT_REG_2 0x462
#define A3XX_CP_PROTECT_REG_3 0x463
#define A3XX_CP_PROTECT_REG_4 0x464
#define A3XX_CP_PROTECT_REG_5 0x465
#define A3XX_CP_PROTECT_REG_6 0x466
#define A3XX_CP_PROTECT_REG_7 0x467
#define A3XX_CP_PROTECT_REG_8 0x468
#define A3XX_CP_PROTECT_REG_9 0x469
#define A3XX_CP_PROTECT_REG_A 0x46A
#define A3XX_CP_PROTECT_REG_B 0x46B
#define A3XX_CP_PROTECT_REG_C 0x46C
#define A3XX_CP_PROTECT_REG_D 0x46D
#define A3XX_CP_PROTECT_REG_E 0x46E
#define A3XX_CP_PROTECT_REG_F 0x46F
#define A3XX_CP_SCRATCH_REG2 0x57A
#define A3XX_CP_SCRATCH_REG3 0x57B
#define A3XX_VSC_BIN_SIZE 0xC01
#define A3XX_VSC_SIZE_ADDRESS 0xC02
#define A3XX_VSC_PIPE_CONFIG_0 0xC06
#define A3XX_VSC_PIPE_DATA_ADDRESS_0 0xC07
#define A3XX_VSC_PIPE_DATA_LENGTH_0 0xC08
#define A3XX_VSC_PIPE_CONFIG_1 0xC09
#define A3XX_VSC_PIPE_DATA_ADDRESS_1 0xC0A
#define A3XX_VSC_PIPE_DATA_LENGTH_1 0xC0B
#define A3XX_VSC_PIPE_CONFIG_2 0xC0C
#define A3XX_VSC_PIPE_DATA_ADDRESS_2 0xC0D
#define A3XX_VSC_PIPE_DATA_LENGTH_2 0xC0E
#define A3XX_VSC_PIPE_CONFIG_3 0xC0F
#define A3XX_VSC_PIPE_DATA_ADDRESS_3 0xC10
#define A3XX_VSC_PIPE_DATA_LENGTH_3 0xC11
#define A3XX_VSC_PIPE_CONFIG_4 0xC12
#define A3XX_VSC_PIPE_DATA_ADDRESS_4 0xC13
#define A3XX_VSC_PIPE_DATA_LENGTH_4 0xC14
#define A3XX_VSC_PIPE_CONFIG_5 0xC15
#define A3XX_VSC_PIPE_DATA_ADDRESS_5 0xC16
#define A3XX_VSC_PIPE_DATA_LENGTH_5 0xC17
#define A3XX_VSC_PIPE_CONFIG_6 0xC18
#define A3XX_VSC_PIPE_DATA_ADDRESS_6 0xC19
#define A3XX_VSC_PIPE_DATA_LENGTH_6 0xC1A
#define A3XX_VSC_PIPE_CONFIG_7 0xC1B
#define A3XX_VSC_PIPE_DATA_ADDRESS_7 0xC1C
#define A3XX_VSC_PIPE_DATA_LENGTH_7 0xC1D
#define A3XX_GRAS_CL_USER_PLANE_X0 0xCA0
#define A3XX_GRAS_CL_USER_PLANE_Y0 0xCA1
#define A3XX_GRAS_CL_USER_PLANE_Z0 0xCA2
#define A3XX_GRAS_CL_USER_PLANE_W0 0xCA3
#define A3XX_GRAS_CL_USER_PLANE_X1 0xCA4
#define A3XX_GRAS_CL_USER_PLANE_Y1 0xCA5
#define A3XX_GRAS_CL_USER_PLANE_Z1 0xCA6
#define A3XX_GRAS_CL_USER_PLANE_W1 0xCA7
#define A3XX_GRAS_CL_USER_PLANE_X2 0xCA8
#define A3XX_GRAS_CL_USER_PLANE_Y2 0xCA9
#define A3XX_GRAS_CL_USER_PLANE_Z2 0xCAA
#define A3XX_GRAS_CL_USER_PLANE_W2 0xCAB
#define A3XX_GRAS_CL_USER_PLANE_X3 0xCAC
#define A3XX_GRAS_CL_USER_PLANE_Y3 0xCAD
#define A3XX_GRAS_CL_USER_PLANE_Z3 0xCAE
#define A3XX_GRAS_CL_USER_PLANE_W3 0xCAF
#define A3XX_GRAS_CL_USER_PLANE_X4 0xCB0
#define A3XX_GRAS_CL_USER_PLANE_Y4 0xCB1
#define A3XX_GRAS_CL_USER_PLANE_Z4 0xCB2
#define A3XX_GRAS_CL_USER_PLANE_W4 0xCB3
#define A3XX_GRAS_CL_USER_PLANE_X5 0xCB4
#define A3XX_GRAS_CL_USER_PLANE_Y5 0xCB5
#define A3XX_GRAS_CL_USER_PLANE_Z5 0xCB6
#define A3XX_GRAS_CL_USER_PLANE_W5 0xCB7
#define A3XX_UCHE_CACHE_INVALIDATE0_REG 0xEA0
#define A3XX_GRAS_CL_CLIP_CNTL 0x2040
#define A3XX_GRAS_CL_GB_CLIP_ADJ 0x2044
#define A3XX_GRAS_CL_VPORT_XOFFSET 0x2048
#define A3XX_GRAS_CL_VPORT_ZOFFSET 0x204C
#define A3XX_GRAS_CL_VPORT_ZSCALE 0x204D
#define A3XX_GRAS_SU_POINT_MINMAX 0x2068
#define A3XX_GRAS_SU_POINT_SIZE 0x2069
#define A3XX_GRAS_SU_POLY_OFFSET_SCALE 0x206C
#define A3XX_GRAS_SU_POLY_OFFSET_OFFSET 0x206D
#define A3XX_GRAS_SU_MODE_CONTROL 0x2070
#define A3XX_GRAS_SC_CONTROL 0x2072
#define A3XX_GRAS_SC_SCREEN_SCISSOR_TL 0x2074
#define A3XX_GRAS_SC_SCREEN_SCISSOR_BR 0x2075
#define A3XX_GRAS_SC_WINDOW_SCISSOR_TL 0x2079
#define A3XX_GRAS_SC_WINDOW_SCISSOR_BR 0x207A
#define A3XX_RB_MODE_CONTROL 0x20C0
#define A3XX_RB_RENDER_CONTROL 0x20C1
#define A3XX_RB_MSAA_CONTROL 0x20C2
#define A3XX_RB_MRT_CONTROL0 0x20C4
#define A3XX_RB_MRT_BUF_INFO0 0x20C5
#define A3XX_RB_MRT_BLEND_CONTROL0 0x20C7
#define A3XX_RB_MRT_BLEND_CONTROL1 0x20CB
#define A3XX_RB_MRT_BLEND_CONTROL2 0x20CF
#define A3XX_RB_MRT_BLEND_CONTROL3 0x20D3
#define A3XX_RB_BLEND_RED 0x20E4
#define A3XX_RB_COPY_CONTROL 0x20EC
#define A3XX_RB_COPY_DEST_INFO 0x20EF
#define A3XX_RB_DEPTH_CONTROL 0x2100
#define A3XX_RB_STENCIL_CONTROL 0x2104
#define A3XX_PC_VSTREAM_CONTROL 0x21E4
#define A3XX_PC_VERTEX_REUSE_BLOCK_CNTL 0x21EA
#define A3XX_PC_PRIM_VTX_CNTL 0x21EC
#define A3XX_PC_RESTART_INDEX 0x21ED
#define A3XX_HLSQ_CONTROL_0_REG 0x2200
#define A3XX_HLSQ_VS_CONTROL_REG 0x2204
#define A3XX_HLSQ_CONST_FSPRESV_RANGE_REG 0x2207
#define A3XX_HLSQ_CL_NDRANGE_0_REG 0x220A
#define A3XX_HLSQ_CL_NDRANGE_2_REG 0x220C
#define A3XX_HLSQ_CL_CONTROL_0_REG 0x2211
#define A3XX_HLSQ_CL_CONTROL_1_REG 0x2212
#define A3XX_HLSQ_CL_KERNEL_CONST_REG 0x2214
#define A3XX_HLSQ_CL_KERNEL_GROUP_X_REG 0x2215
#define A3XX_HLSQ_CL_KERNEL_GROUP_Z_REG 0x2217
#define A3XX_HLSQ_CL_WG_OFFSET_REG 0x221A
#define A3XX_VFD_CONTROL_0 0x2240
#define A3XX_VFD_INDEX_MIN 0x2242
#define A3XX_VFD_FETCH_INSTR_0_0 0x2246
#define A3XX_VFD_FETCH_INSTR_0_4 0x224E
#define A3XX_VFD_DECODE_INSTR_0 0x2266
#define A3XX_VFD_VS_THREADING_THRESHOLD 0x227E
#define A3XX_VPC_ATTR 0x2280
#define A3XX_VPC_VARY_CYLWRAP_ENABLE_1 0x228B
#define A3XX_SP_SP_CTRL_REG 0x22C0
#define A3XX_SP_VS_CTRL_REG0 0x22C4
#define A3XX_SP_VS_CTRL_REG1 0x22C5
#define A3XX_SP_VS_PARAM_REG 0x22C6
#define A3XX_SP_VS_OUT_REG_7 0x22CE
#define A3XX_SP_VS_VPC_DST_REG_0 0x22D0
#define A3XX_SP_VS_OBJ_OFFSET_REG 0x22D4
#define A3XX_SP_VS_PVT_MEM_SIZE_REG 0x22D8
#define A3XX_SP_VS_LENGTH_REG 0x22DF
#define A3XX_SP_FS_CTRL_REG0 0x22E0
#define A3XX_SP_FS_CTRL_REG1 0x22E1
#define A3XX_SP_FS_OBJ_OFFSET_REG 0x22E2
#define A3XX_SP_FS_PVT_MEM_SIZE_REG 0x22E6
#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_0 0x22E8
#define A3XX_SP_FS_FLAT_SHAD_MODE_REG_1 0x22E9
#define A3XX_SP_FS_OUTPUT_REG 0x22EC
#define A3XX_SP_FS_MRT_REG_0 0x22F0
#define A3XX_SP_FS_IMAGE_OUTPUT_REG_0 0x22F4
#define A3XX_SP_FS_IMAGE_OUTPUT_REG_3 0x22F7
#define A3XX_SP_FS_LENGTH_REG 0x22FF
#define A3XX_TPL1_TP_VS_TEX_OFFSET 0x2340
#define A3XX_TPL1_TP_FS_TEX_OFFSET 0x2342
#define A3XX_TPL1_TP_FS_BORDER_COLOR_BASE_ADDR 0x2343
#define A3XX_VBIF_FIXED_SORT_EN 0x300C
#define A3XX_VBIF_FIXED_SORT_SEL0 0x300D
#define A3XX_VBIF_FIXED_SORT_SEL1 0x300E
/* Bit flags for RBBM_CTL */
#define RBBM_RBBM_CTL_RESET_PWR_CTR1 (1 << 1)
#define RBBM_RBBM_CTL_ENABLE_PWR_CTR1 (17 << 1)
/* Various flags used by the context switch code */
#define SP_MULTI 0
#define SP_BUFFER_MODE 1
#define SP_TWO_VTX_QUADS 0
#define SP_PIXEL_BASED 0
#define SP_R8G8B8A8_UNORM 8
#define SP_FOUR_PIX_QUADS 1
#define HLSQ_DIRECT 0
#define HLSQ_BLOCK_ID_SP_VS 4
#define HLSQ_SP_VS_INSTR 0
#define HLSQ_SP_FS_INSTR 0
#define HLSQ_BLOCK_ID_SP_FS 6
#define HLSQ_TWO_PIX_QUADS 0
#define HLSQ_TWO_VTX_QUADS 0
#define HLSQ_BLOCK_ID_TP_TEX 2
#define HLSQ_TP_TEX_SAMPLERS 0
#define HLSQ_TP_TEX_MEMOBJ 1
#define HLSQ_BLOCK_ID_TP_MIPMAP 3
#define HLSQ_TP_MIPMAP_BASE 1
#define HLSQ_FOUR_PIX_QUADS 1
#define RB_FACTOR_ONE 1
#define RB_BLEND_OP_ADD 0
#define RB_FACTOR_ZERO 0
#define RB_DITHER_DISABLE 0
#define RB_DITHER_ALWAYS 1
#define RB_FRAG_NEVER 0
#define RB_ENDIAN_NONE 0
#define RB_R8G8B8A8_UNORM 8
#define RB_RESOLVE_PASS 2
#define RB_CLEAR_MODE_RESOLVE 1
#define RB_TILINGMODE_LINEAR 0
#define RB_REF_NEVER 0
#define RB_STENCIL_KEEP 0
#define RB_RENDERING_PASS 0
#define RB_TILINGMODE_32X32 2
#define PC_DRAW_TRIANGLES 2
#define PC_DI_PT_RECTLIST 8
#define PC_DI_SRC_SEL_AUTO_INDEX 2
#define PC_DI_INDEX_SIZE_16_BIT 0
#define PC_DI_IGNORE_VISIBILITY 0
#define PC_DI_PT_TRILIST 4
#define PC_DI_SRC_SEL_IMMEDIATE 1
#define PC_DI_INDEX_SIZE_32_BIT 1
#define UCHE_ENTIRE_CACHE 1
#define UCHE_OP_INVALIDATE 1
/*
* The following are bit field shifts within some of the registers defined
* above. These are used in the context switch code in conjunction with the
* _SET macro
*/
#define GRAS_CL_CLIP_CNTL_CLIP_DISABLE 16
#define GRAS_CL_CLIP_CNTL_IJ_PERSP_CENTER 12
#define GRAS_CL_CLIP_CNTL_PERSP_DIVISION_DISABLE 21
#define GRAS_CL_CLIP_CNTL_VP_CLIP_CODE_IGNORE 19
#define GRAS_CL_CLIP_CNTL_VP_XFORM_DISABLE 20
#define GRAS_CL_CLIP_CNTL_ZFAR_CLIP_DISABLE 17
#define GRAS_CL_VPORT_XSCALE_VPORT_XSCALE 0
#define GRAS_CL_VPORT_YSCALE_VPORT_YSCALE 0
#define GRAS_CL_VPORT_ZSCALE_VPORT_ZSCALE 0
#define GRAS_SC_CONTROL_RASTER_MODE 12
#define GRAS_SC_CONTROL_RENDER_MODE 4
#define GRAS_SC_SCREEN_SCISSOR_BR_BR_X 0
#define GRAS_SC_SCREEN_SCISSOR_BR_BR_Y 16
#define GRAS_SC_WINDOW_SCISSOR_BR_BR_X 0
#define GRAS_SC_WINDOW_SCISSOR_BR_BR_Y 16
#define HLSQ_CONSTFSPRESERVEDRANGEREG_ENDENTRY 16
#define HLSQ_CONSTFSPRESERVEDRANGEREG_STARTENTRY 0
#define HLSQ_CTRL0REG_CHUNKDISABLE 26
#define HLSQ_CTRL0REG_CONSTSWITCHMODE 27
#define HLSQ_CTRL0REG_FSSUPERTHREADENABLE 6
#define HLSQ_CTRL0REG_FSTHREADSIZE 4
#define HLSQ_CTRL0REG_LAZYUPDATEDISABLE 28
#define HLSQ_CTRL0REG_RESERVED2 10
#define HLSQ_CTRL0REG_SPCONSTFULLUPDATE 29
#define HLSQ_CTRL0REG_SPSHADERRESTART 9
#define HLSQ_CTRL0REG_TPFULLUPDATE 30
#define HLSQ_CTRL1REG_RESERVED1 9
#define HLSQ_CTRL1REG_VSSUPERTHREADENABLE 8
#define HLSQ_CTRL1REG_VSTHREADSIZE 6
#define HLSQ_CTRL2REG_PRIMALLOCTHRESHOLD 26
#define HLSQ_FSCTRLREG_FSCONSTLENGTH 0
#define HLSQ_FSCTRLREG_FSCONSTSTARTOFFSET 12
#define HLSQ_FSCTRLREG_FSINSTRLENGTH 24
#define HLSQ_VSCTRLREG_VSINSTRLENGTH 24
#define PC_PRIM_VTX_CONTROL_POLYMODE_BACK_PTYPE 8
#define PC_PRIM_VTX_CONTROL_POLYMODE_FRONT_PTYPE 5
#define PC_PRIM_VTX_CONTROL_PROVOKING_VTX_LAST 25
#define PC_PRIM_VTX_CONTROL_STRIDE_IN_VPC 0
#define PC_DRAW_INITIATOR_PRIM_TYPE 0
#define PC_DRAW_INITIATOR_SOURCE_SELECT 6
#define PC_DRAW_INITIATOR_VISIBILITY_CULLING_MODE 9
#define PC_DRAW_INITIATOR_INDEX_SIZE 0x0B
#define PC_DRAW_INITIATOR_SMALL_INDEX 0x0D
#define PC_DRAW_INITIATOR_PRE_DRAW_INITIATOR_ENABLE 0x0E
#define RB_COPYCONTROL_COPY_GMEM_BASE 14
#define RB_COPYCONTROL_RESOLVE_CLEAR_MODE 4
#define RB_COPYDESTBASE_COPY_DEST_BASE 4
#define RB_COPYDESTINFO_COPY_COMPONENT_ENABLE 14
#define RB_COPYDESTINFO_COPY_DEST_ENDIAN 18
#define RB_COPYDESTINFO_COPY_DEST_FORMAT 2
#define RB_COPYDESTINFO_COPY_DEST_TILE 0
#define RB_COPYDESTPITCH_COPY_DEST_PITCH 0
#define RB_DEPTHCONTROL_Z_TEST_FUNC 4
#define RB_MODECONTROL_RENDER_MODE 8
#define RB_MODECONTROL_MARB_CACHE_SPLIT_MODE 15
#define RB_MODECONTROL_PACKER_TIMER_ENABLE 16
#define RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE 21
#define RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR 24
#define RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR 16
#define RB_MRTBLENDCONTROL_CLAMP_ENABLE 29
#define RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE 5
#define RB_MRTBLENDCONTROL_RGB_DEST_FACTOR 8
#define RB_MRTBLENDCONTROL_RGB_SRC_FACTOR 0
#define RB_MRTBUFBASE_COLOR_BUF_BASE 4
#define RB_MRTBUFINFO_COLOR_BUF_PITCH 17
#define RB_MRTBUFINFO_COLOR_FORMAT 0
#define RB_MRTBUFINFO_COLOR_TILE_MODE 6
#define RB_MRTCONTROL_COMPONENT_ENABLE 24
#define RB_MRTCONTROL_DITHER_MODE 12
#define RB_MRTCONTROL_READ_DEST_ENABLE 3
#define RB_MRTCONTROL_ROP_CODE 8
#define RB_MSAACONTROL_MSAA_DISABLE 10
#define RB_MSAACONTROL_SAMPLE_MASK 16
#define RB_RENDERCONTROL_ALPHA_TEST_FUNC 24
#define RB_RENDERCONTROL_BIN_WIDTH 4
#define RB_RENDERCONTROL_DISABLE_COLOR_PIPE 12
#define RB_STENCILCONTROL_STENCIL_FAIL 11
#define RB_STENCILCONTROL_STENCIL_FAIL_BF 23
#define RB_STENCILCONTROL_STENCIL_FUNC 8
#define RB_STENCILCONTROL_STENCIL_FUNC_BF 20
#define RB_STENCILCONTROL_STENCIL_ZFAIL 17
#define RB_STENCILCONTROL_STENCIL_ZFAIL_BF 29
#define RB_STENCILCONTROL_STENCIL_ZPASS 14
#define RB_STENCILCONTROL_STENCIL_ZPASS_BF 26
#define SP_FSCTRLREG0_FSFULLREGFOOTPRINT 10
#define SP_FSCTRLREG0_FSICACHEINVALID 2
#define SP_FSCTRLREG0_FSINOUTREGOVERLAP 18
#define SP_FSCTRLREG0_FSINSTRBUFFERMODE 1
#define SP_FSCTRLREG0_FSLENGTH 24
#define SP_FSCTRLREG0_FSSUPERTHREADMODE 21
#define SP_FSCTRLREG0_FSTHREADMODE 0
#define SP_FSCTRLREG0_FSTHREADSIZE 20
#define SP_FSCTRLREG0_PIXLODENABLE 22
#define SP_FSCTRLREG1_FSCONSTLENGTH 0
#define SP_FSCTRLREG1_FSINITIALOUTSTANDING 20
#define SP_FSCTRLREG1_HALFPRECVAROFFSET 24
#define SP_FSMRTREG_REGID 0
#define SP_FSOUTREG_PAD0 2
#define SP_IMAGEOUTPUTREG_MRTFORMAT 0
#define SP_IMAGEOUTPUTREG_PAD0 6
#define SP_OBJOFFSETREG_CONSTOBJECTSTARTOFFSET 16
#define SP_OBJOFFSETREG_SHADEROBJOFFSETINIC 25
#define SP_SHADERLENGTH_LEN 0
#define SP_SPCTRLREG_CONSTMODE 18
#define SP_SPCTRLREG_SLEEPMODE 20
#define SP_VSCTRLREG0_VSFULLREGFOOTPRINT 10
#define SP_VSCTRLREG0_VSICACHEINVALID 2
#define SP_VSCTRLREG0_VSINSTRBUFFERMODE 1
#define SP_VSCTRLREG0_VSLENGTH 24
#define SP_VSCTRLREG0_VSSUPERTHREADMODE 21
#define SP_VSCTRLREG0_VSTHREADMODE 0
#define SP_VSCTRLREG0_VSTHREADSIZE 20
#define SP_VSCTRLREG1_VSINITIALOUTSTANDING 24
#define SP_VSOUTREG_COMPMASK0 9
#define SP_VSPARAMREG_POSREGID 0
#define SP_VSPARAMREG_PSIZEREGID 8
#define SP_VSPARAMREG_TOTALVSOUTVAR 20
#define SP_VSVPCDSTREG_OUTLOC0 0
#define TPL1_TPTEXOFFSETREG_BASETABLEPTR 16
#define TPL1_TPTEXOFFSETREG_MEMOBJOFFSET 8
#define TPL1_TPTEXOFFSETREG_SAMPLEROFFSET 0
#define UCHE_INVALIDATE1REG_OPCODE 0x1C
#define UCHE_INVALIDATE1REG_ALLORPORTION 0x1F
#define VFD_BASEADDR_BASEADDR 0
#define VFD_CTRLREG0_PACKETSIZE 18
#define VFD_CTRLREG0_STRMDECINSTRCNT 22
#define VFD_CTRLREG0_STRMFETCHINSTRCNT 27
#define VFD_CTRLREG0_TOTALATTRTOVS 0
#define VFD_CTRLREG1_MAXSTORAGE 0
#define VFD_CTRLREG1_REGID4INST 24
#define VFD_CTRLREG1_REGID4VTX 16
#define VFD_DECODEINSTRUCTIONS_CONSTFILL 4
#define VFD_DECODEINSTRUCTIONS_FORMAT 6
#define VFD_DECODEINSTRUCTIONS_LASTCOMPVALID 29
#define VFD_DECODEINSTRUCTIONS_REGID 12
#define VFD_DECODEINSTRUCTIONS_SHIFTCNT 24
#define VFD_DECODEINSTRUCTIONS_SWITCHNEXT 30
#define VFD_DECODEINSTRUCTIONS_WRITEMASK 0
#define VFD_FETCHINSTRUCTIONS_BUFSTRIDE 7
#define VFD_FETCHINSTRUCTIONS_FETCHSIZE 0
#define VFD_FETCHINSTRUCTIONS_INDEXDECODE 18
#define VFD_FETCHINSTRUCTIONS_STEPRATE 24
#define VFD_FETCHINSTRUCTIONS_SWITCHNEXT 17
#define VFD_THREADINGTHRESHOLD_REGID_VTXCNT 8
#define VFD_THREADINGTHRESHOLD_RESERVED6 4
#define VPC_VPCATTR_LMSIZE 28
#define VPC_VPCATTR_THRHDASSIGN 12
#define VPC_VPCATTR_TOTALATTR 0
#define VPC_VPCPACK_NUMFPNONPOSVAR 8
#define VPC_VPCPACK_NUMNONPOSVSVAR 16
#define VPC_VPCVARPSREPLMODE_COMPONENT08 0
#define VPC_VPCVARPSREPLMODE_COMPONENT09 2
#define VPC_VPCVARPSREPLMODE_COMPONENT0A 4
#define VPC_VPCVARPSREPLMODE_COMPONENT0B 6
#define VPC_VPCVARPSREPLMODE_COMPONENT0C 8
#define VPC_VPCVARPSREPLMODE_COMPONENT0D 10
#define VPC_VPCVARPSREPLMODE_COMPONENT0E 12
#define VPC_VPCVARPSREPLMODE_COMPONENT0F 14
#define VPC_VPCVARPSREPLMODE_COMPONENT10 16
#define VPC_VPCVARPSREPLMODE_COMPONENT11 18
#define VPC_VPCVARPSREPLMODE_COMPONENT12 20
#define VPC_VPCVARPSREPLMODE_COMPONENT13 22
#define VPC_VPCVARPSREPLMODE_COMPONENT14 24
#define VPC_VPCVARPSREPLMODE_COMPONENT15 26
#define VPC_VPCVARPSREPLMODE_COMPONENT16 28
#define VPC_VPCVARPSREPLMODE_COMPONENT17 30
#endif
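The shift constants in the second half of this header are combined into register words by the context-switch code. A hedged sketch of the pattern, with a local stand-in for the driver's _SET macro (the macro name and its exact definition in the driver are assumptions here):

/* Illustrative: compose an RB_MRT_BLEND_CONTROL value from the shifts above. */
#define _SET(shift, val) ((unsigned int)(val) << (shift))

static const unsigned int rb_blend =
	_SET(RB_MRTBLENDCONTROL_RGB_SRC_FACTOR, RB_FACTOR_ONE) |
	_SET(RB_MRTBLENDCONTROL_RGB_BLEND_OPCODE, RB_BLEND_OP_ADD) |
	_SET(RB_MRTBLENDCONTROL_RGB_DEST_FACTOR, RB_FACTOR_ZERO) |
	_SET(RB_MRTBLENDCONTROL_ALPHA_SRC_FACTOR, RB_FACTOR_ONE) |
	_SET(RB_MRTBLENDCONTROL_ALPHA_BLEND_OPCODE, RB_BLEND_OP_ADD) |
	_SET(RB_MRTBLENDCONTROL_ALPHA_DEST_FACTOR, RB_FACTOR_ZERO);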

drivers/gpu/msm/adreno.c Executable file

File diff suppressed because it is too large

drivers/gpu/msm/adreno.h Executable file

@@ -0,0 +1,197 @@
/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_H
#define __ADRENO_H
#include "kgsl_device.h"
#include "adreno_drawctxt.h"
#include "adreno_ringbuffer.h"
#define DEVICE_3D_NAME "kgsl-3d"
#define DEVICE_3D0_NAME "kgsl-3d0"
#define ADRENO_DEVICE(device) \
KGSL_CONTAINER_OF(device, struct adreno_device, dev)
/* Flags to control command packet settings */
#define KGSL_CMD_FLAGS_NONE 0x00000000
#define KGSL_CMD_FLAGS_PMODE 0x00000001
#define KGSL_CMD_FLAGS_NO_TS_CMP 0x00000002
#define KGSL_CMD_FLAGS_NOT_KERNEL_CMD 0x00000004
/* Command identifiers */
#define KGSL_CONTEXT_TO_MEM_IDENTIFIER 0x2EADBEEF
#define KGSL_CMD_IDENTIFIER 0x2EEDFACE
#define KGSL_START_OF_IB_IDENTIFIER 0x2EADEABE
#define KGSL_END_OF_IB_IDENTIFIER 0x2ABEDEAD
#ifdef CONFIG_MSM_SCM
#define ADRENO_DEFAULT_PWRSCALE_POLICY (&kgsl_pwrscale_policy_tz)
#else
#define ADRENO_DEFAULT_PWRSCALE_POLICY NULL
#endif
#define ADRENO_ISTORE_START 0x5000 /* Istore offset */
enum adreno_gpurev {
ADRENO_REV_UNKNOWN = 0,
ADRENO_REV_A200 = 200,
ADRENO_REV_A203 = 203,
ADRENO_REV_A205 = 205,
ADRENO_REV_A220 = 220,
ADRENO_REV_A225 = 225,
ADRENO_REV_A305 = 305,
ADRENO_REV_A320 = 320,
};
struct adreno_gpudev;
struct adreno_device {
struct kgsl_device dev; /* Must be first field in this struct */
unsigned int chip_id;
enum adreno_gpurev gpurev;
struct kgsl_memregion gmemspace;
struct adreno_context *drawctxt_active;
const char *pfp_fwfile;
unsigned int *pfp_fw;
size_t pfp_fw_size;
const char *pm4_fwfile;
unsigned int *pm4_fw;
size_t pm4_fw_size;
struct adreno_ringbuffer ringbuffer;
unsigned int mharb;
struct adreno_gpudev *gpudev;
unsigned int wait_timeout;
unsigned int istore_size;
unsigned int pix_shader_start;
unsigned int instruction_size;
unsigned int ib_check_level;
};
struct adreno_gpudev {
/*
* These registers are in a different location on A3XX, so define
* them in the structure and use them as variables.
*/
unsigned int reg_rbbm_status;
unsigned int reg_cp_pfp_ucode_data;
unsigned int reg_cp_pfp_ucode_addr;
/* GPU specific function hooks */
int (*ctxt_create)(struct adreno_device *, struct adreno_context *);
void (*ctxt_save)(struct adreno_device *, struct adreno_context *);
void (*ctxt_restore)(struct adreno_device *, struct adreno_context *);
irqreturn_t (*irq_handler)(struct adreno_device *);
void (*irq_control)(struct adreno_device *, int);
void (*rb_init)(struct adreno_device *, struct adreno_ringbuffer *);
void (*start)(struct adreno_device *);
unsigned int (*busy_cycles)(struct adreno_device *);
};
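/*
 * Annotation (not part of the original header): with the register
 * offsets stored per-GPU in struct adreno_gpudev, generation-neutral
 * code can read e.g. RBBM_STATUS without knowing the A2XX/A3XX layout:
 *
 *	adreno_regread(&adreno_dev->dev,
 *		       adreno_dev->gpudev->reg_rbbm_status, &status);
 */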
extern struct adreno_gpudev adreno_a2xx_gpudev;
extern struct adreno_gpudev adreno_a3xx_gpudev;
int adreno_idle(struct kgsl_device *device, unsigned int timeout);
void adreno_regread(struct kgsl_device *device, unsigned int offsetwords,
unsigned int *value);
void adreno_regwrite(struct kgsl_device *device, unsigned int offsetwords,
unsigned int value);
struct kgsl_memdesc *adreno_find_region(struct kgsl_device *device,
unsigned int pt_base,
unsigned int gpuaddr,
unsigned int size);
uint8_t *adreno_convertaddr(struct kgsl_device *device,
unsigned int pt_base, unsigned int gpuaddr, unsigned int size);
static inline int adreno_is_a200(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A200);
}
static inline int adreno_is_a203(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A203);
}
static inline int adreno_is_a205(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A205);
}
static inline int adreno_is_a20x(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev <= 209);
}
static inline int adreno_is_a220(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A220);
}
static inline int adreno_is_a225(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A225);
}
static inline int adreno_is_a22x(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev == ADRENO_REV_A220 ||
adreno_dev->gpurev == ADRENO_REV_A225);
}
static inline int adreno_is_a2xx(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev <= 299);
}
static inline int adreno_is_a3xx(struct adreno_device *adreno_dev)
{
return (adreno_dev->gpurev >= 300);
}
/**
* adreno_encode_istore_size - encode istore size in CP format
* @adreno_dev - The 3D device.
*
* Encode the istore size into the format expected by the
* CP_SET_SHADER_BASES and CP_ME_INIT commands:
* bits 31:29 - istore size as encoded by this function
* bits 27:16 - vertex shader start offset in instructions
* bits 11:0 - pixel shader start offset in instructions.
*/
static inline int adreno_encode_istore_size(struct adreno_device *adreno_dev)
{
unsigned int size;
/* in a225 the CP microcode multiplies the encoded
* value by 3 while decoding.
*/
if (adreno_is_a225(adreno_dev))
size = adreno_dev->istore_size/3;
else
size = adreno_dev->istore_size;
return (ilog2(size) - 5) << 29;
}
#endif /*__ADRENO_H */
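
The encoding above can be sanity-checked in isolation. The snippet below is a standalone sketch, not driver code: ilog2_u() is a plain-C stand-in for the kernel's ilog2(), and the istore size of 4096 instructions is an arbitrary example for a non-A225 part.

#include <stdio.h>

/* Plain-C stand-in for the kernel's ilog2() */
static int ilog2_u(unsigned int v)
{
	int l = -1;

	while (v) {
		v >>= 1;
		l++;
	}
	return l;
}

/* Mirrors adreno_encode_istore_size() for a non-A225 part */
static unsigned int encode_istore_size(unsigned int istore_size)
{
	return (unsigned int)(ilog2_u(istore_size) - 5) << 29;
}

int main(void)
{
	/* ilog2(4096) = 12, so (12 - 5) << 29 = 0xE0000000 */
	printf("encoded: 0x%08X\n", encode_istore_size(4096));
	return 0;
}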

File diff suppressed because it is too large

2555
drivers/gpu/msm/adreno_a3xx.c Executable file

File diff suppressed because it is too large

461
drivers/gpu/msm/adreno_debugfs.c Executable file

@@ -0,0 +1,461 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/delay.h>
#include <linux/debugfs.h>
#include <linux/uaccess.h>
#include <linux/io.h>
#include "kgsl.h"
#include "adreno_postmortem.h"
#include "adreno.h"
#include "a2xx_reg.h"
unsigned int kgsl_cff_dump_enable;
int kgsl_pm_regs_enabled;
static uint32_t kgsl_ib_base;
static uint32_t kgsl_ib_size;
static struct dentry *pm_d_debugfs;
static int pm_dump_set(void *data, u64 val)
{
struct kgsl_device *device = data;
if (val) {
mutex_lock(&device->mutex);
adreno_postmortem_dump(device, 1);
mutex_unlock(&device->mutex);
}
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(pm_dump_fops,
NULL,
pm_dump_set, "%llu\n");
static int pm_regs_enabled_set(void *data, u64 val)
{
kgsl_pm_regs_enabled = val ? 1 : 0;
return 0;
}
static int pm_regs_enabled_get(void *data, u64 *val)
{
*val = kgsl_pm_regs_enabled;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(pm_regs_enabled_fops,
pm_regs_enabled_get,
pm_regs_enabled_set, "%llu\n");
static int kgsl_cff_dump_enable_set(void *data, u64 val)
{
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
kgsl_cff_dump_enable = (val != 0);
return 0;
#else
return -EINVAL;
#endif
}
static int kgsl_cff_dump_enable_get(void *data, u64 *val)
{
*val = kgsl_cff_dump_enable;
return 0;
}
DEFINE_SIMPLE_ATTRIBUTE(kgsl_cff_dump_enable_fops, kgsl_cff_dump_enable_get,
kgsl_cff_dump_enable_set, "%llu\n");
static int kgsl_dbgfs_open(struct inode *inode, struct file *file)
{
file->f_mode &= ~(FMODE_PREAD | FMODE_PWRITE);
file->private_data = inode->i_private;
return 0;
}
static int kgsl_dbgfs_release(struct inode *inode, struct file *file)
{
return 0;
}
static int kgsl_hex_dump(const char *prefix, int c, uint8_t *data,
int rowc, int linec, char __user *buff)
{
int ss;
/* Prefix of 20 chars max, 32 bytes per row, in groups of four - that's
* 8 groups at 8 chars per group plus a space, plus new-line, plus
* ending character */
char linebuf[20 + 64 + 1 + 1];
ss = snprintf(linebuf, sizeof(linebuf), prefix, c);
hex_dump_to_buffer(data, linec, rowc, 4, linebuf+ss,
sizeof(linebuf)-ss, 0);
strlcat(linebuf, "\n", sizeof(linebuf));
linebuf[sizeof(linebuf)-1] = 0;
ss = strlen(linebuf);
if (copy_to_user(buff, linebuf, ss+1))
return -EFAULT;
return ss;
}
static ssize_t kgsl_ib_dump_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
int i, count = kgsl_ib_size, remaining, pos = 0, tot = 0, ss;
struct kgsl_device *device = file->private_data;
const int rowc = 32;
unsigned int pt_base, ib_memsize;
uint8_t *base_addr;
char linebuf[80];
if (!ppos || !device || !kgsl_ib_base)
return 0;
kgsl_regread(device, MH_MMU_PT_BASE, &pt_base);
base_addr = kgsl_sharedmem_convertaddr(device, pt_base, kgsl_ib_base,
&ib_memsize);
if (!base_addr)
return 0;
pr_info("%s ppos=%ld, buff_count=%d, count=%d\n", __func__, (long)*ppos,
buff_count, count);
ss = snprintf(linebuf, sizeof(linebuf), "IB: base=%08x(%08x"
"), size=%d, memsize=%d\n", kgsl_ib_base,
(uint32_t)base_addr, kgsl_ib_size, ib_memsize);
if (*ppos == 0) {
if (copy_to_user(buff, linebuf, ss+1))
return -EFAULT;
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
remaining = count;
for (i = 0; i < count; i += rowc) {
int linec = min(remaining, rowc);
remaining -= rowc;
ss = kgsl_hex_dump("IB: %05x: ", i, base_addr, rowc, linec,
buff);
if (ss < 0)
return ss;
if (pos >= *ppos) {
if (tot+ss >= buff_count) {
ss = copy_to_user(buff, "", 1);
return tot;
}
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
base_addr += linec;
}
return tot;
}
static ssize_t kgsl_ib_dump_write(
struct file *file,
const char __user *buff,
size_t count,
loff_t *ppos)
{
char local_buff[64];
if (count >= sizeof(local_buff))
return -EFAULT;
if (copy_from_user(local_buff, buff, count))
return -EFAULT;
local_buff[count] = 0; /* end of string */
sscanf(local_buff, "%x %d", &kgsl_ib_base, &kgsl_ib_size);
pr_info("%s: base=%08X size=%d\n", __func__, kgsl_ib_base,
kgsl_ib_size);
return count;
}
static const struct file_operations kgsl_ib_dump_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_ib_dump_read,
.write = kgsl_ib_dump_write,
};
static int kgsl_regread_nolock(struct kgsl_device *device,
unsigned int offsetwords, unsigned int *value)
{
unsigned int *reg;
if (offsetwords*sizeof(uint32_t) >= device->regspace.sizebytes) {
KGSL_DRV_ERR(device, "invalid offset %d\n", offsetwords);
return -ERANGE;
}
reg = (unsigned int *)(device->regspace.mmio_virt_base
+ (offsetwords << 2));
*value = __raw_readl(reg);
return 0;
}
#define ADRENO_ISTORE_START 0x5000
static ssize_t kgsl_istore_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
int i, count, remaining, pos = 0, tot = 0;
struct kgsl_device *device = file->private_data;
const int rowc = 8;
struct adreno_device *adreno_dev;
if (!ppos || !device)
return 0;
adreno_dev = ADRENO_DEVICE(device);
count = adreno_dev->istore_size * adreno_dev->instruction_size;
remaining = count;
for (i = 0; i < count; i += rowc) {
unsigned int vals[rowc];
int j, ss;
int linec = min(remaining, rowc);
remaining -= rowc;
if (pos >= *ppos) {
for (j = 0; j < linec; ++j)
kgsl_regread_nolock(device,
ADRENO_ISTORE_START + i + j,
vals + j);
} else
memset(vals, 0, sizeof(vals));
ss = kgsl_hex_dump("IS: %04x: ", i, (uint8_t *)vals, rowc*4,
linec*4, buff);
if (ss < 0)
return ss;
if (pos >= *ppos) {
if (tot+ss >= buff_count)
return tot;
tot += ss;
buff += ss;
*ppos += ss;
}
pos += ss;
}
return tot;
}
static const struct file_operations kgsl_istore_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_istore_read,
.llseek = default_llseek,
};
typedef void (*reg_read_init_t)(struct kgsl_device *device);
typedef void (*reg_read_fill_t)(struct kgsl_device *device, int i,
unsigned int *vals, int linec);
static ssize_t kgsl_reg_read(struct kgsl_device *device, int count,
reg_read_init_t reg_read_init,
reg_read_fill_t reg_read_fill, const char *prefix, char __user *buff,
loff_t *ppos)
{
int i, remaining;
const int rowc = 8;
if (!ppos || *ppos || !device)
return 0;
mutex_lock(&device->mutex);
reg_read_init(device);
remaining = count;
for (i = 0; i < count; i += rowc) {
unsigned int vals[rowc];
int ss;
int linec = min(remaining, rowc);
remaining -= rowc;
reg_read_fill(device, i, vals, linec);
ss = kgsl_hex_dump(prefix, i, (uint8_t *)vals, rowc*4, linec*4,
buff);
if (ss < 0) {
mutex_unlock(&device->mutex);
return ss;
}
buff += ss;
*ppos += ss;
}
mutex_unlock(&device->mutex);
return *ppos;
}
static void kgsl_sx_reg_read_init(struct kgsl_device *device)
{
kgsl_regwrite(device, REG_RBBM_PM_OVERRIDE2, 0xFF);
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
}
static void kgsl_sx_reg_read_fill(struct kgsl_device *device, int i,
unsigned int *vals, int linec)
{
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1B00 | i);
kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
}
}
static ssize_t kgsl_sx_debug_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
struct kgsl_device *device = file->private_data;
return kgsl_reg_read(device, 0x1B, kgsl_sx_reg_read_init,
kgsl_sx_reg_read_fill, "SX: %02x: ", buff, ppos);
}
static const struct file_operations kgsl_sx_debug_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_sx_debug_read,
};
static void kgsl_cp_reg_read_init(struct kgsl_device *device)
{
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
}
static void kgsl_cp_reg_read_fill(struct kgsl_device *device, int i,
unsigned int *vals, int linec)
{
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0x1628);
kgsl_regread(device, REG_RBBM_DEBUG_OUT, vals+j);
msleep(100);
}
}
static ssize_t kgsl_cp_debug_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
struct kgsl_device *device = file->private_data;
return kgsl_reg_read(device, 20, kgsl_cp_reg_read_init,
kgsl_cp_reg_read_fill,
"CP: %02x: ", buff, ppos);
}
static const struct file_operations kgsl_cp_debug_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_cp_debug_read,
};
static void kgsl_mh_reg_read_init(struct kgsl_device *device)
{
kgsl_regwrite(device, REG_RBBM_DEBUG_CNTL, 0);
}
static void kgsl_mh_reg_read_fill(struct kgsl_device *device, int i,
unsigned int *vals, int linec)
{
int j;
for (j = 0; j < linec; ++j) {
kgsl_regwrite(device, MH_DEBUG_CTRL, i+j);
kgsl_regread(device, MH_DEBUG_DATA, vals+j);
}
}
static ssize_t kgsl_mh_debug_read(
struct file *file,
char __user *buff,
size_t buff_count,
loff_t *ppos)
{
struct kgsl_device *device = file->private_data;
return kgsl_reg_read(device, 0x40, kgsl_mh_reg_read_init,
kgsl_mh_reg_read_fill,
"MH: %02x: ", buff, ppos);
}
static const struct file_operations kgsl_mh_debug_fops = {
.open = kgsl_dbgfs_open,
.release = kgsl_dbgfs_release,
.read = kgsl_mh_debug_read,
};
void adreno_debugfs_init(struct kgsl_device *device)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
if (!device->d_debugfs || IS_ERR(device->d_debugfs))
return;
debugfs_create_file("ib_dump", 0600, device->d_debugfs, device,
&kgsl_ib_dump_fops);
debugfs_create_file("istore", 0400, device->d_debugfs, device,
&kgsl_istore_fops);
debugfs_create_file("sx_debug", 0400, device->d_debugfs, device,
&kgsl_sx_debug_fops);
debugfs_create_file("cp_debug", 0400, device->d_debugfs, device,
&kgsl_cp_debug_fops);
debugfs_create_file("mh_debug", 0400, device->d_debugfs, device,
&kgsl_mh_debug_fops);
debugfs_create_file("cff_dump", 0644, device->d_debugfs, device,
&kgsl_cff_dump_enable_fops);
debugfs_create_u32("wait_timeout", 0644, device->d_debugfs,
&adreno_dev->wait_timeout);
debugfs_create_u32("ib_check", 0644, device->d_debugfs,
&adreno_dev->ib_check_level);
/* Create post mortem control files */
pm_d_debugfs = debugfs_create_dir("postmortem", device->d_debugfs);
if (IS_ERR(pm_d_debugfs))
return;
debugfs_create_file("dump", 0600, pm_d_debugfs, device,
&pm_dump_fops);
debugfs_create_file("regs_enabled", 0644, pm_d_debugfs, device,
&pm_regs_enabled_fops);
}
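
The ib_dump write handler above parses "<base> <size>" with sscanf("%x %d"), so the IB base is written in hex and the size in decimal dwords; a subsequent read returns the header line plus the hex dump built by kgsl_ib_dump_read(). A minimal userspace sketch, assuming the usual debugfs mount point and device directory (both the path and the IB address below are hypothetical):

#include <stdio.h>

int main(void)
{
	const char *path = "/sys/kernel/debug/kgsl/kgsl-3d0/ib_dump";
	char line[256];
	FILE *f;

	/* Select a (hypothetical) IB: base 0x34001000, 64 dwords */
	f = fopen(path, "w");
	if (!f)
		return 1;
	fprintf(f, "%x %d\n", 0x34001000, 64);
	fclose(f);

	/* Read back the hex dump produced by kgsl_ib_dump_read() */
	f = fopen(path, "r");
	if (!f)
		return 1;
	while (fgets(line, sizeof(line), f))
		fputs(line, stdout);
	fclose(f);
	return 0;
}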

40
drivers/gpu/msm/adreno_debugfs.h Executable file

@@ -0,0 +1,40 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_DEBUGFS_H
#define __ADRENO_DEBUGFS_H
#ifdef CONFIG_DEBUG_FS
void adreno_debugfs_init(struct kgsl_device *device);
extern int kgsl_pm_regs_enabled;
static inline int kgsl_pmregs_enabled(void)
{
return kgsl_pm_regs_enabled;
}
#else
static inline void adreno_debugfs_init(struct kgsl_device *device)
{
}
static inline int kgsl_pmregs_enabled(void)
{
/* If debugfs is turned off, then always print registers */
return 1;
}
#endif
#endif /* __ADRENO_DEBUGFS_H */

285
drivers/gpu/msm/adreno_drawctxt.c Executable file

@@ -0,0 +1,285 @@
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/slab.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#define KGSL_INIT_REFTIMESTAMP 0x7FFFFFFF
/* quad for copying GMEM to context shadow */
#define QUAD_LEN 12
#define QUAD_RESTORE_LEN 14
static unsigned int gmem_copy_quad[QUAD_LEN] = {
0x00000000, 0x00000000, 0x3f800000,
0x00000000, 0x00000000, 0x3f800000,
0x00000000, 0x00000000, 0x3f800000,
0x00000000, 0x00000000, 0x3f800000
};
static unsigned int gmem_restore_quad[QUAD_RESTORE_LEN] = {
0x00000000, 0x3f800000, 0x3f800000,
0x00000000, 0x00000000, 0x00000000,
0x3f800000, 0x00000000, 0x00000000,
0x3f800000, 0x00000000, 0x00000000,
0x3f800000, 0x3f800000,
};
#define TEXCOORD_LEN 8
static unsigned int gmem_copy_texcoord[TEXCOORD_LEN] = {
0x00000000, 0x3f800000,
0x3f800000, 0x3f800000,
0x00000000, 0x00000000,
0x3f800000, 0x00000000
};
/*
* Helper functions
* These are global helper functions used by the GPUs during context switch
*/
/**
* uint2float - convert a uint to IEEE754 single precision float
* @ uintval - value to convert
*/
unsigned int uint2float(unsigned int uintval)
{
unsigned int exp, frac = 0;
if (uintval == 0)
return 0;
exp = ilog2(uintval);
/* Calculate fraction */
if (23 > exp)
frac = (uintval & (~(1 << exp))) << (23 - exp);
/* Exp is biased by 127 and shifted 23 bits */
exp = (exp + 127) << 23;
return exp | frac;
}
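/*
 * Worked example (annotation, not driver code): uint2float(12).
 * 12 = 0b1100, so exp = ilog2(12) = 3;
 * frac = (12 & ~(1 << 3)) << (23 - 3) = 4 << 20 = 0x00400000;
 * exp  = (3 + 127) << 23 = 0x41000000;
 * result = 0x41400000, the IEEE754 bit pattern of 12.0f.
 */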
static void set_gmem_copy_quad(struct gmem_shadow_t *shadow)
{
/* set vertex buffer values */
gmem_copy_quad[1] = uint2float(shadow->height);
gmem_copy_quad[3] = uint2float(shadow->width);
gmem_copy_quad[4] = uint2float(shadow->height);
gmem_copy_quad[9] = uint2float(shadow->width);
gmem_restore_quad[5] = uint2float(shadow->height);
gmem_restore_quad[7] = uint2float(shadow->width);
memcpy(shadow->quad_vertices.hostptr, gmem_copy_quad, QUAD_LEN << 2);
memcpy(shadow->quad_vertices_restore.hostptr, gmem_restore_quad,
QUAD_RESTORE_LEN << 2);
memcpy(shadow->quad_texcoords.hostptr, gmem_copy_texcoord,
TEXCOORD_LEN << 2);
}
/**
* build_quad_vtxbuff - Create a quad for saving/restoring GMEM
* @ context - Pointer to the context being created
* @ shadow - Pointer to the GMEM shadow structure
* @ incmd - Pointer to pointer to the temporary command buffer
*/
/* quad for saving/restoring gmem */
void build_quad_vtxbuff(struct adreno_context *drawctxt,
struct gmem_shadow_t *shadow, unsigned int **incmd)
{
unsigned int *cmd = *incmd;
/* quad vertex buffer location (in GPU space) */
shadow->quad_vertices.hostptr = cmd;
shadow->quad_vertices.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
cmd += QUAD_LEN;
/* Used by A3XX, but define for both to make the code easier */
shadow->quad_vertices_restore.hostptr = cmd;
shadow->quad_vertices_restore.gpuaddr =
virt2gpu(cmd, &drawctxt->gpustate);
cmd += QUAD_RESTORE_LEN;
/* tex coord buffer location (in GPU space) */
shadow->quad_texcoords.hostptr = cmd;
shadow->quad_texcoords.gpuaddr = virt2gpu(cmd, &drawctxt->gpustate);
cmd += TEXCOORD_LEN;
set_gmem_copy_quad(shadow);
*incmd = cmd;
}
/**
* adreno_drawctxt_create - create a new adreno draw context
* @device - KGSL device to create the context on
* @pagetable - Pagetable for the context
* @context- Generic KGSL context structure
* @flags - flags for the context (passed from user space)
*
* Create a new draw context for the 3D core. Return 0 on success,
* or error code on failure.
*/
int adreno_drawctxt_create(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
struct kgsl_context *context, uint32_t flags)
{
struct adreno_context *drawctxt;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
int ret;
drawctxt = kzalloc(sizeof(struct adreno_context), GFP_KERNEL);
if (drawctxt == NULL)
return -ENOMEM;
drawctxt->pagetable = pagetable;
drawctxt->bin_base_offset = 0;
drawctxt->id = context->id;
if (flags & KGSL_CONTEXT_PREAMBLE)
drawctxt->flags |= CTXT_FLAGS_PREAMBLE;
if (flags & KGSL_CONTEXT_NO_GMEM_ALLOC)
drawctxt->flags |= CTXT_FLAGS_NOGMEMALLOC;
if (flags & KGSL_CONTEXT_PER_CONTEXT_TS)
drawctxt->flags |= CTXT_FLAGS_PER_CONTEXT_TS;
ret = adreno_dev->gpudev->ctxt_create(adreno_dev, drawctxt);
if (ret)
goto err;
kgsl_sharedmem_writel(&device->memstore,
KGSL_MEMSTORE_OFFSET(drawctxt->id, ref_wait_ts),
KGSL_INIT_REFTIMESTAMP);
context->devctxt = drawctxt;
return 0;
err:
kfree(drawctxt);
return ret;
}
/**
* adreno_drawctxt_destroy - destroy a draw context
* @device - KGSL device that owns the context
* @context- Generic KGSL context container for the context
*
* Destroy an existing draw context.
*/
/* destroy a drawing context */
void adreno_drawctxt_destroy(struct kgsl_device *device,
struct kgsl_context *context)
{
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct adreno_context *drawctxt;
if (context == NULL)
return;
drawctxt = context->devctxt;
/* deactivate context */
if (adreno_dev->drawctxt_active == drawctxt) {
/* no need to save GMEM or shader, the context is
* being destroyed.
*/
drawctxt->flags &= ~(CTXT_FLAGS_GMEM_SAVE |
CTXT_FLAGS_SHADER_SAVE |
CTXT_FLAGS_GMEM_SHADOW |
CTXT_FLAGS_STATE_SHADOW);
adreno_drawctxt_switch(adreno_dev, NULL, 0);
}
adreno_idle(device, KGSL_TIMEOUT_DEFAULT);
kgsl_sharedmem_free(&drawctxt->gpustate);
kgsl_sharedmem_free(&drawctxt->context_gmem_shadow.gmemshadow);
kfree(drawctxt);
context->devctxt = NULL;
}
/**
* adreno_drawctxt_set_bin_base_offset - set bin base offset for the context
* @device - KGSL device that owns the context
* @context- Generic KGSL context container for the context
* @offset - Offset to set
*
* Set the bin base offset for A2XX devices. Not valid for A3XX devices.
*/
void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset)
{
struct adreno_context *drawctxt = context->devctxt;
if (drawctxt)
drawctxt->bin_base_offset = offset;
}
/**
* adreno_drawctxt_switch - switch the current draw context
* @adreno_dev - The 3D device that owns the context
* @drawctxt - the 3D context to switch to
* @flags - Flags to accompany the switch (from user space)
*
* Switch the current draw context
*/
void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags)
{
struct kgsl_device *device = &adreno_dev->dev;
if (drawctxt) {
if (flags & KGSL_CONTEXT_SAVE_GMEM)
/* Set the flag in context so that the save is done
* when this context is switched out. */
drawctxt->flags |= CTXT_FLAGS_GMEM_SAVE;
else
/* Remove GMEM saving flag from the context */
drawctxt->flags &= ~CTXT_FLAGS_GMEM_SAVE;
}
/* already current? */
if (adreno_dev->drawctxt_active == drawctxt)
return;
KGSL_CTXT_INFO(device, "from %p to %p flags %d\n",
adreno_dev->drawctxt_active, drawctxt, flags);
/* Save the old context */
adreno_dev->gpudev->ctxt_save(adreno_dev, adreno_dev->drawctxt_active);
/* Set the new context */
adreno_dev->gpudev->ctxt_restore(adreno_dev, drawctxt);
adreno_dev->drawctxt_active = drawctxt;
}

179
drivers/gpu/msm/adreno_drawctxt.h Executable file

@@ -0,0 +1,179 @@
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_DRAWCTXT_H
#define __ADRENO_DRAWCTXT_H
#include "adreno_pm4types.h"
#include "a2xx_reg.h"
/* Flags */
#define CTXT_FLAGS_NOT_IN_USE 0x00000000
#define CTXT_FLAGS_IN_USE 0x00000001
/* state shadow memory allocated */
#define CTXT_FLAGS_STATE_SHADOW 0x00000010
/* gmem shadow memory allocated */
#define CTXT_FLAGS_GMEM_SHADOW 0x00000100
/* gmem must be copied to shadow */
#define CTXT_FLAGS_GMEM_SAVE 0x00000200
/* gmem can be restored from shadow */
#define CTXT_FLAGS_GMEM_RESTORE 0x00000400
/* preamble packed in cmdbuffer for context switching */
#define CTXT_FLAGS_PREAMBLE 0x00000800
/* shader must be copied to shadow */
#define CTXT_FLAGS_SHADER_SAVE 0x00002000
/* shader can be restored from shadow */
#define CTXT_FLAGS_SHADER_RESTORE 0x00004000
/* Context has caused a GPU hang */
#define CTXT_FLAGS_GPU_HANG 0x00008000
/* Specifies there is no need to save GMEM */
#define CTXT_FLAGS_NOGMEMALLOC 0x00010000
/* Trash state for context */
#define CTXT_FLAGS_TRASHSTATE 0x00020000
/* per context timestamps enabled */
#define CTXT_FLAGS_PER_CONTEXT_TS 0x00040000
struct kgsl_device;
struct adreno_device;
struct kgsl_device_private;
struct kgsl_context;
/* draw context */
struct gmem_shadow_t {
struct kgsl_memdesc gmemshadow; /* Shadow buffer address */
/*
* 256 KB GMEM surface = 4 bytes-per-pixel x 256 pixels/row x
* 256 rows. Width & height must be multiples of 32 in case tiled
* textures are used
*/
enum COLORFORMATX format; /* Unused on A3XX */
unsigned int size; /* Size of surface used to store GMEM */
unsigned int width; /* Width of surface used to store GMEM */
unsigned int height; /* Height of surface used to store GMEM */
unsigned int pitch; /* Pitch of surface used to store GMEM */
unsigned int gmem_pitch; /* Pitch value used for GMEM */
unsigned int *gmem_save_commands; /* Unused on A3XX */
unsigned int *gmem_restore_commands; /* Unused on A3XX */
unsigned int gmem_save[3];
unsigned int gmem_restore[3];
struct kgsl_memdesc quad_vertices;
struct kgsl_memdesc quad_texcoords;
struct kgsl_memdesc quad_vertices_restore;
};
struct adreno_context {
unsigned int id;
uint32_t flags;
struct kgsl_pagetable *pagetable;
struct kgsl_memdesc gpustate;
unsigned int reg_restore[3];
unsigned int shader_save[3];
unsigned int shader_restore[3];
/* Information of the GMEM shadow that is created in context create */
struct gmem_shadow_t context_gmem_shadow;
/* A2XX specific items */
unsigned int reg_save[3];
unsigned int shader_fixup[3];
unsigned int chicken_restore[3];
unsigned int bin_base_offset;
/* A3XX specific items */
unsigned int regconstant_save[3];
unsigned int constant_restore[3];
unsigned int hlsqcontrol_restore[3];
unsigned int save_fixup[3];
unsigned int restore_fixup[3];
struct kgsl_memdesc shader_load_commands[2];
struct kgsl_memdesc shader_save_commands[4];
struct kgsl_memdesc constant_save_commands[3];
struct kgsl_memdesc constant_load_commands[3];
struct kgsl_memdesc cond_execs[4];
struct kgsl_memdesc hlsqcontrol_restore_commands[1];
};
int adreno_drawctxt_create(struct kgsl_device *device,
struct kgsl_pagetable *pagetable,
struct kgsl_context *context,
uint32_t flags);
void adreno_drawctxt_destroy(struct kgsl_device *device,
struct kgsl_context *context);
void adreno_drawctxt_switch(struct adreno_device *adreno_dev,
struct adreno_context *drawctxt,
unsigned int flags);
void adreno_drawctxt_set_bin_base_offset(struct kgsl_device *device,
struct kgsl_context *context,
unsigned int offset);
/* GPU context switch helper functions */
void build_quad_vtxbuff(struct adreno_context *drawctxt,
struct gmem_shadow_t *shadow, unsigned int **incmd);
unsigned int uint2float(unsigned int);
static inline unsigned int virt2gpu(unsigned int *cmd,
struct kgsl_memdesc *memdesc)
{
return memdesc->gpuaddr + ((char *) cmd - (char *) memdesc->hostptr);
}
static inline void create_ib1(struct adreno_context *drawctxt,
unsigned int *cmd,
unsigned int *start,
unsigned int *end)
{
cmd[0] = CP_HDR_INDIRECT_BUFFER_PFD;
cmd[1] = virt2gpu(start, &drawctxt->gpustate);
cmd[2] = end - start;
}
static inline unsigned int *reg_range(unsigned int *cmd, unsigned int start,
unsigned int end)
{
*cmd++ = CP_REG(start); /* h/w regs, start addr */
*cmd++ = end - start + 1; /* count */
return cmd;
}
static inline void calc_gmemsize(struct gmem_shadow_t *shadow, int gmem_size)
{
int w = 64, h = 64;
shadow->format = COLORX_8_8_8_8;
/* convert from bytes to 32-bit words */
gmem_size = (gmem_size + 3) / 4;
while ((w * h) < gmem_size) {
if (w < h)
w *= 2;
else
h *= 2;
}
shadow->pitch = shadow->width = w;
shadow->height = h;
shadow->gmem_pitch = shadow->pitch;
shadow->size = shadow->pitch * shadow->height * 4;
}
#endif /* __ADRENO_DRAWCTXT_H */
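
To see what calc_gmemsize() produces, here is a standalone trace of its sizing loop (a sketch, not driver code; the 256 KB GMEM size is an example value):

#include <stdio.h>

/* Standalone copy of the sizing loop in calc_gmemsize() */
static void calc_dims(int gmem_size, int *width, int *height)
{
	int w = 64, h = 64;

	gmem_size = (gmem_size + 3) / 4;	/* bytes -> 32-bit words */
	while ((w * h) < gmem_size) {
		if (w < h)
			w *= 2;
		else
			h *= 2;
	}
	*width = w;
	*height = h;
}

int main(void)
{
	int w, h;

	calc_dims(256 * 1024, &w, &h);
	printf("%dx%d surface, %d bytes\n", w, h, w * h * 4);
	/* prints: 256x256 surface, 262144 bytes */
	return 0;
}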

229
drivers/gpu/msm/adreno_pm4types.h Executable file

@@ -0,0 +1,229 @@
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_PM4TYPES_H
#define __ADRENO_PM4TYPES_H
#define CP_PKT_MASK 0xc0000000
#define CP_TYPE0_PKT ((unsigned int)0 << 30)
#define CP_TYPE1_PKT ((unsigned int)1 << 30)
#define CP_TYPE2_PKT ((unsigned int)2 << 30)
#define CP_TYPE3_PKT ((unsigned int)3 << 30)
/* type3 packets */
/* initialize CP's micro-engine */
#define CP_ME_INIT 0x48
/* skip N 32-bit words to get to the next packet */
#define CP_NOP 0x10
/* indirect buffer dispatch. same as IB, but init is pipelined */
#define CP_INDIRECT_BUFFER_PFD 0x37
/* wait for the IDLE state of the engine */
#define CP_WAIT_FOR_IDLE 0x26
/* wait until a register or memory location is a specific value */
#define CP_WAIT_REG_MEM 0x3c
/* wait until a register location is equal to a specific value */
#define CP_WAIT_REG_EQ 0x52
/* wait until a register location is >= a specific value */
#define CP_WAT_REG_GTE 0x53
/* wait until a read completes */
#define CP_WAIT_UNTIL_READ 0x5c
/* wait until all base/size writes from an IB_PFD packet have completed */
#define CP_WAIT_IB_PFD_COMPLETE 0x5d
/* register read/modify/write */
#define CP_REG_RMW 0x21
/* reads register in chip and writes to memory */
#define CP_REG_TO_MEM 0x3e
/* write N 32-bit words to memory */
#define CP_MEM_WRITE 0x3d
/* write CP_PROG_COUNTER value to memory */
#define CP_MEM_WRITE_CNTR 0x4f
/* conditional execution of a sequence of packets */
#define CP_COND_EXEC 0x44
/* conditional write to memory or register */
#define CP_COND_WRITE 0x45
/* generate an event that creates a write to memory when completed */
#define CP_EVENT_WRITE 0x46
/* generate a VS|PS_done event */
#define CP_EVENT_WRITE_SHD 0x58
/* generate a cache flush done event */
#define CP_EVENT_WRITE_CFL 0x59
/* generate a z_pass done event */
#define CP_EVENT_WRITE_ZPD 0x5b
/* initiate fetch of index buffer and draw */
#define CP_DRAW_INDX 0x22
/* draw using supplied indices in packet */
#define CP_DRAW_INDX_2 0x36
/* initiate fetch of index buffer and binIDs and draw */
#define CP_DRAW_INDX_BIN 0x34
/* initiate fetch of bin IDs and draw using supplied indices */
#define CP_DRAW_INDX_2_BIN 0x35
/* begin/end initiator for viz query extent processing */
#define CP_VIZ_QUERY 0x23
/* fetch state sub-blocks and initiate shader code DMAs */
#define CP_SET_STATE 0x25
/* load constant into chip and to memory */
#define CP_SET_CONSTANT 0x2d
/* load sequencer instruction memory (pointer-based) */
#define CP_IM_LOAD 0x27
/* load sequencer instruction memory (code embedded in packet) */
#define CP_IM_LOAD_IMMEDIATE 0x2b
/* load constants from a location in memory */
#define CP_LOAD_CONSTANT_CONTEXT 0x2e
/* (A2x) sets binning configuration registers */
#define CP_SET_BIN_DATA 0x2f
/* selective invalidation of state pointers */
#define CP_INVALIDATE_STATE 0x3b
/* dynamically changes shader instruction memory partition */
#define CP_SET_SHADER_BASES 0x4A
/* sets the 64-bit BIN_MASK register in the PFP */
#define CP_SET_BIN_MASK 0x50
/* sets the 64-bit BIN_SELECT register in the PFP */
#define CP_SET_BIN_SELECT 0x51
/* updates the current context, if needed */
#define CP_CONTEXT_UPDATE 0x5e
/* generate interrupt from the command stream */
#define CP_INTERRUPT 0x40
/* copy sequencer instruction memory to system memory */
#define CP_IM_STORE 0x2c
/*
* for a20x
* program an offset that will be added to the BIN_BASE value of
* the 3D_DRAW_INDX_BIN packet
*/
#define CP_SET_BIN_BASE_OFFSET 0x4B
/*
* for a22x
* sets draw initiator flags register in PFP, gets bitwise-ORed into
* every draw initiator
*/
#define CP_SET_DRAW_INIT_FLAGS 0x4B
#define CP_SET_PROTECTED_MODE 0x5f /* sets the register protection mode */
/*
* for a3xx
*/
#define CP_LOAD_STATE 0x30 /* load high level sequencer command */
/* Conditionally load a IB based on a flag */
#define CP_COND_INDIRECT_BUFFER_PFE 0x3A /* prefetch enabled */
#define CP_COND_INDIRECT_BUFFER_PFD 0x32 /* prefetch disabled */
/* Load a buffer with pre-fetch enabled */
#define CP_INDIRECT_BUFFER_PFE 0x3F
#define CP_LOADSTATE_DSTOFFSET_SHIFT 0x00000000
#define CP_LOADSTATE_STATESRC_SHIFT 0x00000010
#define CP_LOADSTATE_STATEBLOCKID_SHIFT 0x00000013
#define CP_LOADSTATE_NUMOFUNITS_SHIFT 0x00000016
#define CP_LOADSTATE_STATETYPE_SHIFT 0x00000000
#define CP_LOADSTATE_EXTSRCADDR_SHIFT 0x00000002
/* packet header building macros */
#define cp_type0_packet(regindx, cnt) \
(CP_TYPE0_PKT | (((cnt)-1) << 16) | ((regindx) & 0x7FFF))
#define cp_type0_packet_for_sameregister(regindx, cnt) \
(CP_TYPE0_PKT | (((cnt)-1) << 16) | ((1 << 15) | \
((regindx) & 0x7FFF)))
#define cp_type1_packet(reg0, reg1) \
(CP_TYPE1_PKT | ((reg1) << 12) | (reg0))
#define cp_type3_packet(opcode, cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))
#define cp_predicated_type3_packet(opcode, cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8) | 0x1)
#define cp_nop_packet(cnt) \
(CP_TYPE3_PKT | (((cnt)-1) << 16) | (CP_NOP << 8))
#define pkt_is_type0(pkt) (((pkt) & 0XC0000000) == CP_TYPE0_PKT)
#define type0_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
#define type0_pkt_offset(pkt) ((pkt) & 0x7FFF)
#define pkt_is_type3(pkt) (((pkt) & 0xC0000000) == CP_TYPE3_PKT)
#define cp_type3_opcode(pkt) (((pkt) >> 8) & 0xFF)
#define type3_pkt_size(pkt) ((((pkt) >> 16) & 0x3FFF) + 1)
/* packet headers */
#define CP_HDR_ME_INIT cp_type3_packet(CP_ME_INIT, 18)
#define CP_HDR_INDIRECT_BUFFER_PFD cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2)
#define CP_HDR_INDIRECT_BUFFER_PFE cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2)
/* dword base address of the GFX decode space */
#define SUBBLOCK_OFFSET(reg) ((unsigned int)((reg) - (0x2000)))
/* gmem command buffer length */
#define CP_REG(reg) ((0x4 << 16) | (SUBBLOCK_OFFSET(reg)))
/* Return 1 if the command is an indirect buffer of any kind */
static inline int adreno_cmd_is_ib(unsigned int cmd)
{
return (cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFE, 2) ||
cmd == cp_type3_packet(CP_INDIRECT_BUFFER_PFD, 2) ||
cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFE, 2) ||
cmd == cp_type3_packet(CP_COND_INDIRECT_BUFFER_PFD, 2));
}
#endif /* __ADRENO_PM4TYPES_H */
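
The packet-header macros above can be verified with a few lines of host-side C (a sketch; only the two defines it needs are copied in):

#include <stdio.h>

#define CP_TYPE3_PKT	((unsigned int)3 << 30)
#define CP_ME_INIT	0x48

#define cp_type3_packet(opcode, cnt) \
	(CP_TYPE3_PKT | (((cnt)-1) << 16) | (((opcode) & 0xFF) << 8))

int main(void)
{
	/* 0xC0000000 | (17 << 16) | (0x48 << 8) = 0xC0114800 */
	printf("CP_HDR_ME_INIT = 0x%08X\n",
	       cp_type3_packet(CP_ME_INIT, 18));
	return 0;
}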

881
drivers/gpu/msm/adreno_postmortem.c Executable file

@@ -0,0 +1,881 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/vmalloc.h>
#include "kgsl.h"
#include "kgsl_sharedmem.h"
#include "adreno.h"
#include "adreno_pm4types.h"
#include "adreno_ringbuffer.h"
#include "adreno_postmortem.h"
#include "adreno_debugfs.h"
#include "kgsl_cffdump.h"
#include "a2xx_reg.h"
#define INVALID_RB_CMD 0xaaaaaaaa
#define NUM_DWORDS_OF_RINGBUFFER_HISTORY 100
struct pm_id_name {
uint32_t id;
char name[9];
};
static const struct pm_id_name pm0_types[] = {
{REG_PA_SC_AA_CONFIG, "RPASCAAC"},
{REG_RBBM_PM_OVERRIDE2, "RRBBPMO2"},
{REG_SCRATCH_REG2, "RSCRTRG2"},
{REG_SQ_GPR_MANAGEMENT, "RSQGPRMN"},
{REG_SQ_INST_STORE_MANAGMENT, "RSQINSTS"},
{REG_TC_CNTL_STATUS, "RTCCNTLS"},
{REG_TP0_CHICKEN, "RTP0CHCK"},
{REG_CP_TIMESTAMP, "CP_TM_ST"},
};
static const struct pm_id_name pm3_types[] = {
{CP_COND_EXEC, "CND_EXEC"},
{CP_CONTEXT_UPDATE, "CX__UPDT"},
{CP_DRAW_INDX, "DRW_NDX_"},
{CP_DRAW_INDX_BIN, "DRW_NDXB"},
{CP_EVENT_WRITE, "EVENT_WT"},
{CP_IM_LOAD, "IN__LOAD"},
{CP_IM_LOAD_IMMEDIATE, "IM_LOADI"},
{CP_IM_STORE, "IM_STORE"},
{CP_INDIRECT_BUFFER_PFE, "IND_BUF_"},
{CP_INDIRECT_BUFFER_PFD, "IND_BUFP"},
{CP_INTERRUPT, "PM4_INTR"},
{CP_INVALIDATE_STATE, "INV_STAT"},
{CP_LOAD_CONSTANT_CONTEXT, "LD_CN_CX"},
{CP_ME_INIT, "ME__INIT"},
{CP_NOP, "PM4__NOP"},
{CP_REG_RMW, "REG__RMW"},
{CP_REG_TO_MEM, "REG2_MEM"},
{CP_SET_BIN_BASE_OFFSET, "ST_BIN_O"},
{CP_SET_CONSTANT, "ST_CONST"},
{CP_SET_PROTECTED_MODE, "ST_PRT_M"},
{CP_SET_SHADER_BASES, "ST_SHD_B"},
{CP_WAIT_FOR_IDLE, "WAIT4IDL"},
};
/* Offset address pairs: start, end of range to dump (inclusive) */
/* GPU < Z470 */
static const int a200_registers[] = {
0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
0x0100, 0x0110, 0x0118, 0x011c,
0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
0x0fe0, 0x0fec, 0x1100, 0x1100,
0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
0x2900, 0x290c, 0x2914, 0x2914, 0x2938, 0x293c,
0x30b0, 0x30b0, 0x30c0, 0x30c0, 0x30e0, 0x30f0,
0x3100, 0x3100, 0x3110, 0x3110, 0x3200, 0x3218,
0x3220, 0x3250, 0x3264, 0x3268, 0x3290, 0x3294,
0x3400, 0x340c, 0x3418, 0x3418, 0x3420, 0x342c,
0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
0x3c04, 0x3c08, 0x3c30, 0x3c30, 0x3c38, 0x3c48,
0x3c98, 0x3ca8, 0x3cb0, 0x3cb0,
0x8000, 0x8008, 0x8018, 0x803c, 0x8200, 0x8208,
0x8400, 0x8424, 0x8430, 0x8450, 0x8600, 0x8610,
0x87d4, 0x87dc, 0x8800, 0x8820, 0x8a00, 0x8a0c,
0x8a4c, 0x8a50, 0x8c00, 0x8c20, 0x8c48, 0x8c48,
0x8c58, 0x8c74, 0x8c90, 0x8c98, 0x8e00, 0x8e0c,
0x9000, 0x9008, 0x9018, 0x903c, 0x9200, 0x9208,
0x9400, 0x9424, 0x9430, 0x9450, 0x9600, 0x9610,
0x97d4, 0x97dc, 0x9800, 0x9820, 0x9a00, 0x9a0c,
0x9a4c, 0x9a50, 0x9c00, 0x9c20, 0x9c48, 0x9c48,
0x9c58, 0x9c74, 0x9c90, 0x9c98, 0x9e00, 0x9e0c,
0x10000, 0x1000c, 0x12000, 0x12014,
0x12400, 0x12400, 0x12420, 0x12420
};
/* GPU = Z470 */
static const int a220_registers[] = {
0x0000, 0x0008, 0x0010, 0x002c, 0x00ec, 0x00f4,
0x0100, 0x0110, 0x0118, 0x011c,
0x0700, 0x0704, 0x070c, 0x0720, 0x0754, 0x0764,
0x0770, 0x0774, 0x07a8, 0x07a8, 0x07b8, 0x07cc,
0x07d8, 0x07dc, 0x07f0, 0x07fc, 0x0e44, 0x0e48,
0x0e6c, 0x0e78, 0x0ec8, 0x0ed4, 0x0edc, 0x0edc,
0x0fe0, 0x0fec, 0x1100, 0x1100,
0x110c, 0x1110, 0x112c, 0x112c, 0x1134, 0x113c,
0x1148, 0x1148, 0x1150, 0x116c, 0x11fc, 0x11fc,
0x15e0, 0x161c, 0x1724, 0x1724, 0x1740, 0x1740,
0x1804, 0x1810, 0x1818, 0x1824, 0x182c, 0x1838,
0x184c, 0x1850, 0x28a4, 0x28ac, 0x28bc, 0x28c4,
0x2900, 0x2900, 0x2908, 0x290c, 0x2914, 0x2914,
0x2938, 0x293c, 0x30c0, 0x30c0, 0x30e0, 0x30e4,
0x30f0, 0x30f0, 0x3200, 0x3204, 0x3220, 0x324c,
0x3400, 0x340c, 0x3414, 0x3418, 0x3420, 0x342c,
0x34d0, 0x34d4, 0x36b8, 0x3704, 0x3720, 0x3750,
0x3760, 0x3764, 0x3800, 0x3800, 0x3808, 0x3810,
0x385c, 0x3878, 0x3b00, 0x3b24, 0x3b2c, 0x3b30,
0x3b40, 0x3b40, 0x3b50, 0x3b5c, 0x3b80, 0x3b88,
0x3c04, 0x3c08, 0x8000, 0x8008, 0x8018, 0x803c,
0x8200, 0x8208, 0x8400, 0x8408, 0x8410, 0x8424,
0x8430, 0x8450, 0x8600, 0x8610, 0x87d4, 0x87dc,
0x8800, 0x8808, 0x8810, 0x8810, 0x8820, 0x8820,
0x8a00, 0x8a08, 0x8a50, 0x8a50,
0x8c00, 0x8c20, 0x8c24, 0x8c28, 0x8c48, 0x8c48,
0x8c58, 0x8c58, 0x8c60, 0x8c74, 0x8c90, 0x8c98,
0x8e00, 0x8e0c, 0x9000, 0x9008, 0x9018, 0x903c,
0x9200, 0x9208, 0x9400, 0x9408, 0x9410, 0x9424,
0x9430, 0x9450, 0x9600, 0x9610, 0x97d4, 0x97dc,
0x9800, 0x9808, 0x9810, 0x9818, 0x9820, 0x9820,
0x9a00, 0x9a08, 0x9a50, 0x9a50, 0x9c00, 0x9c20,
0x9c48, 0x9c48, 0x9c58, 0x9c58, 0x9c60, 0x9c74,
0x9c90, 0x9c98, 0x9e00, 0x9e0c,
0x10000, 0x1000c, 0x12000, 0x12014,
0x12400, 0x12400, 0x12420, 0x12420
};
static uint32_t adreno_is_pm4_len(uint32_t word)
{
if (word == INVALID_RB_CMD)
return 0;
return (word >> 16) & 0x3FFF;
}
static bool adreno_is_pm4_type(uint32_t word)
{
int i;
if (word == INVALID_RB_CMD)
return 1;
if (adreno_is_pm4_len(word) > 16)
return 0;
if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return 1;
}
return 0;
}
if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return 1;
}
return 0;
}
return 0;
}
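/*
 * Worked example (annotation, not driver code): for
 * cp_type3_packet(CP_WAIT_FOR_IDLE, 2) = 0xC0012600,
 * adreno_is_pm4_len() returns (0xC0012600 >> 16) & 0x3FFF = 1
 * payload dword, and adreno_is_pm4_type() returns 1 because
 * (0xC0012600 & 0xFFFF) == (CP_WAIT_FOR_IDLE << 8).
 */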
static const char *adreno_pm4_name(uint32_t word)
{
int i;
if (word == INVALID_RB_CMD)
return "--------";
if ((word & (3<<30)) == CP_TYPE0_PKT) {
for (i = 0; i < ARRAY_SIZE(pm0_types); ++i) {
if ((word & 0x7FFF) == pm0_types[i].id)
return pm0_types[i].name;
}
return "????????";
}
if ((word & (3<<30)) == CP_TYPE3_PKT) {
for (i = 0; i < ARRAY_SIZE(pm3_types); ++i) {
if ((word & 0xFFFF) == (pm3_types[i].id << 8))
return pm3_types[i].name;
}
return "????????";
}
return "????????";
}
static void adreno_dump_regs(struct kgsl_device *device,
const int *registers, int size)
{
int range = 0, offset = 0;
for (range = 0; range < size; range++) {
/* start and end are in dword offsets */
int start = registers[range * 2] / 4;
int end = registers[range * 2 + 1] / 4;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
int linelen, i;
for (offset = start; offset <= end; offset += linelen) {
unsigned int regvals[32/4];
linelen = min(end+1-offset, 32/4);
for (i = 0; i < linelen; ++i)
kgsl_regread(device, offset+i, regvals+i);
hex_dump_to_buffer(regvals, linelen*4, 32, 4,
linebuf, sizeof(linebuf), 0);
KGSL_LOG_DUMP(device,
"REG: %5.5X: %s\n", offset<<2, linebuf);
}
}
}
static void dump_ib(struct kgsl_device *device, char* buffId, uint32_t pt_base,
uint32_t base_offset, uint32_t ib_base, uint32_t ib_size, bool dump)
{
uint8_t *base_addr = adreno_convertaddr(device, pt_base,
ib_base, ib_size*sizeof(uint32_t));
if (base_addr && dump)
print_hex_dump(KERN_ERR, buffId, DUMP_PREFIX_OFFSET,
32, 4, base_addr, ib_size*4, 0);
else
KGSL_LOG_DUMP(device, "%s base:%8.8X ib_size:%d "
"offset:%5.5X%s\n",
buffId, ib_base, ib_size*4, base_offset,
base_addr ? "" : " [Invalid]");
}
#define IB_LIST_SIZE 64
struct ib_list {
int count;
uint32_t bases[IB_LIST_SIZE];
uint32_t sizes[IB_LIST_SIZE];
uint32_t offsets[IB_LIST_SIZE];
};
static void dump_ib1(struct kgsl_device *device, uint32_t pt_base,
uint32_t base_offset,
uint32_t ib1_base, uint32_t ib1_size,
struct ib_list *ib_list, bool dump)
{
int i, j;
uint32_t value;
uint32_t *ib1_addr;
dump_ib(device, "IB1:", pt_base, base_offset, ib1_base,
ib1_size, dump);
/* fetch virtual address for given IB base */
ib1_addr = (uint32_t *)adreno_convertaddr(device, pt_base,
ib1_base, ib1_size*sizeof(uint32_t));
if (!ib1_addr)
return;
for (i = 0; i+3 < ib1_size; ) {
value = ib1_addr[i++];
if (adreno_cmd_is_ib(value)) {
uint32_t ib2_base = ib1_addr[i++];
uint32_t ib2_size = ib1_addr[i++];
/* find previous match */
for (j = 0; j < ib_list->count; ++j)
if (ib_list->sizes[j] == ib2_size
&& ib_list->bases[j] == ib2_base)
break;
if (j < ib_list->count || ib_list->count
>= IB_LIST_SIZE)
continue;
/* store match */
ib_list->sizes[ib_list->count] = ib2_size;
ib_list->bases[ib_list->count] = ib2_base;
ib_list->offsets[ib_list->count] = i<<2;
++ib_list->count;
}
}
}
static void adreno_dump_rb_buffer(const void *buf, size_t len,
char *linebuf, size_t linebuflen, int *argp)
{
const u32 *ptr4 = buf;
const int ngroups = len;
int lx = 0, j;
bool nxsp = 1;
for (j = 0; j < ngroups; j++) {
if (*argp < 0) {
lx += scnprintf(linebuf + lx, linebuflen - lx, " <");
*argp = -*argp;
} else if (nxsp)
lx += scnprintf(linebuf + lx, linebuflen - lx, " ");
else
nxsp = 1;
if (!*argp && adreno_is_pm4_type(ptr4[j])) {
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%s", adreno_pm4_name(ptr4[j]));
*argp = -(adreno_is_pm4_len(ptr4[j])+1);
} else {
lx += scnprintf(linebuf + lx, linebuflen - lx,
"%8.8X", ptr4[j]);
if (*argp > 1)
--*argp;
else if (*argp == 1) {
*argp = 0;
nxsp = 0;
lx += scnprintf(linebuf + lx, linebuflen - lx,
"> ");
}
}
}
linebuf[lx] = '\0';
}
static bool adreno_rb_use_hex(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_RB_HEX
return 1;
#else
return 0;
#endif
}
static void adreno_dump_rb(struct kgsl_device *device, const void *buf,
size_t len, int start, int size)
{
const uint32_t *ptr = buf;
int i, remaining, args = 0;
unsigned char linebuf[32 * 3 + 2 + 32 + 1];
const int rowsize = 8;
len >>= 2;
remaining = len;
for (i = 0; i < len; i += rowsize) {
int linelen = min(remaining, rowsize);
remaining -= rowsize;
if (adreno_rb_use_hex())
hex_dump_to_buffer(ptr+i, linelen*4, rowsize*4, 4,
linebuf, sizeof(linebuf), 0);
else
adreno_dump_rb_buffer(ptr+i, linelen, linebuf,
sizeof(linebuf), &args);
KGSL_LOG_DUMP(device,
"RB: %4.4X:%s\n", (start+i)%size, linebuf);
}
}
static bool adreno_ib_dump_enabled(void)
{
#ifdef CONFIG_MSM_KGSL_PSTMRTMDMP_NO_IB_DUMP
return 0;
#else
return 1;
#endif
}
struct log_field {
bool show;
const char *display;
};
static int adreno_dump_fields_line(struct kgsl_device *device,
const char *start, char *str, int slen,
const struct log_field **lines,
int num)
{
const struct log_field *l = *lines;
int sptr, count = 0;
sptr = snprintf(str, slen, "%s", start);
for ( ; num && sptr < slen; num--, l++) {
int ilen = strlen(l->display);
if (!l->show)
continue;
if (count)
ilen += strlen(" | ");
if (ilen > (slen - sptr))
break;
if (count++)
sptr += snprintf(str + sptr, slen - sptr, " | ");
sptr += snprintf(str + sptr, slen - sptr, "%s", l->display);
}
KGSL_LOG_DUMP(device, "%s\n", str);
*lines = l;
return num;
}
static void adreno_dump_fields(struct kgsl_device *device,
const char *start, const struct log_field *lines,
int num)
{
char lb[90];
const char *sstr = start;
lb[sizeof(lb) - 1] = '\0';
while (num) {
int ret = adreno_dump_fields_line(device, sstr, lb,
sizeof(lb) - 1, &lines, num);
if (ret == num)
break;
num = ret;
sstr = " ";
}
}
static int adreno_dump(struct kgsl_device *device)
{
unsigned int r1, r2, r3, rbbm_status;
unsigned int cp_ib1_base, cp_ib1_bufsz, cp_stat;
unsigned int cp_ib2_base, cp_ib2_bufsz;
unsigned int pt_base, cur_pt_base;
unsigned int cp_rb_base, rb_count;
unsigned int cp_rb_wptr, cp_rb_rptr;
unsigned int i;
int result = 0;
uint32_t *rb_copy;
const uint32_t *rb_vaddr;
int num_item = 0;
int read_idx, write_idx;
unsigned int ts_processed = 0xdeaddead;
struct kgsl_context *context;
unsigned int context_id;
static struct ib_list ib_list;
struct adreno_device *adreno_dev = ADRENO_DEVICE(device);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
mb();
KGSL_LOG_DUMP(device, "POWER: FLAGS = %08lX | ACTIVE POWERLEVEL = %08X",
pwr->power_flags, pwr->active_pwrlevel);
KGSL_LOG_DUMP(device, "POWER: INTERVAL TIMEOUT = %08X ",
pwr->interval_timeout);
KGSL_LOG_DUMP(device, "GRP_CLK = %lu ",
kgsl_get_clkrate(pwr->grp_clks[0]));
KGSL_LOG_DUMP(device, "BUS CLK = %lu ",
kgsl_get_clkrate(pwr->ebi1_clk));
kgsl_regread(device, REG_RBBM_STATUS, &rbbm_status);
kgsl_regread(device, REG_RBBM_PM_OVERRIDE1, &r2);
kgsl_regread(device, REG_RBBM_PM_OVERRIDE2, &r3);
KGSL_LOG_DUMP(device, "RBBM: STATUS = %08X | PM_OVERRIDE1 = %08X | "
"PM_OVERRIDE2 = %08X\n", rbbm_status, r2, r3);
kgsl_regread(device, REG_RBBM_INT_CNTL, &r1);
kgsl_regread(device, REG_RBBM_INT_STATUS, &r2);
kgsl_regread(device, REG_RBBM_READ_ERROR, &r3);
KGSL_LOG_DUMP(device, " INT_CNTL = %08X | INT_STATUS = %08X | "
"READ_ERROR = %08X\n", r1, r2, r3);
{
char cmdFifo[16];
struct log_field lines[] = {
{rbbm_status & 0x001F, cmdFifo},
{rbbm_status & BIT(5), "TC busy "},
{rbbm_status & BIT(8), "HIRQ pending"},
{rbbm_status & BIT(9), "CPRQ pending"},
{rbbm_status & BIT(10), "CFRQ pending"},
{rbbm_status & BIT(11), "PFRQ pending"},
{rbbm_status & BIT(12), "VGT 0DMA bsy"},
{rbbm_status & BIT(14), "RBBM WU busy"},
{rbbm_status & BIT(16), "CP NRT busy "},
{rbbm_status & BIT(18), "MH busy "},
{rbbm_status & BIT(19), "MH chncy bsy"},
{rbbm_status & BIT(21), "SX busy "},
{rbbm_status & BIT(22), "TPC busy "},
{rbbm_status & BIT(24), "SC CNTX busy"},
{rbbm_status & BIT(25), "PA busy "},
{rbbm_status & BIT(26), "VGT busy "},
{rbbm_status & BIT(27), "SQ cntx1 bsy"},
{rbbm_status & BIT(28), "SQ cntx0 bsy"},
{rbbm_status & BIT(30), "RB busy "},
{rbbm_status & BIT(31), "Grphs pp bsy"},
};
snprintf(cmdFifo, sizeof(cmdFifo), "CMD FIFO=%01X ",
rbbm_status & 0xf);
adreno_dump_fields(device, " STATUS=", lines,
ARRAY_SIZE(lines));
}
kgsl_regread(device, REG_CP_RB_BASE, &cp_rb_base);
kgsl_regread(device, REG_CP_RB_CNTL, &r2);
rb_count = 2 << (r2 & (BIT(6)-1));
kgsl_regread(device, REG_CP_RB_RPTR_ADDR, &r3);
KGSL_LOG_DUMP(device,
"CP_RB: BASE = %08X | CNTL = %08X | RPTR_ADDR = %08X"
" | rb_count = %08X\n", cp_rb_base, r2, r3, rb_count);
{
struct adreno_ringbuffer *rb = &adreno_dev->ringbuffer;
if (rb->sizedwords != rb_count)
rb_count = rb->sizedwords;
}
kgsl_regread(device, REG_CP_RB_RPTR, &cp_rb_rptr);
kgsl_regread(device, REG_CP_RB_WPTR, &cp_rb_wptr);
kgsl_regread(device, REG_CP_RB_RPTR_WR, &r3);
KGSL_LOG_DUMP(device,
" RPTR = %08X | WPTR = %08X | RPTR_WR = %08X"
"\n", cp_rb_rptr, cp_rb_wptr, r3);
kgsl_regread(device, REG_CP_IB1_BASE, &cp_ib1_base);
kgsl_regread(device, REG_CP_IB1_BUFSZ, &cp_ib1_bufsz);
KGSL_LOG_DUMP(device,
"CP_IB1: BASE = %08X | BUFSZ = %d\n", cp_ib1_base,
cp_ib1_bufsz);
kgsl_regread(device, REG_CP_IB2_BASE, &cp_ib2_base);
kgsl_regread(device, REG_CP_IB2_BUFSZ, &cp_ib2_bufsz);
KGSL_LOG_DUMP(device,
"CP_IB2: BASE = %08X | BUFSZ = %d\n", cp_ib2_base,
cp_ib2_bufsz);
kgsl_regread(device, REG_CP_INT_CNTL, &r1);
kgsl_regread(device, REG_CP_INT_STATUS, &r2);
KGSL_LOG_DUMP(device, "CP_INT: CNTL = %08X | STATUS = %08X\n", r1, r2);
kgsl_regread(device, REG_CP_ME_CNTL, &r1);
kgsl_regread(device, REG_CP_ME_STATUS, &r2);
kgsl_regread(device, REG_MASTER_INT_SIGNAL, &r3);
KGSL_LOG_DUMP(device,
"CP_ME: CNTL = %08X | STATUS = %08X | MSTR_INT_SGNL = "
"%08X\n", r1, r2, r3);
kgsl_regread(device, REG_CP_STAT, &cp_stat);
KGSL_LOG_DUMP(device, "CP_STAT = %08X\n", cp_stat);
#ifndef CONFIG_MSM_KGSL_PSTMRTMDMP_CP_STAT_NO_DETAIL
{
struct log_field lns[] = {
{cp_stat & BIT(0), "WR_BSY 0"},
{cp_stat & BIT(1), "RD_RQ_BSY 1"},
{cp_stat & BIT(2), "RD_RTN_BSY 2"},
};
adreno_dump_fields(device, " MIU=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(5), "RING_BUSY 5"},
{cp_stat & BIT(6), "NDRCTS_BSY 6"},
{cp_stat & BIT(7), "NDRCT2_BSY 7"},
{cp_stat & BIT(9), "ST_BUSY 9"},
{cp_stat & BIT(10), "BUSY 10"},
};
adreno_dump_fields(device, " CSF=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(11), "RNG_Q_BSY 11"},
{cp_stat & BIT(12), "NDRCTS_Q_B12"},
{cp_stat & BIT(13), "NDRCT2_Q_B13"},
{cp_stat & BIT(16), "ST_QUEUE_B16"},
{cp_stat & BIT(17), "PFP_BUSY 17"},
};
adreno_dump_fields(device, " RING=", lns, ARRAY_SIZE(lns));
}
{
struct log_field lns[] = {
{cp_stat & BIT(3), "RBIU_BUSY 3"},
{cp_stat & BIT(4), "RCIU_BUSY 4"},
{cp_stat & BIT(18), "MQ_RG_BSY 18"},
{cp_stat & BIT(19), "MQ_NDRS_BS19"},
{cp_stat & BIT(20), "MQ_NDR2_BS20"},
{cp_stat & BIT(21), "MIU_WC_STL21"},
{cp_stat & BIT(22), "CP_NRT_BSY22"},
{cp_stat & BIT(23), "3D_BUSY 23"},
{cp_stat & BIT(26), "ME_BUSY 26"},
{cp_stat & BIT(29), "ME_WC_BSY 29"},
{cp_stat & BIT(30), "MIU_FF EM 30"},
{cp_stat & BIT(31), "CP_BUSY 31"},
};
adreno_dump_fields(device, " CP_STT=", lns, ARRAY_SIZE(lns));
}
#endif
kgsl_regread(device, REG_SCRATCH_REG0, &r1);
KGSL_LOG_DUMP(device, "SCRATCH_REG0 = %08X\n", r1);
kgsl_regread(device, REG_COHER_SIZE_PM4, &r1);
kgsl_regread(device, REG_COHER_BASE_PM4, &r2);
kgsl_regread(device, REG_COHER_STATUS_PM4, &r3);
KGSL_LOG_DUMP(device,
"COHER: SIZE_PM4 = %08X | BASE_PM4 = %08X | STATUS_PM4"
" = %08X\n", r1, r2, r3);
kgsl_regread(device, MH_AXI_ERROR, &r1);
KGSL_LOG_DUMP(device, "MH: AXI_ERROR = %08X\n", r1);
kgsl_regread(device, MH_MMU_PAGE_FAULT, &r1);
kgsl_regread(device, MH_MMU_CONFIG, &r2);
kgsl_regread(device, MH_MMU_MPU_BASE, &r3);
KGSL_LOG_DUMP(device,
"MH_MMU: PAGE_FAULT = %08X | CONFIG = %08X | MPU_BASE ="
" %08X\n", r1, r2, r3);
kgsl_regread(device, MH_MMU_MPU_END, &r1);
kgsl_regread(device, MH_MMU_VA_RANGE, &r2);
pt_base = kgsl_mmu_get_current_ptbase(device);
KGSL_LOG_DUMP(device,
" MPU_END = %08X | VA_RANGE = %08X | PT_BASE ="
" %08X\n", r1, r2, pt_base);
cur_pt_base = pt_base;
KGSL_LOG_DUMP(device, "PAGETABLE SIZE: %08X ", KGSL_PAGETABLE_SIZE);
kgsl_regread(device, MH_MMU_TRAN_ERROR, &r1);
KGSL_LOG_DUMP(device, " TRAN_ERROR = %08X\n", r1);
kgsl_regread(device, MH_INTERRUPT_MASK, &r1);
kgsl_regread(device, MH_INTERRUPT_STATUS, &r2);
KGSL_LOG_DUMP(device,
"MH_INTERRUPT: MASK = %08X | STATUS = %08X\n", r1, r2);
kgsl_sharedmem_readl(&device->memstore,
(unsigned int *) &context_id,
KGSL_MEMSTORE_OFFSET(KGSL_MEMSTORE_GLOBAL,
current_context));
context = idr_find(&device->context_idr, context_id);
if (context) {
ts_processed = device->ftbl->readtimestamp(device, context,
KGSL_TIMESTAMP_RETIRED);
KGSL_LOG_DUMP(device, "CTXT: %d TIMESTM RTRD: %08X\n",
context->id, ts_processed);
} else
KGSL_LOG_DUMP(device, "BAD CTXT: %d\n", context_id);
num_item = adreno_ringbuffer_count(&adreno_dev->ringbuffer,
cp_rb_rptr);
if (num_item <= 0)
KGSL_LOG_POSTMORTEM_WRITE(device, "Ringbuffer is Empty.\n");
rb_copy = vmalloc(rb_count<<2);
if (!rb_copy) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"vmalloc(%d) failed\n", rb_count << 2);
result = -ENOMEM;
goto end;
}
KGSL_LOG_DUMP(device, "RB: rd_addr:%8.8x rb_size:%d num_item:%d\n",
cp_rb_base, rb_count<<2, num_item);
if (adreno_dev->ringbuffer.buffer_desc.gpuaddr != cp_rb_base)
KGSL_LOG_POSTMORTEM_WRITE(device,
"rb address mismatch, should be 0x%08x\n",
adreno_dev->ringbuffer.buffer_desc.gpuaddr);
rb_vaddr = adreno_dev->ringbuffer.buffer_desc.hostptr;
if (!rb_vaddr) {
KGSL_LOG_POSTMORTEM_WRITE(device,
"rb has no kernel mapping!\n");
goto error_vfree;
}
read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (read_idx < 0)
read_idx += rb_count;
write_idx = (int)cp_rb_wptr + 16;
if (write_idx > rb_count)
write_idx -= rb_count;
num_item += NUM_DWORDS_OF_RINGBUFFER_HISTORY+16;
if (num_item > rb_count)
num_item = rb_count;
if (write_idx >= read_idx)
memcpy(rb_copy, rb_vaddr+read_idx, num_item<<2);
else {
int part1_c = rb_count-read_idx;
memcpy(rb_copy, rb_vaddr+read_idx, part1_c<<2);
memcpy(rb_copy+part1_c, rb_vaddr, (num_item-part1_c)<<2);
}
/* extract the latest ib commands from the buffer */
ib_list.count = 0;
i = 0;
for (read_idx = 0; read_idx < num_item; ) {
uint32_t this_cmd = rb_copy[read_idx++];
if (adreno_cmd_is_ib(this_cmd)) {
uint32_t ib_addr = rb_copy[read_idx++];
uint32_t ib_size = rb_copy[read_idx++];
dump_ib1(device, cur_pt_base, (read_idx-3)<<2, ib_addr,
ib_size, &ib_list, 0);
for (; i < ib_list.count; ++i)
dump_ib(device, "IB2:", cur_pt_base,
ib_list.offsets[i],
ib_list.bases[i],
ib_list.sizes[i], 0);
} else if (this_cmd == cp_type0_packet(MH_MMU_PT_BASE, 1)) {
/* Set cur_pt_base to the new pagetable base */
cur_pt_base = rb_copy[read_idx++];
}
}
/* Restore cur_pt_base back to the pt_base of
the process in whose context the GPU hung */
cur_pt_base = pt_base;
read_idx = (int)cp_rb_rptr - NUM_DWORDS_OF_RINGBUFFER_HISTORY;
if (read_idx < 0)
read_idx += rb_count;
KGSL_LOG_DUMP(device,
"RB: addr=%8.8x window:%4.4x-%4.4x, start:%4.4x\n",
cp_rb_base, cp_rb_rptr, cp_rb_wptr, read_idx);
adreno_dump_rb(device, rb_copy, num_item<<2, read_idx, rb_count);
if (adreno_ib_dump_enabled()) {
for (read_idx = NUM_DWORDS_OF_RINGBUFFER_HISTORY;
read_idx >= 0; --read_idx) {
uint32_t this_cmd = rb_copy[read_idx];
if (adreno_cmd_is_ib(this_cmd)) {
uint32_t ib_addr = rb_copy[read_idx+1];
uint32_t ib_size = rb_copy[read_idx+2];
if (ib_size && cp_ib1_base == ib_addr) {
KGSL_LOG_DUMP(device,
"IB1: base:%8.8X "
"count:%d\n", ib_addr, ib_size);
dump_ib(device, "IB1: ", cur_pt_base,
read_idx<<2, ib_addr, ib_size,
1);
}
}
}
for (i = 0; i < ib_list.count; ++i) {
uint32_t ib_size = ib_list.sizes[i];
uint32_t ib_offset = ib_list.offsets[i];
if (ib_size && cp_ib2_base == ib_list.bases[i]) {
KGSL_LOG_DUMP(device,
"IB2: base:%8.8X count:%d\n",
cp_ib2_base, ib_size);
dump_ib(device, "IB2: ", cur_pt_base, ib_offset,
ib_list.bases[i], ib_size, 1);
}
}
}
/* Dump the registers if the user asked for it */
if (adreno_is_a20x(adreno_dev))
adreno_dump_regs(device, a200_registers,
ARRAY_SIZE(a200_registers) / 2);
else if (adreno_is_a22x(adreno_dev))
adreno_dump_regs(device, a220_registers,
ARRAY_SIZE(a220_registers) / 2);
error_vfree:
vfree(rb_copy);
end:
return result;
}
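The dump above linearizes the ring buffer before parsing it: when the captured window wraps past the end of the buffer, it is copied out in two pieces. Below is a minimal userspace sketch of that two-memcpy technique; the ring size and indices are invented for illustration, and the driver itself derives the split from read_idx/write_idx as shown above.
#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define RB_DWORDS 8 /* hypothetical ring size, in dwords */

/* Copy 'count' dwords starting at 'read_idx' out of a ring of
 * RB_DWORDS dwords, splitting the copy when the window wraps. */
static void rb_linearize(const uint32_t *ring, uint32_t *out,
			 int read_idx, int count)
{
	if (read_idx + count <= RB_DWORDS) {
		memcpy(out, ring + read_idx, count << 2);
	} else {
		int part1 = RB_DWORDS - read_idx;
		memcpy(out, ring + read_idx, part1 << 2);
		memcpy(out + part1, ring, (count - part1) << 2);
	}
}

int main(void)
{
	uint32_t ring[RB_DWORDS] = { 0, 1, 2, 3, 4, 5, 6, 7 };
	uint32_t out[RB_DWORDS];
	int i;

	rb_linearize(ring, out, 6, 4); /* wraps: copies 6,7 then 0,1 */
	for (i = 0; i < 4; i++)
		printf("%u ", out[i]); /* prints: 6 7 0 1 */
	printf("\n");
	return 0;
}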
/**
* adreno_postmortem_dump - Dump the current GPU state
* @device - A pointer to the KGSL device to dump
* @manual - A flag that indicates if this was a manually triggered
* dump (from debugfs). If zero, then this is assumed to be a
* dump automatically triggered from a hang
*/
int adreno_postmortem_dump(struct kgsl_device *device, int manual)
{
bool saved_nap;
BUG_ON(device == NULL);
kgsl_cffdump_hang(device->id);
/* For a manual dump, make sure that the system is idle */
if (manual) {
if (device->active_cnt != 0) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->suspend_gate);
mutex_lock(&device->mutex);
}
if (device->state == KGSL_STATE_ACTIVE)
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
}
/* Disable the idle timer so we don't get interrupted */
del_timer_sync(&device->idle_timer);
/* Turn off napping to make sure we have the clocks' full
attention through the following process */
saved_nap = device->pwrctrl.nap_allowed;
device->pwrctrl.nap_allowed = false;
/* Force on the clocks */
kgsl_pwrctrl_wake(device);
/* Disable the irq */
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
/* If this is not a manual trigger, then set up the
state to try to recover */
if (!manual) {
device->state = KGSL_STATE_DUMP_AND_RECOVER;
KGSL_PWR_WARN(device,
"state -> DUMP_AND_RECOVER, device %d\n",
device->id);
}
KGSL_DRV_ERR(device,
"wait for work in workqueue to complete\n");
mutex_unlock(&device->mutex);
flush_workqueue(device->work_queue);
mutex_lock(&device->mutex);
adreno_dump(device);
/* Restore nap mode */
device->pwrctrl.nap_allowed = saved_nap;
/* On a manual trigger, turn on the interrupts and put
the clocks to sleep. They will recover themselves
on the next event. For a hang, leave things as they
are until recovery kicks in. */
if (manual) {
kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);
/* try to go into a sleep mode until the next event */
device->requested_state = KGSL_STATE_SLEEP;
kgsl_pwrctrl_sleep(device);
}
KGSL_DRV_ERR(device, "Dump Finished\n");
return 0;
}


@@ -0,0 +1,21 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_POSTMORTEM_H
#define __ADRENO_POSTMORTEM_H
struct kgsl_device;
int adreno_postmortem_dump(struct kgsl_device *device, int manual);
#endif /* __ADRENO_POSTMORTEM_H */

File diff suppressed because it is too large


@@ -0,0 +1,141 @@
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __ADRENO_RINGBUFFER_H
#define __ADRENO_RINGBUFFER_H
/*
* Adreno ringbuffer sizes in bytes - these are converted to
* the appropriate log2 values in the code
*/
#define KGSL_RB_SIZE (32 * 1024)
#define KGSL_RB_BLKSIZE 16
/* CP timestamp register */
#define REG_CP_TIMESTAMP REG_SCRATCH_REG0
struct kgsl_device;
struct kgsl_device_private;
#define GSL_RB_MEMPTRS_SCRATCH_COUNT 8
struct kgsl_rbmemptrs {
int rptr;
int wptr_poll;
};
#define GSL_RB_MEMPTRS_RPTR_OFFSET \
(offsetof(struct kgsl_rbmemptrs, rptr))
#define GSL_RB_MEMPTRS_WPTRPOLL_OFFSET \
(offsetof(struct kgsl_rbmemptrs, wptr_poll))
struct adreno_ringbuffer {
struct kgsl_device *device;
uint32_t flags;
struct kgsl_memdesc buffer_desc;
struct kgsl_memdesc memptrs_desc;
struct kgsl_rbmemptrs *memptrs;
/* ringbuffer size */
unsigned int sizedwords;
unsigned int wptr; /* write pointer offset in dwords from baseaddr */
unsigned int rptr; /* read pointer offset in dwords from baseaddr */
unsigned int timestamp[KGSL_MEMSTORE_MAX];
};
#define GSL_RB_WRITE(ring, gpuaddr, data) \
do { \
*ring = data; \
wmb(); \
kgsl_cffdump_setmem(gpuaddr, data, 4); \
ring++; \
gpuaddr += sizeof(uint); \
} while (0)
/* enable timestamp (...scratch0) memory shadowing */
#define GSL_RB_MEMPTRS_SCRATCH_MASK 0x1
#define GSL_RB_INIT_TIMESTAMP(rb)
/* mem rptr */
#define GSL_RB_CNTL_NO_UPDATE 0x0 /* enable */
#define GSL_RB_GET_READPTR(rb, data) \
do { \
*(data) = rb->memptrs->rptr; \
} while (0)
#define GSL_RB_CNTL_POLL_EN 0x0 /* disable */
/*
* protected mode error checking below register address 0x800
* note: if CP_INTERRUPT packet is used then checking needs
* to change to below register address 0x7C8
*/
#define GSL_RB_PROTECTED_MODE_CONTROL 0x200001F2
int adreno_ringbuffer_issueibcmds(struct kgsl_device_private *dev_priv,
struct kgsl_context *context,
struct kgsl_ibdesc *ibdesc,
unsigned int numibs,
uint32_t *timestamp,
unsigned int flags);
int adreno_ringbuffer_init(struct kgsl_device *device);
int adreno_ringbuffer_start(struct adreno_ringbuffer *rb,
unsigned int init_ram);
void adreno_ringbuffer_stop(struct adreno_ringbuffer *rb);
void adreno_ringbuffer_close(struct adreno_ringbuffer *rb);
void adreno_ringbuffer_issuecmds(struct kgsl_device *device,
unsigned int flags,
unsigned int *cmdaddr,
int sizedwords);
void adreno_ringbuffer_submit(struct adreno_ringbuffer *rb);
void kgsl_cp_intrcallback(struct kgsl_device *device);
int adreno_ringbuffer_extract(struct adreno_ringbuffer *rb,
unsigned int *temp_rb_buffer,
int *rb_size);
void
adreno_ringbuffer_restore(struct adreno_ringbuffer *rb, unsigned int *rb_buff,
int num_rb_contents);
unsigned int *adreno_ringbuffer_allocspace(struct adreno_ringbuffer *rb,
unsigned int numcmds);
static inline int adreno_ringbuffer_count(struct adreno_ringbuffer *rb,
unsigned int rptr)
{
if (rb->wptr >= rptr)
return rb->wptr - rptr;
return rb->wptr + rb->sizedwords - rptr;
}
/* Increment a value by 4 bytes with wrap-around based on size */
static inline unsigned int adreno_ringbuffer_inc_wrapped(unsigned int val,
unsigned int size)
{
return (val + sizeof(unsigned int)) % size;
}
#endif /* __ADRENO_RINGBUFFER_H */
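A worked example of the two helpers above, with numbers invented for illustration: with rb->sizedwords = 1024, rb->wptr = 10 and rptr = 1020, adreno_ringbuffer_count() takes the wrap branch and returns 10 + 1024 - 1020 = 14 pending dwords. Likewise adreno_ringbuffer_inc_wrapped(4092, 4096) returns (4092 + 4) % 4096 = 0, stepping a byte offset past the end of a 4096-byte buffer back to its start.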

drivers/gpu/msm/kgsl.c (2619 lines, executable file)

File diff suppressed because it is too large

drivers/gpu/msm/kgsl.h (252 lines, executable file)

@@ -0,0 +1,252 @@
/* Copyright (c) 2008-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_H
#define __KGSL_H
#include <linux/types.h>
#include <linux/msm_kgsl.h>
#include <linux/platform_device.h>
#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/mutex.h>
#include <linux/cdev.h>
#include <linux/regulator/consumer.h>
#include <linux/mm.h>
#define KGSL_NAME "kgsl"
/* The number of memstore arrays limits the number of contexts allowed.
* If more contexts are needed, increase the multiplier in KGSL_MEMSTORE_SIZE
*/
#define KGSL_MEMSTORE_SIZE ((int)(PAGE_SIZE * 2))
#define KGSL_MEMSTORE_GLOBAL (0)
#define KGSL_MEMSTORE_MAX (KGSL_MEMSTORE_SIZE / \
sizeof(struct kgsl_devmemstore) - 1)
/* Timestamp window used to detect rollovers */
#define KGSL_TIMESTAMP_WINDOW 0x80000000
/* cache coherency ops */
#define DRM_KGSL_GEM_CACHE_OP_TO_DEV 0x0001
#define DRM_KGSL_GEM_CACHE_OP_FROM_DEV 0x0002
/* The size of each entry in a page table */
#define KGSL_PAGETABLE_ENTRY_SIZE 4
/* Pagetable Virtual Address base */
#define KGSL_PAGETABLE_BASE 0x66000000
/* Extra accounting entries needed in the pagetable */
#define KGSL_PT_EXTRA_ENTRIES 16
#define KGSL_PAGETABLE_ENTRIES(_sz) (((_sz) >> PAGE_SHIFT) + \
KGSL_PT_EXTRA_ENTRIES)
#define KGSL_PAGETABLE_SIZE \
ALIGN(KGSL_PAGETABLE_ENTRIES(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE) * \
KGSL_PAGETABLE_ENTRY_SIZE, PAGE_SIZE)
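As a worked example of the two macros above, assuming a hypothetical CONFIG_MSM_KGSL_PAGE_TABLE_SIZE of 32MB and 4KB pages: KGSL_PAGETABLE_ENTRIES(32MB) = (33554432 >> 12) + 16 = 8208 entries, so KGSL_PAGETABLE_SIZE = ALIGN(8208 * 4, 4096) = 36864 bytes, i.e. nine pages per pagetable.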
#ifdef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
#define KGSL_PAGETABLE_COUNT (CONFIG_MSM_KGSL_PAGE_TABLE_COUNT)
#else
#define KGSL_PAGETABLE_COUNT 1
#endif
/* Casting using container_of() for structures that kgsl owns. */
#define KGSL_CONTAINER_OF(ptr, type, member) \
container_of(ptr, type, member)
/* A macro for memory statistics - add the new size to the stat and if
the statistic is greater than _max, set _max
*/
#define KGSL_STATS_ADD(_size, _stat, _max) \
do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)
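KGSL_STATS_ADD keeps both a running counter and a high watermark. A minimal userspace sketch of the same macro; the names mapped/mapped_max mirror the driver's stats fields, and the values are invented:
#include <stdio.h>

#define KGSL_STATS_ADD(_size, _stat, _max) \
	do { _stat += (_size); if (_stat > _max) _max = _stat; } while (0)

int main(void)
{
	unsigned int mapped = 0, mapped_max = 0;

	KGSL_STATS_ADD(4096, mapped, mapped_max); /* cur 4096,  max 4096  */
	KGSL_STATS_ADD(8192, mapped, mapped_max); /* cur 12288, max 12288 */
	mapped -= 8192;                           /* a free drops cur only */
	KGSL_STATS_ADD(2048, mapped, mapped_max); /* cur 6144,  max 12288 */
	printf("cur=%u max=%u\n", mapped, mapped_max);
	return 0;
}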
struct kgsl_device;
struct kgsl_driver {
struct cdev cdev;
dev_t major;
struct class *class;
/* Virtual device for managing the core */
struct device virtdev;
/* Kobjects for storing pagetable and process statistics */
struct kobject *ptkobj;
struct kobject *prockobj;
struct kgsl_device *devp[KGSL_DEVICE_MAX];
/* Global list of open processes */
struct list_head process_list;
/* Global list of pagetables */
struct list_head pagetable_list;
/* Spinlock for accessing the pagetable list */
spinlock_t ptlock;
/* Mutex for accessing the process list */
struct mutex process_mutex;
/* Mutex for protecting the device list */
struct mutex devlock;
void *ptpool;
struct {
unsigned int vmalloc;
unsigned int vmalloc_max;
unsigned int page_alloc;
unsigned int page_alloc_max;
unsigned int coherent;
unsigned int coherent_max;
unsigned int mapped;
unsigned int mapped_max;
unsigned int histogram[16];
} stats;
};
extern struct kgsl_driver kgsl_driver;
struct kgsl_pagetable;
struct kgsl_memdesc;
struct kgsl_memdesc_ops {
int (*vmflags)(struct kgsl_memdesc *);
int (*vmfault)(struct kgsl_memdesc *, struct vm_area_struct *,
struct vm_fault *);
void (*free)(struct kgsl_memdesc *memdesc);
int (*map_kernel_mem)(struct kgsl_memdesc *);
};
#define KGSL_MEMDESC_GUARD_PAGE BIT(0)
/* shared memory allocation */
struct kgsl_memdesc {
struct kgsl_pagetable *pagetable;
void *hostptr;
unsigned int gpuaddr;
unsigned int physaddr;
unsigned int size;
unsigned int priv;
struct scatterlist *sg;
unsigned int sglen;
struct kgsl_memdesc_ops *ops;
int flags;
};
/* List of different memory entry types */
#define KGSL_MEM_ENTRY_KERNEL 0
#define KGSL_MEM_ENTRY_PMEM 1
#define KGSL_MEM_ENTRY_ASHMEM 2
#define KGSL_MEM_ENTRY_USER 3
#define KGSL_MEM_ENTRY_ION 4
#define KGSL_MEM_ENTRY_MAX 5
struct kgsl_pagetable;
struct kgsl_memdesc_ops;
struct kgsl_mem_entry {
struct kref refcount;
struct kgsl_memdesc memdesc;
int memtype;
void *priv_data;
struct list_head list;
uint32_t free_timestamp;
unsigned int context_id;
/* back pointer to private structure under whose context this
* allocation is made */
struct kgsl_process_private *priv;
};
#ifdef CONFIG_MSM_KGSL_MMU_PAGE_FAULT
#define MMU_CONFIG 2
#else
#define MMU_CONFIG 1
#endif
void kgsl_mem_entry_destroy(struct kref *kref);
struct kgsl_mem_entry *kgsl_get_mem_entry(unsigned int ptbase,
unsigned int gpuaddr, unsigned int size);
struct kgsl_mem_entry *kgsl_sharedmem_find_region(
struct kgsl_process_private *private, unsigned int gpuaddr,
size_t size);
extern const struct dev_pm_ops kgsl_pm_ops;
struct early_suspend;
int kgsl_suspend_driver(struct platform_device *pdev, pm_message_t state);
int kgsl_resume_driver(struct platform_device *pdev);
void kgsl_early_suspend_driver(struct early_suspend *h);
void kgsl_late_resume_driver(struct early_suspend *h);
#ifdef CONFIG_MSM_KGSL_DRM
extern int kgsl_drm_init(struct platform_device *dev);
extern void kgsl_drm_exit(void);
extern void kgsl_gpu_mem_flush(int op);
#else
static inline int kgsl_drm_init(struct platform_device *dev)
{
return 0;
}
static inline void kgsl_drm_exit(void)
{
}
#endif
static inline int kgsl_gpuaddr_in_memdesc(const struct kgsl_memdesc *memdesc,
unsigned int gpuaddr, unsigned int size)
{
if (gpuaddr >= memdesc->gpuaddr &&
((gpuaddr + size) <= (memdesc->gpuaddr + memdesc->size))) {
return 1;
}
return 0;
}
static inline uint8_t *kgsl_gpuaddr_to_vaddr(struct kgsl_memdesc *memdesc,
unsigned int gpuaddr)
{
if (memdesc->gpuaddr == 0 ||
gpuaddr < memdesc->gpuaddr ||
gpuaddr >= (memdesc->gpuaddr + memdesc->size) ||
(NULL == memdesc->hostptr && memdesc->ops->map_kernel_mem &&
memdesc->ops->map_kernel_mem(memdesc)))
return NULL;
return memdesc->hostptr + (gpuaddr - memdesc->gpuaddr);
}
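For example (addresses invented for illustration): with memdesc->gpuaddr = 0x66001000, memdesc->size = 0x2000 and a valid hostptr H, kgsl_gpuaddr_to_vaddr(memdesc, 0x66001804) returns H + 0x804, while any gpuaddr outside [0x66001000, 0x66003000) returns NULL.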
static inline int timestamp_cmp(unsigned int new, unsigned int old)
{
int ts_diff = new - old;
if (ts_diff == 0)
return 0;
return ((ts_diff > 0) || (ts_diff < -KGSL_TIMESTAMP_WINDOW)) ? 1 : -1;
}
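Because the difference is computed in 32-bit two's complement, timestamp_cmp() stays correct across rollover: timestamp_cmp(0x00000005, 0xFFFFFFF0) computes ts_diff = 0x15 = 21 > 0 and returns 1 (new is ahead) even though the raw values compare the other way, while timestamp_cmp(10, 200) computes ts_diff = -190 and returns -1.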
static inline void
kgsl_mem_entry_get(struct kgsl_mem_entry *entry)
{
kref_get(&entry->refcount);
}
static inline void
kgsl_mem_entry_put(struct kgsl_mem_entry *entry)
{
kref_put(&entry->refcount, kgsl_mem_entry_destroy);
}
#endif /* __KGSL_H */

drivers/gpu/msm/kgsl_cffdump.c (591 lines, executable file)

@@ -0,0 +1,591 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
/* #define DEBUG */
#define ALIGN_CPU
#include <linux/spinlock.h>
#include <linux/debugfs.h>
#include <linux/relay.h>
#include <linux/slab.h>
#include <linux/time.h>
#include <linux/sched.h>
//#include <mach/socinfo.h>
#include "kgsl.h"
#include "kgsl_cffdump.h"
#include "kgsl_debugfs.h"
#include "kgsl_log.h"
#include "kgsl_sharedmem.h"
#include "adreno_pm4types.h"
static struct rchan *chan;
static struct dentry *dir;
static int suspended;
static size_t dropped;
static size_t subbuf_size = 256*1024;
static size_t n_subbufs = 64;
/* forward declarations */
static void destroy_channel(void);
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs);
static spinlock_t cffdump_lock;
static ulong serial_nr;
static ulong total_bytes;
static ulong total_syncmem;
static long last_sec;
#define MEMBUF_SIZE 64
#define CFF_OP_WRITE_REG 0x00000002
struct cff_op_write_reg {
unsigned char op;
uint addr;
uint value;
} __packed;
#define CFF_OP_POLL_REG 0x00000004
struct cff_op_poll_reg {
unsigned char op;
uint addr;
uint value;
uint mask;
} __packed;
#define CFF_OP_WAIT_IRQ 0x00000005
struct cff_op_wait_irq {
unsigned char op;
} __packed;
#define CFF_OP_RMW 0x0000000a
#define CFF_OP_WRITE_MEM 0x0000000b
struct cff_op_write_mem {
unsigned char op;
uint addr;
uint value;
} __packed;
#define CFF_OP_WRITE_MEMBUF 0x0000000c
struct cff_op_write_membuf {
unsigned char op;
uint addr;
ushort count;
uint buffer[MEMBUF_SIZE];
} __packed;
#define CFF_OP_MEMORY_BASE 0x0000000d
struct cff_op_memory_base {
unsigned char op;
uint base;
uint size;
uint gmemsize;
} __packed;
#define CFF_OP_HANG 0x0000000e
struct cff_op_hang {
unsigned char op;
} __packed;
#define CFF_OP_EOF 0xffffffff
struct cff_op_eof {
unsigned char op;
} __packed;
#define CFF_OP_VERIFY_MEM_FILE 0x00000007
#define CFF_OP_WRITE_SURFACE_PARAMS 0x00000011
struct cff_op_user_event {
unsigned char op;
unsigned int op1;
unsigned int op2;
unsigned int op3;
unsigned int op4;
unsigned int op5;
} __packed;
static void b64_encodeblock(unsigned char in[3], unsigned char out[4], int len)
{
static const char tob64[] = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmno"
"pqrstuvwxyz0123456789+/";
out[0] = tob64[in[0] >> 2];
out[1] = tob64[((in[0] & 0x03) << 4) | ((in[1] & 0xf0) >> 4)];
out[2] = (unsigned char) (len > 1 ? tob64[((in[1] & 0x0f) << 2)
| ((in[2] & 0xc0) >> 6)] : '=');
out[3] = (unsigned char) (len > 2 ? tob64[in[2] & 0x3f] : '=');
}
static void b64_encode(const unsigned char *in_buf, int in_size,
unsigned char *out_buf, int out_bufsize, int *out_size)
{
unsigned char in[3], out[4];
int i, len;
*out_size = 0;
while (in_size > 0) {
len = 0;
for (i = 0; i < 3; ++i) {
if (in_size-- > 0) {
in[i] = *in_buf++;
++len;
} else
in[i] = 0;
}
if (len) {
b64_encodeblock(in, out, len);
if (out_bufsize < 4) {
pr_warn("kgsl: cffdump: %s: out of buffer\n",
__func__);
return;
}
for (i = 0; i < 4; ++i)
*out_buf++ = out[i];
*out_size += 4;
out_bufsize -= 4;
}
}
}
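A quick userspace check of the encoder above, assuming the two functions are compiled into the same file together with <stdio.h>:
int main(void)
{
	unsigned char out[16];
	int out_size;

	/* "abc" is 3 bytes: exactly one base64 quantum, "YWJj" */
	b64_encode((const unsigned char *)"abc", 3, out, sizeof(out), &out_size);
	out[out_size] = '\0';
	printf("%s\n", out); /* YWJj, out_size == 4 */

	/* "ab" is 2 bytes: one padded quantum, "YWI=" */
	b64_encode((const unsigned char *)"ab", 2, out, sizeof(out), &out_size);
	out[out_size] = '\0';
	printf("%s\n", out); /* YWI=, out_size == 4 */
	return 0;
}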
#define KLOG_TMPBUF_SIZE (1024)
static void klog_printk(const char *fmt, ...)
{
/* per-cpu klog formatting temporary buffer */
static char klog_buf[NR_CPUS][KLOG_TMPBUF_SIZE];
va_list args;
int len;
char *cbuf;
unsigned long flags;
local_irq_save(flags);
cbuf = klog_buf[smp_processor_id()];
va_start(args, fmt);
len = vsnprintf(cbuf, KLOG_TMPBUF_SIZE, fmt, args);
total_bytes += len;
va_end(args);
relay_write(chan, cbuf, len);
local_irq_restore(flags);
}
static struct cff_op_write_membuf cff_op_write_membuf;
static void cffdump_membuf(int id, unsigned char *out_buf, int out_bufsize)
{
void *data;
int len, out_size;
struct cff_op_write_mem cff_op_write_mem;
uint addr = cff_op_write_membuf.addr
- sizeof(uint)*cff_op_write_membuf.count;
if (!cff_op_write_membuf.count) {
pr_warn("kgsl: cffdump: membuf: count == 0, skipping");
return;
}
if (cff_op_write_membuf.count != 1) {
cff_op_write_membuf.op = CFF_OP_WRITE_MEMBUF;
cff_op_write_membuf.addr = addr;
len = sizeof(cff_op_write_membuf) -
sizeof(uint)*(MEMBUF_SIZE - cff_op_write_membuf.count);
data = &cff_op_write_membuf;
} else {
cff_op_write_mem.op = CFF_OP_WRITE_MEM;
cff_op_write_mem.addr = addr;
cff_op_write_mem.value = cff_op_write_membuf.buffer[0];
data = &cff_op_write_mem;
len = sizeof(cff_op_write_mem);
}
b64_encode(data, len, out_buf, out_bufsize, &out_size);
out_buf[out_size] = 0;
klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
cff_op_write_membuf.count = 0;
cff_op_write_membuf.addr = 0;
}
static void cffdump_printline(int id, uint opcode, uint op1, uint op2,
uint op3, uint op4, uint op5)
{
struct cff_op_write_reg cff_op_write_reg;
struct cff_op_poll_reg cff_op_poll_reg;
struct cff_op_wait_irq cff_op_wait_irq;
struct cff_op_memory_base cff_op_memory_base;
struct cff_op_hang cff_op_hang;
struct cff_op_eof cff_op_eof;
struct cff_op_user_event cff_op_user_event;
unsigned char out_buf[sizeof(cff_op_write_membuf)/3*4 + 16];
void *data;
int len = 0, out_size;
long cur_secs;
spin_lock(&cffdump_lock);
if (opcode == CFF_OP_WRITE_MEM) {
if ((cff_op_write_membuf.addr != op1 &&
cff_op_write_membuf.count)
|| (cff_op_write_membuf.count == MEMBUF_SIZE))
cffdump_membuf(id, out_buf, sizeof(out_buf));
cff_op_write_membuf.buffer[cff_op_write_membuf.count++] = op2;
cff_op_write_membuf.addr = op1 + sizeof(uint);
spin_unlock(&cffdump_lock);
return;
} else if (cff_op_write_membuf.count)
cffdump_membuf(id, out_buf, sizeof(out_buf));
spin_unlock(&cffdump_lock);
switch (opcode) {
case CFF_OP_WRITE_REG:
cff_op_write_reg.op = opcode;
cff_op_write_reg.addr = op1;
cff_op_write_reg.value = op2;
data = &cff_op_write_reg;
len = sizeof(cff_op_write_reg);
break;
case CFF_OP_POLL_REG:
cff_op_poll_reg.op = opcode;
cff_op_poll_reg.addr = op1;
cff_op_poll_reg.value = op2;
cff_op_poll_reg.mask = op3;
data = &cff_op_poll_reg;
len = sizeof(cff_op_poll_reg);
break;
case CFF_OP_WAIT_IRQ:
cff_op_wait_irq.op = opcode;
data = &cff_op_wait_irq;
len = sizeof(cff_op_wait_irq);
break;
case CFF_OP_MEMORY_BASE:
cff_op_memory_base.op = opcode;
cff_op_memory_base.base = op1;
cff_op_memory_base.size = op2;
cff_op_memory_base.gmemsize = op3;
data = &cff_op_memory_base;
len = sizeof(cff_op_memory_base);
break;
case CFF_OP_HANG:
cff_op_hang.op = opcode;
data = &cff_op_hang;
len = sizeof(cff_op_hang);
break;
case CFF_OP_EOF:
cff_op_eof.op = opcode;
data = &cff_op_eof;
len = sizeof(cff_op_eof);
break;
case CFF_OP_WRITE_SURFACE_PARAMS:
case CFF_OP_VERIFY_MEM_FILE:
cff_op_user_event.op = opcode;
cff_op_user_event.op1 = op1;
cff_op_user_event.op2 = op2;
cff_op_user_event.op3 = op3;
cff_op_user_event.op4 = op4;
cff_op_user_event.op5 = op5;
data = &cff_op_user_event;
len = sizeof(cff_op_user_event);
break;
}
if (len) {
b64_encode(data, len, out_buf, sizeof(out_buf), &out_size);
out_buf[out_size] = 0;
klog_printk("%ld:%d;%s\n", ++serial_nr, id, out_buf);
} else
pr_warn("kgsl: cffdump: unhandled opcode: %d\n", opcode);
cur_secs = get_seconds();
if ((cur_secs - last_sec) > 10 || (last_sec - cur_secs) > 10) {
pr_info("kgsl: cffdump: total [bytes:%lu kB, syncmem:%lu kB], "
"seq#: %lu\n", total_bytes/1024, total_syncmem/1024,
serial_nr);
last_sec = cur_secs;
}
}
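The CFF_OP_WRITE_MEM path above coalesces sequential memory writes: as long as each write lands at the address immediately after the previous one (cff_op_write_membuf.addr is advanced by sizeof(uint) per write), the values accumulate in the membuf and are emitted as a single CFF_OP_WRITE_MEMBUF record. For example, writes to 0x1000, 0x1004 and 0x1008 (invented addresses) produce one record with addr = 0x1000 and count = 3; a following write to 0x2000 first flushes that record, then starts a new run.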
void kgsl_cffdump_init()
{
struct dentry *debugfs_dir = kgsl_get_debugfs_dir();
#ifdef ALIGN_CPU
cpumask_t mask;
cpumask_clear(&mask);
cpumask_set_cpu(0, &mask);
sched_setaffinity(0, &mask);
#endif
if (!debugfs_dir || IS_ERR(debugfs_dir)) {
KGSL_CORE_ERR("Debugfs directory is bad\n");
return;
}
kgsl_cff_dump_enable = 1;
spin_lock_init(&cffdump_lock);
dir = debugfs_create_dir("cff", debugfs_dir);
if (!dir) {
KGSL_CORE_ERR("debugfs_create_dir failed\n");
return;
}
chan = create_channel(subbuf_size, n_subbufs);
}
void kgsl_cffdump_destroy()
{
if (chan)
relay_flush(chan);
destroy_channel();
if (dir)
debugfs_remove(dir);
}
void kgsl_cffdump_open(enum kgsl_deviceid device_id)
{
kgsl_cffdump_memory_base(device_id, KGSL_PAGETABLE_BASE,
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, SZ_256K);
}
void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
unsigned int range, unsigned gmemsize)
{
cffdump_printline(device_id, CFF_OP_MEMORY_BASE, base,
range, gmemsize, 0, 0);
}
void kgsl_cffdump_hang(enum kgsl_deviceid device_id)
{
cffdump_printline(device_id, CFF_OP_HANG, 0, 0, 0, 0, 0);
}
void kgsl_cffdump_close(enum kgsl_deviceid device_id)
{
cffdump_printline(device_id, CFF_OP_EOF, 0, 0, 0, 0, 0);
}
void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1,
unsigned int op2, unsigned int op3,
unsigned int op4, unsigned int op5)
{
cffdump_printline(-1, cff_opcode, op1, op2, op3, op4, op5);
}
void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint gpuaddr, uint sizebytes,
bool clean_cache)
{
const void *src;
if (!kgsl_cff_dump_enable)
return;
total_syncmem += sizebytes;
if (memdesc == NULL) {
struct kgsl_mem_entry *entry;
spin_lock(&dev_priv->process_priv->mem_lock);
entry = kgsl_sharedmem_find_region(dev_priv->process_priv,
gpuaddr, sizebytes);
spin_unlock(&dev_priv->process_priv->mem_lock);
if (entry == NULL) {
KGSL_CORE_ERR("did not find mapping "
"for gpuaddr: 0x%08x\n", gpuaddr);
return;
}
memdesc = &entry->memdesc;
}
src = (uint *)kgsl_gpuaddr_to_vaddr(memdesc, gpuaddr);
if (memdesc->hostptr == NULL) {
KGSL_CORE_ERR("no kernel mapping for "
"gpuaddr: 0x%08x, m->host: 0x%p, phys: 0x%08x\n",
gpuaddr, memdesc->hostptr, memdesc->physaddr);
return;
}
if (clean_cache) {
/* Ensure that this memory region is not read from the
* cache but fetched fresh */
mb();
kgsl_cache_range_op((struct kgsl_memdesc *)memdesc,
KGSL_CACHE_OP_INV);
}
while (sizebytes > 3) {
cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
0, 0, 0);
gpuaddr += 4;
src += 4;
sizebytes -= 4;
}
if (sizebytes > 0)
cffdump_printline(-1, CFF_OP_WRITE_MEM, gpuaddr, *(uint *)src,
0, 0, 0);
}
void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes)
{
if (!kgsl_cff_dump_enable)
return;
while (sizebytes > 3) {
/* Use 32bit memory writes as long as there's at least
* 4 bytes left */
cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
0, 0, 0);
addr += 4;
sizebytes -= 4;
}
if (sizebytes > 0)
cffdump_printline(-1, CFF_OP_WRITE_MEM, addr, value,
0, 0, 0);
}
void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
uint value)
{
if (!kgsl_cff_dump_enable)
return;
cffdump_printline(device_id, CFF_OP_WRITE_REG, addr, value,
0, 0, 0);
}
void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
uint value, uint mask)
{
if (!kgsl_cff_dump_enable)
return;
cffdump_printline(device_id, CFF_OP_POLL_REG, addr, value,
mask, 0, 0);
}
void kgsl_cffdump_slavewrite(uint addr, uint value)
{
if (!kgsl_cff_dump_enable)
return;
cffdump_printline(-1, CFF_OP_WRITE_REG, addr, value, 0, 0, 0);
}
int kgsl_cffdump_waitirq(void)
{
if (!kgsl_cff_dump_enable)
return 0;
cffdump_printline(-1, CFF_OP_WAIT_IRQ, 0, 0, 0, 0, 0);
return 1;
}
EXPORT_SYMBOL(kgsl_cffdump_waitirq);
static int subbuf_start_handler(struct rchan_buf *buf,
void *subbuf, void *prev_subbuf, uint prev_padding)
{
pr_debug("kgsl: cffdump: subbuf_start_handler(subbuf=%p, prev_subbuf"
"=%p, prev_padding=%08x)\n", subbuf, prev_subbuf, prev_padding);
if (relay_buf_full(buf)) {
if (!suspended) {
suspended = 1;
pr_warn("kgsl: cffdump: relay: cpu %d buffer full!!!\n",
smp_processor_id());
}
dropped++;
return 0;
} else if (suspended) {
suspended = 0;
pr_warn("kgsl: cffdump: relay: cpu %d buffer no longer full.\n",
smp_processor_id());
}
subbuf_start_reserve(buf, 0);
return 1;
}
static struct dentry *create_buf_file_handler(const char *filename,
struct dentry *parent, int mode, struct rchan_buf *buf,
int *is_global)
{
return debugfs_create_file(filename, mode, parent, buf,
&relay_file_operations);
}
/*
* file_remove() default callback. Removes relay file in debugfs.
*/
static int remove_buf_file_handler(struct dentry *dentry)
{
pr_info("kgsl: cffdump: %s()\n", __func__);
debugfs_remove(dentry);
return 0;
}
/*
* relay callbacks
*/
static struct rchan_callbacks relay_callbacks = {
.subbuf_start = subbuf_start_handler,
.create_buf_file = create_buf_file_handler,
.remove_buf_file = remove_buf_file_handler,
};
/**
* create_channel - creates channel /debug/kgsl/cff/cpuXXX
*
* Creates channel along with associated produced/consumed control files
*
* Returns channel on success, NULL otherwise
*/
static struct rchan *create_channel(unsigned subbuf_size, unsigned n_subbufs)
{
struct rchan *chan;
pr_info("kgsl: cffdump: relay: create_channel: subbuf_size %u, "
"n_subbufs %u, dir 0x%p\n", subbuf_size, n_subbufs, dir);
chan = relay_open("cpu", dir, subbuf_size,
n_subbufs, &relay_callbacks, NULL);
if (!chan) {
KGSL_CORE_ERR("relay_open failed\n");
return NULL;
}
suspended = 0;
dropped = 0;
return chan;
}
/**
* destroy_channel - destroys channel /debug/kgsl/cff/cpuXXX
*
* Destroys channel along with associated produced/consumed control files
*/
static void destroy_channel(void)
{
pr_info("kgsl: cffdump: relay: destroy_channel\n");
if (chan) {
relay_close(chan);
chan = NULL;
}
}


@@ -0,0 +1,69 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_CFFDUMP_H
#define __KGSL_CFFDUMP_H
#ifdef CONFIG_MSM_KGSL_CFF_DUMP
#include <linux/types.h>
#include "kgsl_device.h"
void kgsl_cffdump_init(void);
void kgsl_cffdump_destroy(void);
void kgsl_cffdump_open(enum kgsl_deviceid device_id);
void kgsl_cffdump_close(enum kgsl_deviceid device_id);
void kgsl_cffdump_syncmem(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint physaddr, uint sizebytes,
bool clean_cache);
void kgsl_cffdump_setmem(uint addr, uint value, uint sizebytes);
void kgsl_cffdump_regwrite(enum kgsl_deviceid device_id, uint addr,
uint value);
void kgsl_cffdump_regpoll(enum kgsl_deviceid device_id, uint addr,
uint value, uint mask);
bool kgsl_cffdump_parse_ibs(struct kgsl_device_private *dev_priv,
const struct kgsl_memdesc *memdesc, uint gpuaddr, int sizedwords,
bool check_only);
void kgsl_cffdump_user_event(unsigned int cff_opcode, unsigned int op1,
unsigned int op2, unsigned int op3,
unsigned int op4, unsigned int op5);
static inline bool kgsl_cffdump_flags_no_memzero(void) { return true; }
void kgsl_cffdump_memory_base(enum kgsl_deviceid device_id, unsigned int base,
unsigned int range, unsigned int gmemsize);
void kgsl_cffdump_hang(enum kgsl_deviceid device_id);
#else
#define kgsl_cffdump_init() (void)0
#define kgsl_cffdump_destroy() (void)0
#define kgsl_cffdump_open(device_id) (void)0
#define kgsl_cffdump_close(device_id) (void)0
#define kgsl_cffdump_syncmem(dev_priv, memdesc, addr, sizebytes, clean_cache) \
(void) 0
#define kgsl_cffdump_setmem(addr, value, sizebytes) (void)0
#define kgsl_cffdump_regwrite(device_id, addr, value) (void)0
#define kgsl_cffdump_regpoll(device_id, addr, value, mask) (void)0
#define kgsl_cffdump_parse_ibs(dev_priv, memdesc, gpuaddr, \
sizedwords, check_only) true
#define kgsl_cffdump_flags_no_memzero() true
#define kgsl_cffdump_memory_base(device_id, base, range, gmemsize) (void)0
#define kgsl_cffdump_hang(device_id) (void)0
#define kgsl_cffdump_user_event(cff_opcode, op1, op2, op3, op4, op5) \
(void)0
#endif /* CONFIG_MSM_KGSL_CFF_DUMP */
#endif /* __KGSL_CFFDUMP_H */


@@ -0,0 +1,87 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/debugfs.h>
#include "kgsl.h"
#include "kgsl_device.h"
/* default log level is error for everything */
#define KGSL_LOG_LEVEL_DEFAULT 3
#define KGSL_LOG_LEVEL_MAX 7
struct dentry *kgsl_debugfs_dir;
static inline int kgsl_log_set(unsigned int *log_val, void *data, u64 val)
{
*log_val = min((unsigned int)val, (unsigned int)KGSL_LOG_LEVEL_MAX);
return 0;
}
#define KGSL_DEBUGFS_LOG(__log) \
static int __log ## _set(void *data, u64 val) \
{ \
struct kgsl_device *device = data; \
return kgsl_log_set(&device->__log, data, val); \
} \
static int __log ## _get(void *data, u64 *val) \
{ \
struct kgsl_device *device = data; \
*val = device->__log; \
return 0; \
} \
DEFINE_SIMPLE_ATTRIBUTE(__log ## _fops, \
__log ## _get, __log ## _set, "%llu\n");
KGSL_DEBUGFS_LOG(drv_log);
KGSL_DEBUGFS_LOG(cmd_log);
KGSL_DEBUGFS_LOG(ctxt_log);
KGSL_DEBUGFS_LOG(mem_log);
KGSL_DEBUGFS_LOG(pwr_log);
void kgsl_device_debugfs_init(struct kgsl_device *device)
{
if (kgsl_debugfs_dir && !IS_ERR(kgsl_debugfs_dir))
device->d_debugfs = debugfs_create_dir(device->name,
kgsl_debugfs_dir);
if (!device->d_debugfs || IS_ERR(device->d_debugfs))
return;
device->cmd_log = KGSL_LOG_LEVEL_DEFAULT;
device->ctxt_log = KGSL_LOG_LEVEL_DEFAULT;
device->drv_log = KGSL_LOG_LEVEL_DEFAULT;
device->mem_log = KGSL_LOG_LEVEL_DEFAULT;
device->pwr_log = KGSL_LOG_LEVEL_DEFAULT;
debugfs_create_file("log_level_cmd", 0644, device->d_debugfs, device,
&cmd_log_fops);
debugfs_create_file("log_level_ctxt", 0644, device->d_debugfs, device,
&ctxt_log_fops);
debugfs_create_file("log_level_drv", 0644, device->d_debugfs, device,
&drv_log_fops);
debugfs_create_file("log_level_mem", 0644, device->d_debugfs, device,
&mem_log_fops);
debugfs_create_file("log_level_pwr", 0644, device->d_debugfs, device,
&pwr_log_fops);
}
void kgsl_core_debugfs_init(void)
{
kgsl_debugfs_dir = debugfs_create_dir("kgsl", 0);
}
void kgsl_core_debugfs_close(void)
{
debugfs_remove_recursive(kgsl_debugfs_dir);
}


@@ -0,0 +1,39 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef _KGSL_DEBUGFS_H
#define _KGSL_DEBUGFS_H
struct kgsl_device;
#ifdef CONFIG_DEBUG_FS
void kgsl_core_debugfs_init(void);
void kgsl_core_debugfs_close(void);
void kgsl_device_debugfs_init(struct kgsl_device *device);
extern struct dentry *kgsl_debugfs_dir;
static inline struct dentry *kgsl_get_debugfs_dir(void)
{
return kgsl_debugfs_dir;
}
#else
static inline void kgsl_core_debugfs_init(void) { }
static inline void kgsl_device_debugfs_init(struct kgsl_device *device) { }
static inline void kgsl_core_debugfs_close(void) { }
static inline struct dentry *kgsl_get_debugfs_dir(void) { return NULL; }
#endif
#endif

drivers/gpu/msm/kgsl_device.h (326 lines, executable file)

@@ -0,0 +1,326 @@
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_DEVICE_H
#define __KGSL_DEVICE_H
#include <linux/idr.h>
#include <linux/wakelock.h>
#include <linux/pm_qos_params.h>
#include <linux/earlysuspend.h>
#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_pwrctrl.h"
#include "kgsl_log.h"
#include "kgsl_pwrscale.h"
#define KGSL_TIMEOUT_NONE 0
#define KGSL_TIMEOUT_DEFAULT 0xFFFFFFFF
#define FIRST_TIMEOUT (HZ / 2)
/* KGSL device state is initialized to INIT when platform_probe *
* successfully initialized the device. Once a device has been opened *
* (started) it becomes active. NAP implies that only low latency *
* resources (for now clocks on some platforms) are off. SLEEP implies *
* that the KGSL module believes a device is idle (has been inactive *
* past its timer) and all system resources are released. SUSPEND is *
* requested by the kernel and will be enforced upon all open devices. */
#define KGSL_STATE_NONE 0x00000000
#define KGSL_STATE_INIT 0x00000001
#define KGSL_STATE_ACTIVE 0x00000002
#define KGSL_STATE_NAP 0x00000004
#define KGSL_STATE_SLEEP 0x00000008
#define KGSL_STATE_SUSPEND 0x00000010
#define KGSL_STATE_HUNG 0x00000020
#define KGSL_STATE_DUMP_AND_RECOVER 0x00000040
#define KGSL_STATE_SLUMBER 0x00000080
#define KGSL_GRAPHICS_MEMORY_LOW_WATERMARK 0x1000000
#define KGSL_IS_PAGE_ALIGNED(addr) (!((addr) & (~PAGE_MASK)))
struct kgsl_device;
struct platform_device;
struct kgsl_device_private;
struct kgsl_context;
struct kgsl_power_stats;
struct kgsl_functable {
/* Mandatory functions - these functions must be implemented
by the client device. The driver will not check for a NULL
pointer before calling the hook.
*/
void (*regread) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int *value);
void (*regwrite) (struct kgsl_device *device,
unsigned int offsetwords, unsigned int value);
int (*idle) (struct kgsl_device *device, unsigned int timeout);
unsigned int (*isidle) (struct kgsl_device *device);
int (*suspend_context) (struct kgsl_device *device);
int (*start) (struct kgsl_device *device, unsigned int init_ram);
int (*stop) (struct kgsl_device *device);
int (*getproperty) (struct kgsl_device *device,
enum kgsl_property_type type, void *value,
unsigned int sizebytes);
int (*waittimestamp) (struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp,
unsigned int msecs);
unsigned int (*readtimestamp) (struct kgsl_device *device,
struct kgsl_context *context, enum kgsl_timestamp_type type);
int (*issueibcmds) (struct kgsl_device_private *dev_priv,
struct kgsl_context *context, struct kgsl_ibdesc *ibdesc,
unsigned int sizedwords, uint32_t *timestamp,
unsigned int flags);
int (*setup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*cleanup_pt)(struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*power_stats)(struct kgsl_device *device,
struct kgsl_power_stats *stats);
void (*irqctrl)(struct kgsl_device *device, int state);
/* Optional functions - these functions are not mandatory. The
driver will check that the function pointer is not NULL before
calling the hook */
void (*setstate) (struct kgsl_device *device, uint32_t flags);
int (*drawctxt_create) (struct kgsl_device *device,
struct kgsl_pagetable *pagetable, struct kgsl_context *context,
uint32_t flags);
void (*drawctxt_destroy) (struct kgsl_device *device,
struct kgsl_context *context);
long (*ioctl) (struct kgsl_device_private *dev_priv,
unsigned int cmd, void *data);
int (*setproperty) (struct kgsl_device *device,
enum kgsl_property_type type, void *value,
unsigned int sizebytes);
};
struct kgsl_memregion {
unsigned char *mmio_virt_base;
unsigned int mmio_phys_base;
uint32_t gpu_base;
unsigned int sizebytes;
};
/* MH register values */
struct kgsl_mh {
unsigned int mharb;
unsigned int mh_intf_cfg1;
unsigned int mh_intf_cfg2;
uint32_t mpu_base;
int mpu_range;
};
struct kgsl_event {
struct kgsl_context *context;
uint32_t timestamp;
void (*func)(struct kgsl_device *, void *, u32, u32);
void *priv;
struct list_head list;
struct kgsl_device_private *owner;
};
struct kgsl_device {
struct device *dev;
const char *name;
unsigned int ver_major;
unsigned int ver_minor;
uint32_t flags;
enum kgsl_deviceid id;
struct kgsl_memregion regspace;
struct kgsl_memdesc memstore;
const char *iomemname;
struct kgsl_mh mh;
struct kgsl_mmu mmu;
struct completion hwaccess_gate;
const struct kgsl_functable *ftbl;
struct work_struct idle_check_ws;
struct timer_list idle_timer;
struct kgsl_pwrctrl pwrctrl;
int open_count;
struct atomic_notifier_head ts_notifier_list;
struct mutex mutex;
uint32_t state;
uint32_t requested_state;
unsigned int last_expired_ctxt_id;
unsigned int active_cnt;
struct completion suspend_gate;
wait_queue_head_t wait_queue;
struct workqueue_struct *work_queue;
struct device *parentdev;
struct completion recovery_gate;
struct dentry *d_debugfs;
struct idr context_idr;
struct early_suspend display_off;
/* Logging levels */
int cmd_log;
int ctxt_log;
int drv_log;
int mem_log;
int pwr_log;
struct wake_lock idle_wakelock;
struct kgsl_pwrscale pwrscale;
struct kobject pwrscale_kobj;
struct work_struct ts_expired_ws;
struct list_head events;
};
struct kgsl_context {
uint32_t id;
/* Pointer to the owning device instance */
struct kgsl_device_private *dev_priv;
/* Pointer to the device specific context information */
void *devctxt;
/*
* Status indicating whether a gpu reset occurred and whether this
* context was responsible for causing it
*/
unsigned int reset_status;
};
struct kgsl_process_private {
unsigned int refcnt;
pid_t pid;
spinlock_t mem_lock;
struct list_head mem_list;
struct kgsl_pagetable *pagetable;
struct list_head list;
struct kobject kobj;
struct {
unsigned int cur;
unsigned int max;
} stats[KGSL_MEM_ENTRY_MAX];
};
struct kgsl_device_private {
struct kgsl_device *device;
struct kgsl_process_private *process_priv;
};
struct kgsl_power_stats {
s64 total_time;
s64 busy_time;
};
struct kgsl_device *kgsl_get_device(int dev_idx);
static inline void kgsl_process_add_stats(struct kgsl_process_private *priv,
unsigned int type, size_t size)
{
priv->stats[type].cur += size;
if (priv->stats[type].max < priv->stats[type].cur)
priv->stats[type].max = priv->stats[type].cur;
}
static inline void kgsl_regread(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int *value)
{
device->ftbl->regread(device, offsetwords, value);
}
static inline void kgsl_regwrite(struct kgsl_device *device,
unsigned int offsetwords,
unsigned int value)
{
device->ftbl->regwrite(device, offsetwords, value);
}
static inline int kgsl_idle(struct kgsl_device *device, unsigned int timeout)
{
return device->ftbl->idle(device, timeout);
}
static inline int kgsl_create_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int ret = 0, i;
for (i = 0; list[i] != NULL; i++)
ret |= device_create_file(root, list[i]);
return ret;
}
static inline void kgsl_remove_device_sysfs_files(struct device *root,
struct device_attribute **list)
{
int i;
for (i = 0; list[i] != NULL; i++)
device_remove_file(root, list[i]);
}
static inline struct kgsl_mmu *
kgsl_get_mmu(struct kgsl_device *device)
{
return (struct kgsl_mmu *) (device ? &device->mmu : NULL);
}
static inline struct kgsl_device *kgsl_device_from_dev(struct device *dev)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
if (kgsl_driver.devp[i] && kgsl_driver.devp[i]->dev == dev)
return kgsl_driver.devp[i];
}
return NULL;
}
static inline int kgsl_create_device_workqueue(struct kgsl_device *device)
{
device->work_queue = create_workqueue(device->name);
if (!device->work_queue) {
KGSL_DRV_ERR(device, "create_workqueue(%s) failed\n",
device->name);
return -EINVAL;
}
return 0;
}
static inline struct kgsl_context *
kgsl_find_context(struct kgsl_device_private *dev_priv, uint32_t id)
{
struct kgsl_context *ctxt =
idr_find(&dev_priv->device->context_idr, id);
/* Make sure that the context belongs to the current instance so
that other processes can't guess context IDs and mess things up */
return (ctxt && ctxt->dev_priv == dev_priv) ? ctxt : NULL;
}
int kgsl_check_timestamp(struct kgsl_device *device,
struct kgsl_context *context, unsigned int timestamp);
int kgsl_register_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_unregister_ts_notifier(struct kgsl_device *device,
struct notifier_block *nb);
int kgsl_device_platform_probe(struct kgsl_device *device,
irqreturn_t (*dev_isr) (int, void*));
void kgsl_device_platform_remove(struct kgsl_device *device);
#endif /* __KGSL_DEVICE_H */

drivers/gpu/msm/kgsl_drm.c (1508 lines, executable file)

File diff suppressed because it is too large


@@ -0,0 +1,768 @@
/* Copyright (c) 2011-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno_ringbuffer.h"
static ssize_t
sysfs_show_ptpool_entries(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->entries);
}
static ssize_t
sysfs_show_ptpool_min(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n",
pool->static_entries);
}
static ssize_t
sysfs_show_ptpool_chunks(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->chunks);
}
static ssize_t
sysfs_show_ptpool_ptsize(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)
kgsl_driver.ptpool;
return snprintf(buf, PAGE_SIZE, "%d\n", pool->ptsize);
}
static struct kobj_attribute attr_ptpool_entries = {
.attr = { .name = "ptpool_entries", .mode = 0444 },
.show = sysfs_show_ptpool_entries,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_min = {
.attr = { .name = "ptpool_min", .mode = 0444 },
.show = sysfs_show_ptpool_min,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_chunks = {
.attr = { .name = "ptpool_chunks", .mode = 0444 },
.show = sysfs_show_ptpool_chunks,
.store = NULL,
};
static struct kobj_attribute attr_ptpool_ptsize = {
.attr = { .name = "ptpool_ptsize", .mode = 0444 },
.show = sysfs_show_ptpool_ptsize,
.store = NULL,
};
static struct attribute *ptpool_attrs[] = {
&attr_ptpool_entries.attr,
&attr_ptpool_min.attr,
&attr_ptpool_chunks.attr,
&attr_ptpool_ptsize.attr,
NULL,
};
static struct attribute_group ptpool_attr_group = {
.attrs = ptpool_attrs,
};
static int
_kgsl_ptpool_add_entries(struct kgsl_ptpool *pool, int count, int dynamic)
{
struct kgsl_ptpool_chunk *chunk;
size_t size = ALIGN(count * pool->ptsize, PAGE_SIZE);
BUG_ON(count == 0);
if (get_order(size) >= MAX_ORDER) {
KGSL_CORE_ERR("ptpool allocation is too big: %d\n", size);
return -EINVAL;
}
chunk = kzalloc(sizeof(*chunk), GFP_KERNEL);
if (chunk == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n", sizeof(*chunk));
return -ENOMEM;
}
chunk->size = size;
chunk->count = count;
chunk->dynamic = dynamic;
chunk->data = dma_alloc_coherent(NULL, size,
&chunk->phys, GFP_KERNEL);
if (chunk->data == NULL) {
KGSL_CORE_ERR("dma_alloc_coherent(%d) failed\n", size);
goto err;
}
chunk->bitmap = kzalloc(BITS_TO_LONGS(count) * 4, GFP_KERNEL);
if (chunk->bitmap == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
BITS_TO_LONGS(count) * 4);
goto err_dma;
}
list_add_tail(&chunk->list, &pool->list);
pool->chunks++;
pool->entries += count;
if (!dynamic)
pool->static_entries += count;
return 0;
err_dma:
dma_free_coherent(NULL, chunk->size, chunk->data, chunk->phys);
err:
kfree(chunk);
return -ENOMEM;
}
static void *
_kgsl_ptpool_get_entry(struct kgsl_ptpool *pool, unsigned int *physaddr)
{
struct kgsl_ptpool_chunk *chunk;
list_for_each_entry(chunk, &pool->list, list) {
int bit = find_first_zero_bit(chunk->bitmap, chunk->count);
if (bit >= chunk->count)
continue;
set_bit(bit, chunk->bitmap);
*physaddr = chunk->phys + (bit * pool->ptsize);
return chunk->data + (bit * pool->ptsize);
}
return NULL;
}
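Allocation is a first-fit scan over the chunk list: the first clear bit in any chunk's bitmap is claimed with set_bit(), and the bit index is scaled by ptsize to locate the pagetable within the chunk. For example (values invented), with ptsize = 4096 and a chunk whose phys base is 0x10000000, claiming bit 2 yields *physaddr = 0x10002000 and the matching kernel virtual address chunk->data + 0x2000.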
/**
* kgsl_ptpool_add
* @pool: A pointer to a ptpool structure
* @count: Number of entries to add
*
* Add static entries to the pagetable pool.
*/
static int
kgsl_ptpool_add(struct kgsl_ptpool *pool, int count)
{
int ret = 0;
BUG_ON(count == 0);
mutex_lock(&pool->lock);
/* Only 4MB can be allocated in one chunk, so larger allocations
need to be split into multiple sections */
while (count) {
int entries = ((count * pool->ptsize) > SZ_4M) ?
SZ_4M / pool->ptsize : count;
/* Add the entries as static, i.e. they don't ever stand
a chance of being removed */
ret = _kgsl_ptpool_add_entries(pool, entries, 0);
if (ret)
break;
count -= entries;
}
mutex_unlock(&pool->lock);
return ret;
}
/**
* kgsl_ptpool_alloc
* @pool: A pointer to a ptpool structure
* @physaddr: A pointer to store the physical address of the chunk
*
* Allocate a pagetable from the pool. Returns the virtual address
* of the pagetable, the physical address is returned in physaddr
*/
static void *kgsl_ptpool_alloc(struct kgsl_ptpool *pool,
unsigned int *physaddr)
{
void *addr = NULL;
int ret;
mutex_lock(&pool->lock);
addr = _kgsl_ptpool_get_entry(pool, physaddr);
if (addr)
goto done;
/* Add a chunk for 1 more pagetable and mark it as dynamic */
ret = _kgsl_ptpool_add_entries(pool, 1, 1);
if (ret)
goto done;
addr = _kgsl_ptpool_get_entry(pool, physaddr);
done:
mutex_unlock(&pool->lock);
return addr;
}
static inline void _kgsl_ptpool_rm_chunk(struct kgsl_ptpool_chunk *chunk)
{
list_del(&chunk->list);
if (chunk->data)
dma_free_coherent(NULL, chunk->size, chunk->data,
chunk->phys);
kfree(chunk->bitmap);
kfree(chunk);
}
/**
* kgsl_ptpool_free
* @pool: A pointer to a ptpool structure
* @addr: A pointer to the virtual address to free
*
* Free a pagetable allocated from the pool
*/
static void kgsl_ptpool_free(struct kgsl_ptpool *pool, void *addr)
{
struct kgsl_ptpool_chunk *chunk, *tmp;
if (pool == NULL || addr == NULL)
return;
mutex_lock(&pool->lock);
list_for_each_entry_safe(chunk, tmp, &pool->list, list) {
if (addr >= chunk->data &&
addr < chunk->data + chunk->size) {
int bit = ((unsigned long) (addr - chunk->data)) /
pool->ptsize;
clear_bit(bit, chunk->bitmap);
memset(addr, 0, pool->ptsize);
if (chunk->dynamic &&
bitmap_empty(chunk->bitmap, chunk->count))
_kgsl_ptpool_rm_chunk(chunk);
break;
}
}
mutex_unlock(&pool->lock);
}
void kgsl_gpummu_ptpool_destroy(void *ptpool)
{
struct kgsl_ptpool *pool = (struct kgsl_ptpool *)ptpool;
struct kgsl_ptpool_chunk *chunk, *tmp;
if (pool == NULL)
return;
mutex_lock(&pool->lock);
list_for_each_entry_safe(chunk, tmp, &pool->list, list)
_kgsl_ptpool_rm_chunk(chunk);
mutex_unlock(&pool->lock);
kfree(pool);
}
/**
* kgsl_gpummu_ptpool_init
* @ptsize: The size of each pagetable in the pool
* @entries: The number of initial entries to add to the pool
*
* Initialize a pool and allocate an initial chunk of entries.
*/
void *kgsl_gpummu_ptpool_init(int ptsize, int entries)
{
struct kgsl_ptpool *pool;
int ret = 0;
BUG_ON(ptsize == 0);
pool = kzalloc(sizeof(struct kgsl_ptpool), GFP_KERNEL);
if (!pool) {
KGSL_CORE_ERR("Failed to allocate memory "
"for ptpool\n");
return NULL;
}
pool->ptsize = ptsize;
mutex_init(&pool->lock);
INIT_LIST_HEAD(&pool->list);
if (entries) {
ret = kgsl_ptpool_add(pool, entries);
if (ret)
goto err_ptpool_remove;
}
ret = sysfs_create_group(kgsl_driver.ptkobj, &ptpool_attr_group);
if (ret) {
KGSL_CORE_ERR("sysfs_create_group failed for ptpool "
"statistics: %d\n", ret);
goto err_ptpool_remove;
}
return (void *)pool;
err_ptpool_remove:
kgsl_gpummu_ptpool_destroy(pool);
return NULL;
}
int kgsl_gpummu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
struct kgsl_gpummu_pt *gpummu_pt = pt ? pt->priv : NULL;
return gpummu_pt && pt_base && (gpummu_pt->base.gpuaddr == pt_base);
}
void kgsl_gpummu_destroy_pagetable(void *mmu_specific_pt)
{
struct kgsl_gpummu_pt *gpummu_pt = (struct kgsl_gpummu_pt *)
mmu_specific_pt;
kgsl_ptpool_free((struct kgsl_ptpool *)kgsl_driver.ptpool,
gpummu_pt->base.hostptr);
kgsl_driver.stats.coherent -= KGSL_PAGETABLE_SIZE;
kfree(gpummu_pt->tlbflushfilter.base);
kfree(gpummu_pt);
}
static inline uint32_t
kgsl_pt_entry_get(unsigned int va_base, uint32_t va)
{
return (va - va_base) >> PAGE_SHIFT;
}
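For example, with KGSL_PAGETABLE_BASE = 0x66000000 (as defined in kgsl.h above) and 4KB pages, a GPU address of 0x66042000 maps to PTE index (0x66042000 - 0x66000000) >> 12 = 0x42, i.e. entry 66 of the pagetable.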
static inline void
kgsl_pt_map_set(struct kgsl_gpummu_pt *pt, uint32_t pte, uint32_t val)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
baseptr[pte] = val;
}
static inline uint32_t
kgsl_pt_map_get(struct kgsl_gpummu_pt *pt, uint32_t pte)
{
uint32_t *baseptr = (uint32_t *)pt->base.hostptr;
BUG_ON(pte*sizeof(uint32_t) >= pt->base.size);
return baseptr[pte] & GSL_PT_PAGE_ADDR_MASK;
}
static unsigned int kgsl_gpummu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id)
{
unsigned int result = 0;
struct kgsl_gpummu_pt *gpummu_pt;
if (pt == NULL)
return 0;
gpummu_pt = pt->priv;
spin_lock(&pt->lock);
if (gpummu_pt->tlb_flags & (1<<id)) {
result = KGSL_MMUFLAGS_TLBFLUSH;
gpummu_pt->tlb_flags &= ~(1<<id);
}
spin_unlock(&pt->lock);
return result;
}
static void kgsl_gpummu_pagefault(struct kgsl_device *device)
{
unsigned int reg;
unsigned int ptbase;
kgsl_regread(device, MH_MMU_PAGE_FAULT, &reg);
kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
KGSL_MEM_CRIT(device,
"mmu page fault: page=0x%lx pt=%d op=%s axi=%d\n",
reg & ~(PAGE_SIZE - 1),
kgsl_mmu_get_ptname_from_ptbase(ptbase),
reg & 0x02 ? "WRITE" : "READ", (reg >> 4) & 0xF);
}
static void *kgsl_gpummu_create_pagetable(void)
{
struct kgsl_gpummu_pt *gpummu_pt;
gpummu_pt = kzalloc(sizeof(struct kgsl_gpummu_pt),
GFP_KERNEL);
if (!gpummu_pt)
return NULL;
gpummu_pt->tlb_flags = 0;
gpummu_pt->last_superpte = 0;
gpummu_pt->tlbflushfilter.size = (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE /
(PAGE_SIZE * GSL_PT_SUPER_PTE * 8)) + 1;
gpummu_pt->tlbflushfilter.base = (unsigned int *)
kzalloc(gpummu_pt->tlbflushfilter.size, GFP_KERNEL);
if (!gpummu_pt->tlbflushfilter.base) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
gpummu_pt->tlbflushfilter.size);
goto err_free_gpummu;
}
GSL_TLBFLUSH_FILTER_RESET();
gpummu_pt->base.hostptr = kgsl_ptpool_alloc((struct kgsl_ptpool *)
kgsl_driver.ptpool,
&gpummu_pt->base.physaddr);
if (gpummu_pt->base.hostptr == NULL)
goto err_flushfilter;
/* ptpool allocations are from coherent memory, so update the
device statistics accordingly */
KGSL_STATS_ADD(KGSL_PAGETABLE_SIZE, kgsl_driver.stats.coherent,
kgsl_driver.stats.coherent_max);
gpummu_pt->base.gpuaddr = gpummu_pt->base.physaddr;
gpummu_pt->base.size = KGSL_PAGETABLE_SIZE;
return (void *)gpummu_pt;
err_flushfilter:
kfree(gpummu_pt->tlbflushfilter.base);
err_free_gpummu:
kfree(gpummu_pt);
return NULL;
}
static void kgsl_gpummu_default_setstate(struct kgsl_device *device,
uint32_t flags)
{
struct kgsl_gpummu_pt *gpummu_pt;
if (!kgsl_mmu_enabled())
return;
if (flags & KGSL_MMUFLAGS_PTUPDATE) {
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
gpummu_pt = device->mmu.hwpagetable->priv;
kgsl_regwrite(device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
}
if (flags & KGSL_MMUFLAGS_TLBFLUSH) {
/* Invalidate all and tc */
kgsl_regwrite(device, MH_MMU_INVALIDATE, 0x00000003);
}
}
static void kgsl_gpummu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_gpummu_pt *gpummu_pt;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* if the page table is not current, set up the mmu to
* use the newly specified page table
*/
if (mmu->hwpagetable != pagetable) {
mmu->hwpagetable = pagetable;
spin_lock(&mmu->hwpagetable->lock);
gpummu_pt = mmu->hwpagetable->priv;
gpummu_pt->tlb_flags &= ~(1<<device->id);
spin_unlock(&mmu->hwpagetable->lock);
/* call device specific set page table */
kgsl_setstate(mmu->device, KGSL_MMUFLAGS_TLBFLUSH |
KGSL_MMUFLAGS_PTUPDATE);
}
}
}
static int kgsl_gpummu_init(struct kgsl_device *device)
{
/*
* initialize the device mmu
*
* call this with the global lock held
*/
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
mmu->device = device;
/* sub-client MMU lookups require address translation */
if ((mmu->config & ~0x1) > 0) {
/* make sure the virtual address range is a multiple of 64KB */
if (CONFIG_MSM_KGSL_PAGE_TABLE_SIZE & ((1 << 16) - 1)) {
KGSL_CORE_ERR("Invalid pagetable size requested "
"for GPUMMU: %x\n", CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
return -EINVAL;
}
/* allocate memory used for completing r/w operations that
* cannot be mapped by the MMU
*/
status = kgsl_allocate_contiguous(&mmu->setstate_memory, 64);
if (!status)
kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
}
dev_info(device->dev, "|%s| MMU type set for device is GPUMMU\n",
__func__);
return status;
}
static int kgsl_gpummu_start(struct kgsl_device *device)
{
/*
* initialize the device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_gpummu_pt *gpummu_pt;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
/* MMU not enabled */
if ((mmu->config & 0x1) == 0)
return 0;
/* setup MMU and sub-client behavior */
kgsl_regwrite(device, MH_MMU_CONFIG, mmu->config);
/* idle device */
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
/* enable axi interrupts */
kgsl_regwrite(device, MH_INTERRUPT_MASK,
GSL_MMU_INT_MASK | MH_INTERRUPT_MASK__MMU_PAGE_FAULT);
kgsl_sharedmem_set(&mmu->setstate_memory, 0, 0,
mmu->setstate_memory.size);
/* TRAN_ERROR needs a 32 byte (32 byte aligned) chunk of memory
* to complete transactions in case of an MMU fault. Note that
* we'll leave the bottom 32 bytes of the setstate_memory for other
* purposes (e.g. use it when dummy read cycles are needed
* for other blocks) */
kgsl_regwrite(device, MH_MMU_TRAN_ERROR,
mmu->setstate_memory.physaddr + 32);
if (mmu->defaultpagetable == NULL)
mmu->defaultpagetable =
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
if (mmu->defaultpagetable == NULL)
return -ENOMEM;
mmu->hwpagetable = mmu->defaultpagetable;
gpummu_pt = mmu->hwpagetable->priv;
kgsl_regwrite(device, MH_MMU_PT_BASE,
gpummu_pt->base.gpuaddr);
kgsl_regwrite(device, MH_MMU_VA_RANGE,
(KGSL_PAGETABLE_BASE |
(CONFIG_MSM_KGSL_PAGE_TABLE_SIZE >> 16)));
kgsl_setstate(device, KGSL_MMUFLAGS_TLBFLUSH);
mmu->flags |= KGSL_FLAGS_STARTED;
return 0;
}
static int
kgsl_gpummu_unmap(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc)
{
unsigned int numpages;
unsigned int pte, ptefirst, ptelast, superpte;
unsigned int range = memdesc->size;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions perturb the gpuaddr with an offset, so apply the
mask here to make sure we have the right address */
unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
numpages = (range >> PAGE_SHIFT);
if (range & (PAGE_SIZE - 1))
numpages++;
ptefirst = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, gpuaddr);
ptelast = ptefirst + numpages;
superpte = ptefirst - (ptefirst & (GSL_PT_SUPER_PTE-1));
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte / GSL_PT_SUPER_PTE);
for (pte = ptefirst; pte < ptelast; pte++) {
#ifdef VERBOSE_DEBUG
/* check if PTE exists */
if (!kgsl_pt_map_get(gpummu_pt, pte))
KGSL_CORE_ERR("pt entry %x is already "
"unmapped for pagetable %p\n", pte, gpummu_pt);
#endif
kgsl_pt_map_set(gpummu_pt, pte, GSL_PT_PAGE_DIRTY);
superpte = pte - (pte & (GSL_PT_SUPER_PTE - 1));
if (pte == superpte)
GSL_TLBFLUSH_FILTER_SETDIRTY(superpte /
GSL_PT_SUPER_PTE);
}
/* Post all writes to the pagetable */
wmb();
return 0;
}
#define SUPERPTE_IS_DIRTY(_p) \
(((_p) & (GSL_PT_SUPER_PTE - 1)) == 0 && \
GSL_TLBFLUSH_FILTER_ISDIRTY((_p) / GSL_PT_SUPER_PTE))
static int
kgsl_gpummu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
unsigned int pte;
struct kgsl_gpummu_pt *gpummu_pt = mmu_specific_pt;
struct scatterlist *s;
int flushtlb = 0;
int i;
pte = kgsl_pt_entry_get(KGSL_PAGETABLE_BASE, memdesc->gpuaddr);
/* Flush the TLB if the first PTE isn't at the superpte boundary */
if (pte & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
for_each_sg(memdesc->sg, s, memdesc->sglen, i) {
unsigned int paddr = kgsl_get_sg_pa(s);
unsigned int j;
/* Each sg entry might be multiple pages long */
for (j = paddr; j < paddr + s->length; pte++, j += PAGE_SIZE) {
if (SUPERPTE_IS_DIRTY(pte))
flushtlb = 1;
kgsl_pt_map_set(gpummu_pt, pte, j | protflags);
}
}
/* Flush the TLB if the last PTE isn't at the superpte boundary */
if ((pte + 1) & (GSL_PT_SUPER_PTE - 1))
flushtlb = 1;
wmb();
if (flushtlb) {
/*set all devices as needing flushing*/
gpummu_pt->tlb_flags = UINT_MAX;
GSL_TLBFLUSH_FILTER_RESET();
}
return 0;
}
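
The map path above flushes the TLB whenever a mapping's first or last PTE falls off a superpte boundary (GSL_PT_SUPER_PTE = 8 entries). A minimal, self-contained userspace sketch of that boundary test, with hypothetical PTE indices:

#include <stdio.h>

#define SUPER 8	/* GSL_PT_SUPER_PTE */

int main(void)
{
	unsigned int pte;
	/* a first/last PTE off the 8-entry boundary forces a full flush,
	 * as in the checks at the top and bottom of kgsl_gpummu_map() */
	for (pte = 14; pte <= 17; pte++)
		printf("pte %u: %s\n", pte,
			(pte & (SUPER - 1)) ? "off boundary -> flush" : "aligned");
	return 0;
}
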
static int kgsl_gpummu_stop(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
mmu->flags &= ~KGSL_FLAGS_STARTED;
return 0;
}
static int kgsl_gpummu_close(struct kgsl_device *device)
{
/*
* close device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->setstate_memory.gpuaddr)
kgsl_sharedmem_free(&mmu->setstate_memory);
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
return 0;
}
static unsigned int
kgsl_gpummu_get_current_ptbase(struct kgsl_device *device)
{
unsigned int ptbase;
kgsl_regread(device, MH_MMU_PT_BASE, &ptbase);
return ptbase;
}
struct kgsl_mmu_ops gpummu_ops = {
.mmu_init = kgsl_gpummu_init,
.mmu_close = kgsl_gpummu_close,
.mmu_start = kgsl_gpummu_start,
.mmu_stop = kgsl_gpummu_stop,
.mmu_setstate = kgsl_gpummu_setstate,
.mmu_device_setstate = kgsl_gpummu_default_setstate,
.mmu_pagefault = kgsl_gpummu_pagefault,
.mmu_get_current_ptbase = kgsl_gpummu_get_current_ptbase,
};
struct kgsl_mmu_pt_ops gpummu_pt_ops = {
.mmu_map = kgsl_gpummu_map,
.mmu_unmap = kgsl_gpummu_unmap,
.mmu_create_pagetable = kgsl_gpummu_create_pagetable,
.mmu_destroy_pagetable = kgsl_gpummu_destroy_pagetable,
.mmu_pt_equal = kgsl_gpummu_pt_equal,
.mmu_pt_get_flags = kgsl_gpummu_pt_get_flags,
};
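
These ops tables are how the driver binds a device to one MMU back end at runtime; kgsl_mmu.c below calls through them without knowing which implementation is active. A self-contained sketch of the same function-pointer dispatch pattern (demo names, userspace C, not the driver's actual types):

#include <stdio.h>

struct demo_mmu_ops {
	int (*start)(void);
	int (*stop)(void);
};

static int demo_gpummu_start(void) { puts("gpummu: start"); return 0; }
static int demo_gpummu_stop(void) { puts("gpummu: stop"); return 0; }

static const struct demo_mmu_ops demo_gpummu = {
	.start = demo_gpummu_start,
	.stop = demo_gpummu_stop,
};

int main(void)
{
	/* bind once, then dispatch without knowing the back end */
	const struct demo_mmu_ops *ops = &demo_gpummu;
	ops->start();
	ops->stop();
	return 0;
}
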

drivers/gpu/msm/kgsl_gpummu.h
@@ -0,0 +1,85 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_GPUMMU_H
#define __KGSL_GPUMMU_H
#define GSL_PT_PAGE_BITS_MASK 0x00000007
#define GSL_PT_PAGE_ADDR_MASK PAGE_MASK
#define GSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
/* Macros to manage TLB flushing */
#define GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS (sizeof(unsigned char) * 8)
#define GSL_TLBFLUSH_FILTER_GET(superpte) \
(*((unsigned char *) \
(((unsigned int)gpummu_pt->tlbflushfilter.base) \
+ (superpte / GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))))
#define GSL_TLBFLUSH_FILTER_SETDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) |= 1 << \
(superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS))
#define GSL_TLBFLUSH_FILTER_ISDIRTY(superpte) \
(GSL_TLBFLUSH_FILTER_GET((superpte)) & \
(1 << (superpte % GSL_TLBFLUSH_FILTER_ENTRY_NUMBITS)))
#define GSL_TLBFLUSH_FILTER_RESET() memset(gpummu_pt->tlbflushfilter.base,\
0, gpummu_pt->tlbflushfilter.size)
extern struct kgsl_mmu_ops gpummu_ops;
extern struct kgsl_mmu_pt_ops gpummu_pt_ops;
struct kgsl_tlbflushfilter {
unsigned int *base;
unsigned int size;
};
struct kgsl_gpummu_pt {
struct kgsl_memdesc base;
unsigned int last_superpte;
unsigned int tlb_flags;
/* Maintain filter to manage tlb flushing */
struct kgsl_tlbflushfilter tlbflushfilter;
};
struct kgsl_ptpool_chunk {
size_t size;
unsigned int count;
int dynamic;
void *data;
unsigned int phys;
unsigned long *bitmap;
struct list_head list;
};
struct kgsl_ptpool {
size_t ptsize;
struct mutex lock;
struct list_head list;
int entries;
int static_entries;
int chunks;
};
void *kgsl_gpummu_ptpool_init(int ptsize,
int entries);
void kgsl_gpummu_ptpool_destroy(void *ptpool);
static inline unsigned int kgsl_pt_get_base_addr(struct kgsl_pagetable *pt)
{
struct kgsl_gpummu_pt *gpummu_pt = pt->priv;
return gpummu_pt->base.gpuaddr;
}
#endif /* __KGSL_GPUMMU_H */
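
The GSL_TLBFLUSH_FILTER_* macros above keep one dirty bit per superpte so that unmaps only force a TLB flush when a superpage is actually reused. A runnable userspace sketch of that bitmap (the capacity is illustrative, not the driver's real size):

#include <stdio.h>
#include <string.h>

#define NUMBITS 8			/* bits per filter byte, as above */
#define SUPERPTES 64			/* hypothetical filter capacity */
static unsigned char filter[SUPERPTES / NUMBITS];

static void set_dirty(unsigned int spte)
{
	filter[spte / NUMBITS] |= 1u << (spte % NUMBITS);
}

static int is_dirty(unsigned int spte)
{
	return filter[spte / NUMBITS] & (1u << (spte % NUMBITS));
}

int main(void)
{
	set_dirty(10);				/* an unmap dirties superpte 10 */
	printf("10 dirty: %d\n", !!is_dirty(10));	/* 1 */
	printf("11 dirty: %d\n", !!is_dirty(11));	/* 0 */
	memset(filter, 0, sizeof(filter));	/* FILTER_RESET after a flush */
	printf("10 dirty: %d\n", !!is_dirty(10));	/* 0 */
	return 0;
}
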

drivers/gpu/msm/kgsl_iommu.c
@@ -0,0 +1,333 @@
/* Copyright (c) 2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/iommu.h>
#include <mach/iommu.h>
#include <linux/msm_kgsl.h>
#include "kgsl.h"
#include "kgsl_device.h"
#include "kgsl_mmu.h"
#include "kgsl_sharedmem.h"
struct kgsl_iommu {
struct device *iommu_user_dev;
int iommu_user_dev_attached;
struct device *iommu_priv_dev;
int iommu_priv_dev_attached;
};
static int kgsl_iommu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
struct iommu_domain *domain = pt ? pt->priv : NULL;
return domain && pt_base && ((unsigned int)domain == pt_base);
}
static void kgsl_iommu_destroy_pagetable(void *mmu_specific_pt)
{
struct iommu_domain *domain = mmu_specific_pt;
if (domain)
iommu_domain_free(domain);
}
void *kgsl_iommu_create_pagetable(void)
{
struct iommu_domain *domain = iommu_domain_alloc(0);
if (!domain)
KGSL_CORE_ERR("Failed to create iommu domain\n");
return domain;
}
static void kgsl_detach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct iommu_domain *domain;
struct kgsl_iommu *iommu = mmu->priv;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
domain = mmu->hwpagetable->priv;
if (iommu->iommu_user_dev_attached) {
iommu_detach_device(domain, iommu->iommu_user_dev);
iommu->iommu_user_dev_attached = 0;
KGSL_MEM_INFO(mmu->device,
"iommu %p detached from user dev of MMU: %p\n",
domain, mmu);
}
if (iommu->iommu_priv_dev_attached) {
iommu_detach_device(domain, iommu->iommu_priv_dev);
iommu->iommu_priv_dev_attached = 0;
KGSL_MEM_INFO(mmu->device,
"iommu %p detached from priv dev of MMU: %p\n",
domain, mmu);
}
}
static int kgsl_attach_pagetable_iommu_domain(struct kgsl_mmu *mmu)
{
struct iommu_domain *domain;
int ret = 0;
struct kgsl_iommu *iommu = mmu->priv;
BUG_ON(mmu->hwpagetable == NULL);
BUG_ON(mmu->hwpagetable->priv == NULL);
domain = mmu->hwpagetable->priv;
if (iommu->iommu_user_dev && !iommu->iommu_user_dev_attached) {
ret = iommu_attach_device(domain, iommu->iommu_user_dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
"Failed to attach device, err %d\n", ret);
goto done;
}
iommu->iommu_user_dev_attached = 1;
KGSL_MEM_INFO(mmu->device,
"iommu %p attached to user dev of MMU: %p\n",
domain, mmu);
}
if (iommu->iommu_priv_dev && !iommu->iommu_priv_dev_attached) {
ret = iommu_attach_device(domain, iommu->iommu_priv_dev);
if (ret) {
KGSL_MEM_ERR(mmu->device,
"Failed to attach device, err %d\n", ret);
iommu_detach_device(domain, iommu->iommu_user_dev);
iommu->iommu_user_dev_attached = 0;
goto done;
}
iommu->iommu_priv_dev_attached = 1;
KGSL_MEM_INFO(mmu->device,
"iommu %p attached to priv dev of MMU: %p\n",
domain, mmu);
}
done:
return ret;
}
static int kgsl_get_iommu_ctxt(struct kgsl_iommu *iommu,
struct kgsl_device *device)
{
int status = 0;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
if (pdata_dev->iommu_user_ctx_name)
iommu->iommu_user_dev = msm_iommu_get_ctx(
pdata_dev->iommu_user_ctx_name);
if (pdata_dev->iommu_priv_ctx_name)
iommu->iommu_priv_dev = msm_iommu_get_ctx(
pdata_dev->iommu_priv_ctx_name);
if (!iommu->iommu_user_dev) {
KGSL_CORE_ERR("Failed to get user iommu dev handle for "
"device %s\n",
pdata_dev->iommu_user_ctx_name);
status = -EINVAL;
}
return status;
}
static void kgsl_iommu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* page table not current, then setup mmu to use new
* specified page table
*/
if (mmu->hwpagetable != pagetable) {
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->hwpagetable = pagetable;
if (mmu->hwpagetable)
kgsl_attach_pagetable_iommu_domain(mmu);
}
}
}
static int kgsl_iommu_init(struct kgsl_device *device)
{
/*
* initialize device mmu
*
* call this with the global lock held
*/
int status = 0;
struct kgsl_mmu *mmu = &device->mmu;
struct kgsl_iommu *iommu;
mmu->device = device;
iommu = kzalloc(sizeof(struct kgsl_iommu), GFP_KERNEL);
if (!iommu) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
sizeof(struct kgsl_iommu));
return -ENOMEM;
}
iommu->iommu_priv_dev_attached = 0;
iommu->iommu_user_dev_attached = 0;
status = kgsl_get_iommu_ctxt(iommu, device);
if (status) {
kfree(iommu);
iommu = NULL;
}
mmu->priv = iommu;
dev_info(device->dev, "|%s| MMU type set for device is IOMMU\n",
__func__);
return status;
}
static int kgsl_iommu_start(struct kgsl_device *device)
{
int status;
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED)
return 0;
kgsl_regwrite(device, MH_MMU_CONFIG, 0x00000000);
if (mmu->defaultpagetable == NULL)
mmu->defaultpagetable =
kgsl_mmu_getpagetable(KGSL_MMU_GLOBAL_PT);
/* Return error if the default pagetable doesn't exist */
if (mmu->defaultpagetable == NULL)
return -ENOMEM;
mmu->hwpagetable = mmu->defaultpagetable;
status = kgsl_attach_pagetable_iommu_domain(mmu);
if (!status)
mmu->flags |= KGSL_FLAGS_STARTED;
return status;
}
static int
kgsl_iommu_unmap(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc)
{
int ret;
unsigned int range = memdesc->size;
struct iommu_domain *domain = (struct iommu_domain *)
mmu_specific_pt;
/* All GPU addresses as assigned are page aligned, but some
functions perturb the gpuaddr with an offset, so apply the
mask here to make sure we have the right address */
unsigned int gpuaddr = memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK;
if (range == 0 || gpuaddr == 0)
return 0;
ret = iommu_unmap_range(domain, gpuaddr, range);
if (ret)
KGSL_CORE_ERR("iommu_unmap_range(%p, %x, %d) failed "
"with err: %d\n", domain, gpuaddr,
range, ret);
return 0;
}
static int
kgsl_iommu_map(void *mmu_specific_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
int ret;
unsigned int iommu_virt_addr;
struct iommu_domain *domain = mmu_specific_pt;
BUG_ON(NULL == domain);
iommu_virt_addr = memdesc->gpuaddr;
ret = iommu_map_range(domain, iommu_virt_addr, memdesc->sg,
memdesc->size, MSM_IOMMU_ATTR_NONCACHED);
if (ret) {
KGSL_CORE_ERR("iommu_map_range(%p, %x, %p, %d, %d) "
"failed with err: %d\n", domain,
iommu_virt_addr, memdesc->sg, memdesc->size,
MSM_IOMMU_ATTR_NONCACHED, ret);
return ret;
}
return ret;
}
static int kgsl_iommu_stop(struct kgsl_device *device)
{
/*
* stop device mmu
*
* call this with the global lock held
*/
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->flags & KGSL_FLAGS_STARTED) {
/* detach iommu attachment */
kgsl_detach_pagetable_iommu_domain(mmu);
mmu->flags &= ~KGSL_FLAGS_STARTED;
}
return 0;
}
static int kgsl_iommu_close(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (mmu->defaultpagetable)
kgsl_mmu_putpagetable(mmu->defaultpagetable);
return 0;
}
static unsigned int
kgsl_iommu_get_current_ptbase(struct kgsl_device *device)
{
/* Current base is always the hwpagetables domain as we
* do not use per process pagetables right now for iommu.
* This will change when we switch to per process pagetables.
*/
return (unsigned int)device->mmu.hwpagetable->priv;
}
struct kgsl_mmu_ops iommu_ops = {
.mmu_init = kgsl_iommu_init,
.mmu_close = kgsl_iommu_close,
.mmu_start = kgsl_iommu_start,
.mmu_stop = kgsl_iommu_stop,
.mmu_setstate = kgsl_iommu_setstate,
.mmu_device_setstate = NULL,
.mmu_pagefault = NULL,
.mmu_get_current_ptbase = kgsl_iommu_get_current_ptbase,
};
struct kgsl_mmu_pt_ops iommu_pt_ops = {
.mmu_map = kgsl_iommu_map,
.mmu_unmap = kgsl_iommu_unmap,
.mmu_create_pagetable = kgsl_iommu_create_pagetable,
.mmu_destroy_pagetable = kgsl_iommu_destroy_pagetable,
.mmu_pt_equal = kgsl_iommu_pt_equal,
.mmu_pt_get_flags = NULL,
};

drivers/gpu/msm/kgsl_log.h
@@ -0,0 +1,102 @@
/* Copyright (c) 2002,2008-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_LOG_H
#define __KGSL_LOG_H
extern unsigned int kgsl_cff_dump_enable;
#define KGSL_LOG_INFO(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 6) \
dev_info(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_WARN(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 4) \
dev_warn(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_ERR(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 3) \
dev_err(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_CRIT(dev, lvl, fmt, args...) \
do { \
if ((lvl) >= 2) \
dev_crit(dev, "|%s| " fmt, \
__func__, ##args);\
} while (0)
#define KGSL_LOG_POSTMORTEM_WRITE(_dev, fmt, args...) \
do { dev_crit(_dev->dev, fmt, ##args); } while (0)
#define KGSL_LOG_DUMP(_dev, fmt, args...) dev_err(_dev->dev, fmt, ##args)
#define KGSL_DRV_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_DRV_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_DRV_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_DRV_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->drv_log, fmt, ##args)
#define KGSL_CMD_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CMD_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CMD_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CMD_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->cmd_log, fmt, ##args)
#define KGSL_CTXT_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_CTXT_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_CTXT_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_CTXT_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->ctxt_log, fmt, ##args)
#define KGSL_MEM_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_MEM_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_MEM_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_MEM_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->mem_log, fmt, ##args)
#define KGSL_PWR_INFO(_dev, fmt, args...) \
KGSL_LOG_INFO(_dev->dev, _dev->pwr_log, fmt, ##args)
#define KGSL_PWR_WARN(_dev, fmt, args...) \
KGSL_LOG_WARN(_dev->dev, _dev->pwr_log, fmt, ##args)
#define KGSL_PWR_ERR(_dev, fmt, args...) \
KGSL_LOG_ERR(_dev->dev, _dev->pwr_log, fmt, ##args)
#define KGSL_PWR_CRIT(_dev, fmt, args...) \
KGSL_LOG_CRIT(_dev->dev, _dev->pwr_log, fmt, ##args)
/* Core error messages - these are for core KGSL functions that have
no device associated with them (such as memory) */
#define KGSL_CORE_ERR(fmt, args...) \
pr_err("kgsl: %s: " fmt, __func__, ##args)
#endif /* __KGSL_LOG_H */
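
Each macro family above compares a per-category verbosity value against a fixed threshold (info fires at level >= 6, warn >= 4, err >= 3, crit >= 2). A standalone sketch of the same gating, using the file's GNU named-variadic macro style (demo names throughout):

#include <stdio.h>

#define DEMO_LOG(lvl, min, tag, fmt, args...) \
	do { \
		if ((lvl) >= (min)) \
			printf(tag " |%s| " fmt, __func__, ##args); \
	} while (0)
#define DEMO_INFO(lvl, fmt, args...) DEMO_LOG(lvl, 6, "info", fmt, ##args)
#define DEMO_ERR(lvl, fmt, args...) DEMO_LOG(lvl, 3, "err", fmt, ##args)

int main(void)
{
	int drv_log = 3;	/* per-category verbosity, like device->drv_log */
	DEMO_INFO(drv_log, "suppressed, 3 < 6\n");
	DEMO_ERR(drv_log, "printed, 3 >= 3\n");
	return 0;
}
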

drivers/gpu/msm/kgsl_mmu.c
@@ -0,0 +1,727 @@
/* Copyright (c) 2002,2007-2012, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/types.h>
#include <linux/device.h>
#include <linux/spinlock.h>
#include <linux/genalloc.h>
#include <linux/slab.h>
#include <linux/sched.h>
#include <linux/iommu.h>
#include "kgsl.h"
#include "kgsl_mmu.h"
#include "kgsl_device.h"
#include "kgsl_sharedmem.h"
#include "adreno_postmortem.h"
#define KGSL_MMU_ALIGN_SHIFT 13
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
static enum kgsl_mmutype kgsl_mmu_type;
static void pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable);
static int kgsl_cleanup_pt(struct kgsl_pagetable *pt)
{
int i;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
struct kgsl_device *device = kgsl_driver.devp[i];
if (device)
device->ftbl->cleanup_pt(device, pt);
}
return 0;
}
static void kgsl_destroy_pagetable(struct kref *kref)
{
struct kgsl_pagetable *pagetable = container_of(kref,
struct kgsl_pagetable, refcount);
unsigned long flags;
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
list_del(&pagetable->list);
spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
pagetable_remove_sysfs_objects(pagetable);
kgsl_cleanup_pt(pagetable);
if (pagetable->pool)
gen_pool_destroy(pagetable->pool);
pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
kfree(pagetable);
}
static inline void kgsl_put_pagetable(struct kgsl_pagetable *pagetable)
{
if (pagetable)
kref_put(&pagetable->refcount, kgsl_destroy_pagetable);
}
static struct kgsl_pagetable *
kgsl_get_pagetable(unsigned long name)
{
struct kgsl_pagetable *pt, *ret = NULL;
unsigned long flags;
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
if (pt->name == name) {
ret = pt;
kref_get(&ret->refcount);
break;
}
}
spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
return ret;
}
static struct kgsl_pagetable *
_get_pt_from_kobj(struct kobject *kobj)
{
unsigned long ptname;
if (!kobj)
return NULL;
if (sscanf(kobj->name, "%lu", &ptname) != 1)
return NULL;
return kgsl_get_pagetable(ptname);
}
static ssize_t
sysfs_show_entries(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_pagetable *pt;
int ret = 0;
pt = _get_pt_from_kobj(kobj);
if (pt)
ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.entries);
kgsl_put_pagetable(pt);
return ret;
}
static ssize_t
sysfs_show_mapped(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_pagetable *pt;
int ret = 0;
pt = _get_pt_from_kobj(kobj);
if (pt)
ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.mapped);
kgsl_put_pagetable(pt);
return ret;
}
static ssize_t
sysfs_show_va_range(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_pagetable *pt;
int ret = 0;
pt = _get_pt_from_kobj(kobj);
if (pt)
ret += snprintf(buf, PAGE_SIZE, "0x%x\n",
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
kgsl_put_pagetable(pt);
return ret;
}
static ssize_t
sysfs_show_max_mapped(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_pagetable *pt;
int ret = 0;
pt = _get_pt_from_kobj(kobj);
if (pt)
ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_mapped);
kgsl_put_pagetable(pt);
return ret;
}
static ssize_t
sysfs_show_max_entries(struct kobject *kobj,
struct kobj_attribute *attr,
char *buf)
{
struct kgsl_pagetable *pt;
int ret = 0;
pt = _get_pt_from_kobj(kobj);
if (pt)
ret += snprintf(buf, PAGE_SIZE, "%d\n", pt->stats.max_entries);
kgsl_put_pagetable(pt);
return ret;
}
static struct kobj_attribute attr_entries = {
.attr = { .name = "entries", .mode = 0444 },
.show = sysfs_show_entries,
.store = NULL,
};
static struct kobj_attribute attr_mapped = {
.attr = { .name = "mapped", .mode = 0444 },
.show = sysfs_show_mapped,
.store = NULL,
};
static struct kobj_attribute attr_va_range = {
.attr = { .name = "va_range", .mode = 0444 },
.show = sysfs_show_va_range,
.store = NULL,
};
static struct kobj_attribute attr_max_mapped = {
.attr = { .name = "max_mapped", .mode = 0444 },
.show = sysfs_show_max_mapped,
.store = NULL,
};
static struct kobj_attribute attr_max_entries = {
.attr = { .name = "max_entries", .mode = 0444 },
.show = sysfs_show_max_entries,
.store = NULL,
};
static struct attribute *pagetable_attrs[] = {
&attr_entries.attr,
&attr_mapped.attr,
&attr_va_range.attr,
&attr_max_mapped.attr,
&attr_max_entries.attr,
NULL,
};
static struct attribute_group pagetable_attr_group = {
.attrs = pagetable_attrs,
};
static void
pagetable_remove_sysfs_objects(struct kgsl_pagetable *pagetable)
{
if (pagetable->kobj)
sysfs_remove_group(pagetable->kobj,
&pagetable_attr_group);
kobject_put(pagetable->kobj);
}
static int
pagetable_add_sysfs_objects(struct kgsl_pagetable *pagetable)
{
char ptname[16];
int ret = -ENOMEM;
snprintf(ptname, sizeof(ptname), "%d", pagetable->name);
pagetable->kobj = kobject_create_and_add(ptname,
kgsl_driver.ptkobj);
if (pagetable->kobj == NULL)
goto err;
ret = sysfs_create_group(pagetable->kobj, &pagetable_attr_group);
err:
if (ret) {
if (pagetable->kobj)
kobject_put(pagetable->kobj);
pagetable->kobj = NULL;
}
return ret;
}
unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return 0;
else
return mmu->mmu_ops->mmu_get_current_ptbase(device);
}
EXPORT_SYMBOL(kgsl_mmu_get_current_ptbase);
int
kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base)
{
struct kgsl_pagetable *pt;
int ptid = -1;
spin_lock(&kgsl_driver.ptlock);
list_for_each_entry(pt, &kgsl_driver.pagetable_list, list) {
if (pt->pt_ops->mmu_pt_equal(pt, pt_base)) {
ptid = (int) pt->name;
break;
}
}
spin_unlock(&kgsl_driver.ptlock);
return ptid;
}
EXPORT_SYMBOL(kgsl_mmu_get_ptname_from_ptbase);
void kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pagetable)
{
struct kgsl_mmu *mmu = &device->mmu;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return;
else
mmu->mmu_ops->mmu_setstate(device,
pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_setstate);
int kgsl_mmu_init(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
mmu->device = device;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type ||
KGSL_MMU_TYPE_IOMMU == kgsl_mmu_type) {
dev_info(device->dev, "|%s| MMU type set for device is "
"NOMMU\n", __func__);
return 0;
} else if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
mmu->mmu_ops = &gpummu_ops;
return mmu->mmu_ops->mmu_init(device);
}
EXPORT_SYMBOL(kgsl_mmu_init);
int kgsl_mmu_start(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
kgsl_regwrite(device, MH_MMU_CONFIG, 0);
return 0;
} else {
return mmu->mmu_ops->mmu_start(device);
}
}
EXPORT_SYMBOL(kgsl_mmu_start);
void kgsl_mh_intrcallback(struct kgsl_device *device)
{
unsigned int status = 0;
unsigned int reg;
kgsl_regread(device, MH_INTERRUPT_STATUS, &status);
kgsl_regread(device, MH_AXI_ERROR, &reg);
if (status & MH_INTERRUPT_MASK__AXI_READ_ERROR)
KGSL_MEM_CRIT(device, "axi read error interrupt: %08x\n", reg);
if (status & MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
KGSL_MEM_CRIT(device, "axi write error interrupt: %08x\n", reg);
if (status & MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
device->mmu.mmu_ops->mmu_pagefault(device);
status &= KGSL_MMU_INT_MASK;
kgsl_regwrite(device, MH_INTERRUPT_CLEAR, status);
}
EXPORT_SYMBOL(kgsl_mh_intrcallback);
static int kgsl_setup_pt(struct kgsl_pagetable *pt)
{
int i = 0;
int status = 0;
for (i = 0; i < KGSL_DEVICE_MAX; i++) {
struct kgsl_device *device = kgsl_driver.devp[i];
if (device) {
status = device->ftbl->setup_pt(device, pt);
if (status)
goto error_pt;
}
}
return status;
error_pt:
while (i >= 0) {
struct kgsl_device *device = kgsl_driver.devp[i];
if (device)
device->ftbl->cleanup_pt(device, pt);
i--;
}
return status;
}
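
kgsl_setup_pt uses the classic set-up/unwind idiom: on the first failure it walks back down and cleans up every device it touched, including the one that failed (relying on cleanup being safe there). A compact standalone sketch with hypothetical setup/cleanup functions:

#include <stdio.h>

#define NDEV 3

static int demo_setup(int i) { printf("setup %d\n", i); return i == 2 ? -1 : 0; }
static void demo_cleanup(int i) { printf("cleanup %d\n", i); }

int main(void)
{
	int i, status = 0;
	for (i = 0; i < NDEV; i++) {
		status = demo_setup(i);
		if (status)
			goto error;
	}
	return 0;
error:
	/* unwind everything touched so far, failing slot included,
	 * exactly as kgsl_setup_pt() does */
	while (i >= 0)
		demo_cleanup(i--);
	return 1;
}
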
static struct kgsl_pagetable *kgsl_mmu_createpagetableobject(
unsigned int name)
{
int status = 0;
struct kgsl_pagetable *pagetable = NULL;
unsigned long flags;
pagetable = kzalloc(sizeof(struct kgsl_pagetable), GFP_KERNEL);
if (pagetable == NULL) {
KGSL_CORE_ERR("kzalloc(%d) failed\n",
sizeof(struct kgsl_pagetable));
return NULL;
}
kref_init(&pagetable->refcount);
spin_lock_init(&pagetable->lock);
pagetable->name = name;
pagetable->max_entries = KGSL_PAGETABLE_ENTRIES(
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE);
pagetable->pool = gen_pool_create(PAGE_SHIFT, -1);
if (pagetable->pool == NULL) {
KGSL_CORE_ERR("gen_pool_create(%d) failed\n", PAGE_SHIFT);
goto err_alloc;
}
if (gen_pool_add(pagetable->pool, KGSL_PAGETABLE_BASE,
CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, -1)) {
KGSL_CORE_ERR("gen_pool_add failed\n");
goto err_pool;
}
if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
pagetable->pt_ops = &gpummu_pt_ops;
pagetable->priv = pagetable->pt_ops->mmu_create_pagetable();
if (!pagetable->priv)
goto err_pool;
status = kgsl_setup_pt(pagetable);
if (status)
goto err_mmu_create;
spin_lock_irqsave(&kgsl_driver.ptlock, flags);
list_add(&pagetable->list, &kgsl_driver.pagetable_list);
spin_unlock_irqrestore(&kgsl_driver.ptlock, flags);
/* Create the sysfs entries */
pagetable_add_sysfs_objects(pagetable);
return pagetable;
err_mmu_create:
pagetable->pt_ops->mmu_destroy_pagetable(pagetable->priv);
err_pool:
gen_pool_destroy(pagetable->pool);
err_alloc:
kfree(pagetable);
return NULL;
}
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name)
{
struct kgsl_pagetable *pt;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return (void *)(-1);
#ifndef CONFIG_KGSL_PER_PROCESS_PAGE_TABLE
name = KGSL_MMU_GLOBAL_PT;
#endif
pt = kgsl_get_pagetable(name);
if (pt == NULL)
pt = kgsl_mmu_createpagetableobject(name);
return pt;
}
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable)
{
kgsl_put_pagetable(pagetable);
}
EXPORT_SYMBOL(kgsl_mmu_putpagetable);
void kgsl_setstate(struct kgsl_device *device, uint32_t flags)
{
struct kgsl_mmu *mmu = &device->mmu;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return;
else if (device->ftbl->setstate)
device->ftbl->setstate(device, flags);
else if (mmu->mmu_ops->mmu_device_setstate)
mmu->mmu_ops->mmu_device_setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_setstate);
void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags)
{
struct kgsl_mmu *mmu = &device->mmu;
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return;
else if (mmu->mmu_ops->mmu_device_setstate)
mmu->mmu_ops->mmu_device_setstate(device, flags);
}
EXPORT_SYMBOL(kgsl_mmu_device_setstate);
void kgsl_mh_start(struct kgsl_device *device)
{
struct kgsl_mh *mh = &device->mh;
/* force mmu off for now */
kgsl_regwrite(device, MH_MMU_CONFIG, 0);
kgsl_idle(device, KGSL_TIMEOUT_DEFAULT);
/* define physical memory range accessible by the core */
kgsl_regwrite(device, MH_MMU_MPU_BASE, mh->mpu_base);
kgsl_regwrite(device, MH_MMU_MPU_END,
mh->mpu_base + mh->mpu_range);
kgsl_regwrite(device, MH_ARBITER_CONFIG, mh->mharb);
if (mh->mh_intf_cfg1 != 0)
kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG1,
mh->mh_intf_cfg1);
if (mh->mh_intf_cfg2 != 0)
kgsl_regwrite(device, MH_CLNT_INTF_CTRL_CONFIG2,
mh->mh_intf_cfg2);
/*
* Interrupts are enabled on a per-device level when
* kgsl_pwrctrl_irq() is called
*/
}
int
kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags)
{
int ret;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
memdesc->gpuaddr = memdesc->physaddr;
return 0;
}
memdesc->gpuaddr = gen_pool_alloc_aligned(pagetable->pool,
memdesc->size, KGSL_MMU_ALIGN_SHIFT);
if (memdesc->gpuaddr == 0) {
KGSL_CORE_ERR("gen_pool_alloc(%d) failed\n", memdesc->size);
KGSL_CORE_ERR(" [%d] allocated=%d, entries=%d\n",
pagetable->name, pagetable->stats.mapped,
pagetable->stats.entries);
return -ENOMEM;
}
spin_lock(&pagetable->lock);
ret = pagetable->pt_ops->mmu_map(pagetable->priv, memdesc, protflags);
if (ret)
goto err_free_gpuaddr;
/* Keep track of the statistics for the sysfs files */
KGSL_STATS_ADD(1, pagetable->stats.entries,
pagetable->stats.max_entries);
KGSL_STATS_ADD(memdesc->size, pagetable->stats.mapped,
pagetable->stats.max_mapped);
spin_unlock(&pagetable->lock);
return 0;
err_free_gpuaddr:
spin_unlock(&pagetable->lock);
gen_pool_free(pagetable->pool, memdesc->gpuaddr, memdesc->size);
memdesc->gpuaddr = 0;
return ret;
}
EXPORT_SYMBOL(kgsl_mmu_map);
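
kgsl_mmu_map first carves a GPU virtual range out of the pagetable's genalloc pool, then asks the back end to write PTEs, and hands the range back on failure. A kernel-context sketch of just the pool calls involved, not a standalone program; SZ_64K stands in for memdesc->size, and gen_pool_alloc_aligned() is used exactly as in the function above:

/* sketch only, kernel context */
struct gen_pool *pool = gen_pool_create(PAGE_SHIFT, -1);
unsigned long gpuaddr;

gen_pool_add(pool, KGSL_PAGETABLE_BASE, CONFIG_MSM_KGSL_PAGE_TABLE_SIZE, -1);
gpuaddr = gen_pool_alloc_aligned(pool, SZ_64K, KGSL_MMU_ALIGN_SHIFT);
if (gpuaddr == 0)
	return -ENOMEM;	/* pool exhausted; the driver logs mapped/entries */
/* back end writes PTEs here; on failure the range is returned: */
gen_pool_free(pool, gpuaddr & KGSL_MMU_ALIGN_MASK, SZ_64K);
gen_pool_destroy(pool);
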
int
kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc)
{
if (memdesc->size == 0 || memdesc->gpuaddr == 0)
return 0;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE) {
memdesc->gpuaddr = 0;
return 0;
}
spin_lock(&pagetable->lock);
pagetable->pt_ops->mmu_unmap(pagetable->priv, memdesc);
/* Remove the statistics */
pagetable->stats.entries--;
pagetable->stats.mapped -= memdesc->size;
spin_unlock(&pagetable->lock);
gen_pool_free(pagetable->pool,
memdesc->gpuaddr & KGSL_MMU_ALIGN_MASK,
memdesc->size);
/*
* Don't clear the gpuaddr on global mappings because they
* may be in use by other pagetables
*/
if (!(memdesc->priv & KGSL_MEMFLAGS_GLOBAL))
memdesc->gpuaddr = 0;
return 0;
}
EXPORT_SYMBOL(kgsl_mmu_unmap);
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, unsigned int protflags)
{
int result = -EINVAL;
unsigned int gpuaddr = 0;
if (memdesc == NULL) {
KGSL_CORE_ERR("invalid memdesc\n");
goto error;
}
/* Not all global mappings are needed for all MMU types */
if (!memdesc->size)
return 0;
gpuaddr = memdesc->gpuaddr;
result = kgsl_mmu_map(pagetable, memdesc, protflags);
if (result)
goto error;
/* global mappings must have the same gpu address in all pagetables */
if (gpuaddr && gpuaddr != memdesc->gpuaddr) {
KGSL_CORE_ERR("pt %p addr mismatch phys 0x%08x "
"gpu 0x%08x 0x%08x\n", pagetable, memdesc->physaddr,
gpuaddr, memdesc->gpuaddr);
goto error_unmap;
}
memdesc->priv |= KGSL_MEMFLAGS_GLOBAL;
return result;
error_unmap:
kgsl_mmu_unmap(pagetable, memdesc);
error:
return result;
}
EXPORT_SYMBOL(kgsl_mmu_map_global);
int kgsl_mmu_stop(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
return 0;
else
return mmu->mmu_ops->mmu_stop(device);
}
EXPORT_SYMBOL(kgsl_mmu_stop);
int kgsl_mmu_close(struct kgsl_device *device)
{
struct kgsl_mmu *mmu = &device->mmu;
if (kgsl_mmu_type == KGSL_MMU_TYPE_NONE)
return 0;
else
return mmu->mmu_ops->mmu_close(device);
}
EXPORT_SYMBOL(kgsl_mmu_close);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id)
{
if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
return pt->pt_ops->mmu_pt_get_flags(pt, id);
else
return 0;
}
EXPORT_SYMBOL(kgsl_mmu_pt_get_flags);
void kgsl_mmu_ptpool_destroy(void *ptpool)
{
if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
kgsl_gpummu_ptpool_destroy(ptpool);
}
EXPORT_SYMBOL(kgsl_mmu_ptpool_destroy);
void *kgsl_mmu_ptpool_init(int ptsize, int entries)
{
if (KGSL_MMU_TYPE_GPU == kgsl_mmu_type)
return kgsl_gpummu_ptpool_init(ptsize, entries);
else
return (void *)(-1);
}
EXPORT_SYMBOL(kgsl_mmu_ptpool_init);
int kgsl_mmu_enabled(void)
{
if (KGSL_MMU_TYPE_NONE != kgsl_mmu_type)
return 1;
else
return 0;
}
EXPORT_SYMBOL(kgsl_mmu_enabled);
int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base)
{
if (KGSL_MMU_TYPE_NONE == kgsl_mmu_type)
return true;
else
return pt->pt_ops->mmu_pt_equal(pt, pt_base);
}
EXPORT_SYMBOL(kgsl_mmu_pt_equal);
enum kgsl_mmutype kgsl_mmu_get_mmutype(void)
{
return kgsl_mmu_type;
}
EXPORT_SYMBOL(kgsl_mmu_get_mmutype);
void kgsl_mmu_set_mmutype(char *mmutype)
{
kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
#ifdef CONFIG_MSM_KGSL_GPUMMU
kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
#elif defined(CONFIG_MSM_KGSL_IOMMU)
#endif
if (mmutype && !strncmp(mmutype, "gpummu", 6))
kgsl_mmu_type = KGSL_MMU_TYPE_GPU;
if (mmutype && !strncmp(mmutype, "nommu", 5))
kgsl_mmu_type = KGSL_MMU_TYPE_NONE;
}
EXPORT_SYMBOL(kgsl_mmu_set_mmutype);
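
kgsl_mmu_set_mmutype picks a compile-time default and then lets a module-parameter string override it via strncmp. The same parse as a standalone sketch (the boot string here is hypothetical):

#include <stdio.h>
#include <string.h>

int main(void)
{
	const char *mmutype = "gpummu";	/* hypothetical boot/module string */
	const char *type = "none";	/* compile-time default */
	if (mmutype && !strncmp(mmutype, "gpummu", 6))
		type = "gpummu";
	if (mmutype && !strncmp(mmutype, "nommu", 5))
		type = "nommu";
	printf("mmu type: %s\n", type);
	return 0;
}
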

drivers/gpu/msm/kgsl_mmu.h
@@ -0,0 +1,193 @@
/* Copyright (c) 2002,2007-2011, Code Aurora Forum. All rights reserved.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#ifndef __KGSL_MMU_H
#define __KGSL_MMU_H
#define KGSL_MMU_ALIGN_SHIFT 13
#define KGSL_MMU_ALIGN_MASK (~((1 << KGSL_MMU_ALIGN_SHIFT) - 1))
/* Identifier for the global page table */
/* Per process page tables will probably pass in the thread group
as an identifier */
#define KGSL_MMU_GLOBAL_PT 0
struct kgsl_device;
#define GSL_PT_SUPER_PTE 8
#define GSL_PT_PAGE_WV 0x00000001
#define GSL_PT_PAGE_RV 0x00000002
#define GSL_PT_PAGE_DIRTY 0x00000004
/* MMU registers - the register locations for all cores are the
same. The method for getting to those locations differs between
2D and 3D, but the 2D and 3D register functions do that magic
for us */
#define MH_MMU_CONFIG 0x0040
#define MH_MMU_VA_RANGE 0x0041
#define MH_MMU_PT_BASE 0x0042
#define MH_MMU_PAGE_FAULT 0x0043
#define MH_MMU_TRAN_ERROR 0x0044
#define MH_MMU_INVALIDATE 0x0045
#define MH_MMU_MPU_BASE 0x0046
#define MH_MMU_MPU_END 0x0047
#define MH_INTERRUPT_MASK 0x0A42
#define MH_INTERRUPT_STATUS 0x0A43
#define MH_INTERRUPT_CLEAR 0x0A44
#define MH_AXI_ERROR 0x0A45
#define MH_ARBITER_CONFIG 0x0A40
#define MH_DEBUG_CTRL 0x0A4E
#define MH_DEBUG_DATA 0x0A4F
#define MH_AXI_HALT_CONTROL 0x0A50
#define MH_CLNT_INTF_CTRL_CONFIG1 0x0A54
#define MH_CLNT_INTF_CTRL_CONFIG2 0x0A55
/* MH_MMU_CONFIG bit definitions */
#define MH_MMU_CONFIG__RB_W_CLNT_BEHAVIOR__SHIFT 0x00000004
#define MH_MMU_CONFIG__CP_W_CLNT_BEHAVIOR__SHIFT 0x00000006
#define MH_MMU_CONFIG__CP_R0_CLNT_BEHAVIOR__SHIFT 0x00000008
#define MH_MMU_CONFIG__CP_R1_CLNT_BEHAVIOR__SHIFT 0x0000000a
#define MH_MMU_CONFIG__CP_R2_CLNT_BEHAVIOR__SHIFT 0x0000000c
#define MH_MMU_CONFIG__CP_R3_CLNT_BEHAVIOR__SHIFT 0x0000000e
#define MH_MMU_CONFIG__CP_R4_CLNT_BEHAVIOR__SHIFT 0x00000010
#define MH_MMU_CONFIG__VGT_R0_CLNT_BEHAVIOR__SHIFT 0x00000012
#define MH_MMU_CONFIG__VGT_R1_CLNT_BEHAVIOR__SHIFT 0x00000014
#define MH_MMU_CONFIG__TC_R_CLNT_BEHAVIOR__SHIFT 0x00000016
#define MH_MMU_CONFIG__PA_W_CLNT_BEHAVIOR__SHIFT 0x00000018
/* MMU Flags */
#define KGSL_MMUFLAGS_TLBFLUSH 0x10000000
#define KGSL_MMUFLAGS_PTUPDATE 0x20000000
#define MH_INTERRUPT_MASK__AXI_READ_ERROR 0x00000001L
#define MH_INTERRUPT_MASK__AXI_WRITE_ERROR 0x00000002L
#define MH_INTERRUPT_MASK__MMU_PAGE_FAULT 0x00000004L
#ifdef CONFIG_MSM_KGSL_MMU
#define KGSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR | \
MH_INTERRUPT_MASK__MMU_PAGE_FAULT)
#else
#define KGSL_MMU_INT_MASK \
(MH_INTERRUPT_MASK__AXI_READ_ERROR | \
MH_INTERRUPT_MASK__AXI_WRITE_ERROR)
#endif
enum kgsl_mmutype {
KGSL_MMU_TYPE_GPU = 0,
KGSL_MMU_TYPE_IOMMU,
KGSL_MMU_TYPE_NONE
};
struct kgsl_pagetable {
spinlock_t lock;
struct kref refcount;
unsigned int max_entries;
struct gen_pool *pool;
struct list_head list;
unsigned int name;
struct kobject *kobj;
struct {
unsigned int entries;
unsigned int mapped;
unsigned int max_mapped;
unsigned int max_entries;
} stats;
const struct kgsl_mmu_pt_ops *pt_ops;
void *priv;
};
struct kgsl_mmu_ops {
int (*mmu_init) (struct kgsl_device *device);
int (*mmu_close) (struct kgsl_device *device);
int (*mmu_start) (struct kgsl_device *device);
int (*mmu_stop) (struct kgsl_device *device);
void (*mmu_setstate) (struct kgsl_device *device,
struct kgsl_pagetable *pagetable);
void (*mmu_device_setstate) (struct kgsl_device *device,
uint32_t flags);
void (*mmu_pagefault) (struct kgsl_device *device);
unsigned int (*mmu_get_current_ptbase)
(struct kgsl_device *device);
};
struct kgsl_mmu_pt_ops {
int (*mmu_map) (void *mmu_pt,
struct kgsl_memdesc *memdesc,
unsigned int protflags);
int (*mmu_unmap) (void *mmu_pt,
struct kgsl_memdesc *memdesc);
void *(*mmu_create_pagetable) (void);
void (*mmu_destroy_pagetable) (void *pt);
int (*mmu_pt_equal) (struct kgsl_pagetable *pt,
unsigned int pt_base);
unsigned int (*mmu_pt_get_flags) (struct kgsl_pagetable *pt,
enum kgsl_deviceid id);
};
struct kgsl_mmu {
unsigned int refcnt;
uint32_t flags;
struct kgsl_device *device;
unsigned int config;
struct kgsl_memdesc setstate_memory;
/* current page table object being used by device mmu */
struct kgsl_pagetable *defaultpagetable;
struct kgsl_pagetable *hwpagetable;
const struct kgsl_mmu_ops *mmu_ops;
void *priv;
};
#include "kgsl_gpummu.h"
extern struct kgsl_mmu_ops iommu_ops;
extern struct kgsl_mmu_pt_ops iommu_pt_ops;
struct kgsl_pagetable *kgsl_mmu_getpagetable(unsigned long name);
void kgsl_mmu_putpagetable(struct kgsl_pagetable *pagetable);
void kgsl_mh_start(struct kgsl_device *device);
void kgsl_mh_intrcallback(struct kgsl_device *device);
int kgsl_mmu_init(struct kgsl_device *device);
int kgsl_mmu_start(struct kgsl_device *device);
int kgsl_mmu_stop(struct kgsl_device *device);
int kgsl_mmu_close(struct kgsl_device *device);
int kgsl_mmu_map(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc,
unsigned int protflags);
int kgsl_mmu_map_global(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc, unsigned int protflags);
int kgsl_mmu_unmap(struct kgsl_pagetable *pagetable,
struct kgsl_memdesc *memdesc);
unsigned int kgsl_virtaddr_to_physaddr(void *virtaddr);
void kgsl_setstate(struct kgsl_device *device, uint32_t flags);
void kgsl_mmu_device_setstate(struct kgsl_device *device, uint32_t flags);
void kgsl_mmu_setstate(struct kgsl_device *device,
struct kgsl_pagetable *pt);
int kgsl_mmu_get_ptname_from_ptbase(unsigned int pt_base);
int kgsl_mmu_pt_get_flags(struct kgsl_pagetable *pt,
enum kgsl_deviceid id);
void kgsl_mmu_ptpool_destroy(void *ptpool);
void *kgsl_mmu_ptpool_init(int ptsize, int entries);
int kgsl_mmu_enabled(void);
int kgsl_mmu_pt_equal(struct kgsl_pagetable *pt,
unsigned int pt_base);
void kgsl_mmu_set_mmutype(char *mmutype);
unsigned int kgsl_mmu_get_current_ptbase(struct kgsl_device *device);
enum kgsl_mmutype kgsl_mmu_get_mmutype(void);
#endif /* __KGSL_MMU_H */
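
KGSL_MMU_ALIGN_SHIFT = 13 makes KGSL_MMU_ALIGN_MASK an 8 KB alignment mask; the unmap paths AND it into gpuaddr to strip any offset a caller added. A one-line check of the arithmetic (the address is hypothetical):

#include <stdio.h>

#define ALIGN_SHIFT 13				/* KGSL_MMU_ALIGN_SHIFT */
#define ALIGN_MASK (~((1u << ALIGN_SHIFT) - 1))	/* 0xffffe000, 8 KB */

int main(void)
{
	unsigned int gpuaddr = 0x40003a74;	/* hypothetical address */
	printf("0x%08x -> 0x%08x\n", gpuaddr, gpuaddr & ALIGN_MASK);
	return 0;				/* prints 0x40002000 */
}
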

drivers/gpu/msm/kgsl_pwrctrl.c
@@ -0,0 +1,715 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
* Copyright (C) 2011 Sony Ericsson Mobile Communications AB.
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 and
* only version 2 as published by the Free Software Foundation.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/interrupt.h>
#include <linux/err.h>
#include <mach/msm_iomap.h>
#include "kgsl.h"
#include "kgsl_pwrscale.h"
#include "kgsl_device.h"
#define KGSL_PWRFLAGS_POWER_ON 0
#define KGSL_PWRFLAGS_CLK_ON 1
#define KGSL_PWRFLAGS_AXI_ON 2
#define KGSL_PWRFLAGS_IRQ_ON 3
#define SWITCH_OFF 200
#define GPU_SWFI_LATENCY 3
#define UPDATE_BUSY_VAL 1000000
#define UPDATE_BUSY 50
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
unsigned int new_level)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (new_level < (pwr->num_pwrlevels - 1) &&
new_level >= pwr->thermal_pwrlevel &&
new_level != pwr->active_pwrlevel) {
pwr->active_pwrlevel = new_level;
if ((test_bit(KGSL_PWRFLAGS_CLK_ON, &pwr->power_flags)) ||
(device->state == KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
if (test_bit(KGSL_PWRFLAGS_AXI_ON, &pwr->power_flags)) {
if (pwr->ebi1_clk)
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
KGSL_PWR_WARN(device, "kgsl pwr level changed to %d\n",
pwr->active_pwrlevel);
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrlevel_change);
static int __gpuclk_store(int max, struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
int ret, i, delta = 5000000;
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
ret = sscanf(buf, "%lu", &val);
if (ret != 1)
return count;
mutex_lock(&device->mutex);
for (i = 0; i < pwr->num_pwrlevels; i++) {
if (abs(pwr->pwrlevels[i].gpu_freq - val) < delta) {
if (max)
pwr->thermal_pwrlevel = i;
break;
}
}
if (i == pwr->num_pwrlevels)
goto done;
/*
* If the current or requested clock speed is greater than the
* thermal limit, bump down immediately.
*/
if (pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq >
pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq)
kgsl_pwrctrl_pwrlevel_change(device, pwr->thermal_pwrlevel);
else if (!max)
kgsl_pwrctrl_pwrlevel_change(device, i);
done:
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_max_gpuclk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return __gpuclk_store(1, dev, attr, buf, count);
}
static int kgsl_pwrctrl_max_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
return snprintf(buf, PAGE_SIZE, "%d\n",
pwr->pwrlevels[pwr->thermal_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_gpuclk_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
return __gpuclk_store(0, dev, attr, buf, count);
}
static int kgsl_pwrctrl_gpuclk_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
return snprintf(buf, PAGE_SIZE, "%d\n",
pwr->pwrlevels[pwr->active_pwrlevel].gpu_freq);
}
static int kgsl_pwrctrl_pwrnap_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
int rc;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
rc = strict_strtoul(temp, 0, &val);
if (rc)
return rc;
mutex_lock(&device->mutex);
if (val == 1)
pwr->nap_allowed = true;
else if (val == 0)
pwr->nap_allowed = false;
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_pwrnap_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n", device->pwrctrl.nap_allowed);
}
static int kgsl_pwrctrl_idle_timer_store(struct device *dev,
struct device_attribute *attr,
const char *buf, size_t count)
{
char temp[20];
unsigned long val;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_pwrctrl *pwr;
const long div = 1000/HZ;
static unsigned int org_interval_timeout = 1;
int rc;
if (device == NULL)
return 0;
pwr = &device->pwrctrl;
snprintf(temp, sizeof(temp), "%.*s",
(int)min(count, sizeof(temp) - 1), buf);
rc = strict_strtoul(temp, 0, &val);
if (rc)
return rc;
if (org_interval_timeout == 1)
org_interval_timeout = pwr->interval_timeout;
mutex_lock(&device->mutex);
/* Let the timeout be requested in ms, but convert to jiffies. */
val /= div;
if (val >= org_interval_timeout)
pwr->interval_timeout = val;
mutex_unlock(&device->mutex);
return count;
}
static int kgsl_pwrctrl_idle_timer_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
struct kgsl_device *device = kgsl_device_from_dev(dev);
if (device == NULL)
return 0;
return snprintf(buf, PAGE_SIZE, "%d\n",
device->pwrctrl.interval_timeout);
}
static int kgsl_pwrctrl_gpubusy_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
int ret;
struct kgsl_device *device = kgsl_device_from_dev(dev);
struct kgsl_busy *b = &device->pwrctrl.busy;
ret = snprintf(buf, 17, "%7d %7d\n",
b->on_time_old, b->time_old);
if (!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
b->on_time_old = 0;
b->time_old = 0;
}
return ret;
}
DEVICE_ATTR(gpuclk, 0644, kgsl_pwrctrl_gpuclk_show, kgsl_pwrctrl_gpuclk_store);
DEVICE_ATTR(max_gpuclk, 0644, kgsl_pwrctrl_max_gpuclk_show,
kgsl_pwrctrl_max_gpuclk_store);
DEVICE_ATTR(pwrnap, 0644, kgsl_pwrctrl_pwrnap_show, kgsl_pwrctrl_pwrnap_store);
DEVICE_ATTR(idle_timer, 0644, kgsl_pwrctrl_idle_timer_show,
kgsl_pwrctrl_idle_timer_store);
DEVICE_ATTR(gpubusy, 0644, kgsl_pwrctrl_gpubusy_show,
NULL);
static struct device_attribute *pwrctrl_attr_list[] = {
&dev_attr_gpuclk,
&dev_attr_max_gpuclk,
&dev_attr_pwrnap,
&dev_attr_idle_timer,
&dev_attr_gpubusy,
NULL
};
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device)
{
return kgsl_create_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device)
{
kgsl_remove_device_sysfs_files(device->dev, pwrctrl_attr_list);
}
/* Track the amount of time the gpu is on vs the total system time. *
* Regularly update the percentage of busy time displayed by sysfs. */
static void kgsl_pwrctrl_busy_time(struct kgsl_device *device, bool on_time)
{
struct kgsl_busy *b = &device->pwrctrl.busy;
int elapsed;
if (b->start.tv_sec == 0)
do_gettimeofday(&(b->start));
do_gettimeofday(&(b->stop));
elapsed = (b->stop.tv_sec - b->start.tv_sec) * 1000000;
elapsed += b->stop.tv_usec - b->start.tv_usec;
b->time += elapsed;
if (on_time)
b->on_time += elapsed;
/* Update the output regularly and reset the counters. */
if ((b->time > UPDATE_BUSY_VAL) ||
!test_bit(KGSL_PWRFLAGS_AXI_ON, &device->pwrctrl.power_flags)) {
b->on_time_old = b->on_time;
b->time_old = b->time;
b->on_time = 0;
b->time = 0;
}
do_gettimeofday(&(b->start));
}
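
kgsl_pwrctrl_busy_time accumulates GPU-on time versus wall time in microseconds and snapshots both into the *_old fields roughly once per UPDATE_BUSY_VAL (one second); the gpubusy sysfs file prints that snapshot. Turning a snapshot into a busy percentage (sample values are hypothetical):

#include <stdio.h>

int main(void)
{
	/* one snapshot as exposed by the gpubusy file; values hypothetical */
	int on_time_old = 421337;	/* us with clocks on */
	int time_old = 1000132;		/* us in the ~1 s window */
	printf("gpu busy: %.1f%%\n", 100.0 * on_time_old / time_old);
	return 0;			/* prints 42.1% */
}
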
void kgsl_pwrctrl_clk(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int i = 0;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_CLK_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"clocks off, device %d\n", device->id);
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_disable(pwr->grp_clks[i]);
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
(device->requested_state != KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->num_pwrlevels - 1].
gpu_freq);
kgsl_pwrctrl_busy_time(device, true);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_CLK_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"clocks on, device %d\n", device->id);
if ((pwr->pwrlevels[0].gpu_freq > 0) &&
(device->state != KGSL_STATE_NAP))
clk_set_rate(pwr->grp_clks[0],
pwr->pwrlevels[pwr->active_pwrlevel].
gpu_freq);
/* as the last step, enable grp_clk
so that the GPU interrupt can come through */
for (i = KGSL_MAX_CLKS - 1; i > 0; i--)
if (pwr->grp_clks[i])
clk_enable(pwr->grp_clks[i]);
kgsl_pwrctrl_busy_time(device, false);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_clk);
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_AXI_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"axi off, device %d\n", device->id);
if (pwr->ebi1_clk) {
clk_set_rate(pwr->ebi1_clk, 0);
clk_disable(pwr->ebi1_clk);
}
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_AXI_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"axi on, device %d\n", device->id);
if (pwr->ebi1_clk) {
clk_enable(pwr->ebi1_clk);
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
}
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_axi);
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"power off, device %d\n", device->id);
if (pwr->gpu_reg)
regulator_disable(pwr->gpu_reg);
}
} else if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_POWER_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"power on, device %d\n", device->id);
if (pwr->gpu_reg)
regulator_enable(pwr->gpu_reg);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_pwrrail);
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
if (state == KGSL_PWRFLAGS_ON) {
if (!test_and_set_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq on, device %d\n", device->id);
enable_irq(pwr->interrupt_num);
device->ftbl->irqctrl(device, 1);
}
} else if (state == KGSL_PWRFLAGS_OFF) {
if (test_and_clear_bit(KGSL_PWRFLAGS_IRQ_ON,
&pwr->power_flags)) {
KGSL_PWR_INFO(device,
"irq off, device %d\n", device->id);
device->ftbl->irqctrl(device, 0);
if (in_interrupt())
disable_irq_nosync(pwr->interrupt_num);
else
disable_irq(pwr->interrupt_num);
}
}
}
EXPORT_SYMBOL(kgsl_pwrctrl_irq);
int kgsl_pwrctrl_init(struct kgsl_device *device)
{
int i, result = 0;
struct clk *clk;
struct platform_device *pdev =
container_of(device->parentdev, struct platform_device, dev);
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
struct kgsl_device_platform_data *pdata_dev = pdev->dev.platform_data;
struct kgsl_device_pwr_data *pdata_pwr = &pdata_dev->pwr_data;
const char *clk_names[KGSL_MAX_CLKS] = {pwr->src_clk_name,
pdata_dev->clk.name.clk,
pdata_dev->clk.name.pclk,
pdata_dev->imem_clk_name.clk,
pdata_dev->imem_clk_name.pclk};
/* acquire clocks */
for (i = 1; i < KGSL_MAX_CLKS; i++) {
if (clk_names[i]) {
clk = clk_get(&pdev->dev, clk_names[i]);
if (IS_ERR(clk))
goto clk_err;
pwr->grp_clks[i] = clk;
}
}
/* Make sure we have a source clk for freq setting */
clk = clk_get(&pdev->dev, clk_names[0]);
pwr->grp_clks[0] = (IS_ERR(clk)) ? pwr->grp_clks[1] : clk;
/* put the AXI bus into asynchronous mode with the graphics cores */
if (pdata_pwr->set_grp_async != NULL)
pdata_pwr->set_grp_async();
if (pdata_pwr->num_levels > KGSL_MAX_PWRLEVELS) {
KGSL_PWR_ERR(device, "invalid power level count: %d\n",
pdata_pwr->num_levels);
result = -EINVAL;
goto done;
}
pwr->num_pwrlevels = pdata_pwr->num_levels;
pwr->active_pwrlevel = pdata_pwr->init_level;
for (i = 0; i < pdata_pwr->num_levels; i++) {
// pwr->pwrlevels[i].gpu_freq =
// (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
// clk_round_rate(pwr->grp_clks[0],
// pdata_pwr->pwrlevel[i].
// gpu_freq) : 0;
pwr->pwrlevels[i].gpu_freq = (pdata_pwr->pwrlevel[i].gpu_freq > 0) ?
pdata_pwr->pwrlevel[i].gpu_freq : 0;
pwr->pwrlevels[i].bus_freq =
pdata_pwr->pwrlevel[i].bus_freq;
}
/* Do not set_rate for targets in sync with AXI */
if (pwr->pwrlevels[0].gpu_freq > 0)
clk_set_rate(pwr->grp_clks[0], pwr->
pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
pwr->gpu_reg = regulator_get(NULL, pwr->regulator_name);
if (IS_ERR(pwr->gpu_reg))
pwr->gpu_reg = NULL;
pwr->power_flags = 0;
pwr->nap_allowed = pdata_pwr->nap_allowed;
pwr->interval_timeout = pdata_pwr->idle_timeout;
pwr->ebi1_clk = clk_get(NULL, "ebi1_kgsl_clk");
if (IS_ERR(pwr->ebi1_clk))
pwr->ebi1_clk = NULL;
else
clk_set_rate(pwr->ebi1_clk,
pwr->pwrlevels[pwr->active_pwrlevel].
bus_freq);
/* acquire interrupt */
pwr->interrupt_num =
platform_get_irq_byname(pdev, pwr->irq_name);
if (pwr->interrupt_num <= 0) {
KGSL_PWR_ERR(device, "platform_get_irq_byname failed: %d\n",
pwr->interrupt_num);
result = -EINVAL;
goto done;
}
register_early_suspend(&device->display_off);
return result;
clk_err:
result = PTR_ERR(clk);
KGSL_PWR_ERR(device, "clk_get(%s) failed: %d\n",
clk_names[i], result);
done:
return result;
}
void kgsl_pwrctrl_close(struct kgsl_device *device)
{
struct kgsl_pwrctrl *pwr = &device->pwrctrl;
int i;
KGSL_PWR_INFO(device, "close device %d\n", device->id);
unregister_early_suspend(&device->display_off);
if (pwr->interrupt_num > 0) {
if (pwr->have_irq) {
free_irq(pwr->interrupt_num, NULL);
pwr->have_irq = 0;
}
pwr->interrupt_num = 0;
}
clk_put(pwr->ebi1_clk);
pwr->pcl = 0;
if (pwr->gpu_reg) {
regulator_put(pwr->gpu_reg);
pwr->gpu_reg = NULL;
}
for (i = 1; i < KGSL_MAX_CLKS; i++)
if (pwr->grp_clks[i]) {
clk_put(pwr->grp_clks[i]);
pwr->grp_clks[i] = NULL;
}
pwr->grp_clks[0] = NULL;
pwr->power_flags = 0;
}
void kgsl_idle_check(struct work_struct *work)
{
struct kgsl_device *device = container_of(work, struct kgsl_device,
idle_check_ws);
mutex_lock(&device->mutex);
if (device->state & (KGSL_STATE_ACTIVE | KGSL_STATE_NAP)) {
if (device->requested_state != KGSL_STATE_SLEEP)
kgsl_pwrscale_idle(device);
if (kgsl_pwrctrl_sleep(device) != 0) {
mod_timer(&device->idle_timer,
jiffies +
device->pwrctrl.interval_timeout);
/* If the GPU has been too busy to sleep, make sure *
* that is accurately reflected in the % busy numbers. */
device->pwrctrl.busy.no_nap_cnt++;
if (device->pwrctrl.busy.no_nap_cnt > UPDATE_BUSY) {
kgsl_pwrctrl_busy_time(device, true);
device->pwrctrl.busy.no_nap_cnt = 0;
}
}
} else if (device->state & (KGSL_STATE_HUNG |
KGSL_STATE_DUMP_AND_RECOVER)) {
device->requested_state = KGSL_STATE_NONE;
}
mutex_unlock(&device->mutex);
}
void kgsl_timer(unsigned long data)
{
struct kgsl_device *device = (struct kgsl_device *) data;
KGSL_PWR_INFO(device, "idle timer expired device %d\n", device->id);
if (device->requested_state != KGSL_STATE_SUSPEND) {
device->requested_state = KGSL_STATE_SLEEP;
/* Have work run in a non-interrupt context. */
queue_work(device->work_queue, &device->idle_check_ws);
}
}
void kgsl_pre_hwaccess(struct kgsl_device *device)
{
BUG_ON(!mutex_is_locked(&device->mutex));
if (device->state & (KGSL_STATE_SLEEP | KGSL_STATE_NAP))
kgsl_pwrctrl_wake(device);
}
EXPORT_SYMBOL(kgsl_pre_hwaccess);
void kgsl_check_suspended(struct kgsl_device *device)
{
if (device->requested_state == KGSL_STATE_SUSPEND ||
device->state == KGSL_STATE_SUSPEND) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->hwaccess_gate);
mutex_lock(&device->mutex);
}
if (device->state == KGSL_STATE_DUMP_AND_RECOVER) {
mutex_unlock(&device->mutex);
wait_for_completion(&device->recovery_gate);
mutex_lock(&device->mutex);
}
}
/******************************************************************/
/* Caller must hold the device mutex. */
int kgsl_pwrctrl_sleep(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	KGSL_PWR_INFO(device, "sleep device %d\n", device->id);

	/* Work through the legal state transitions */
	if (device->requested_state == KGSL_STATE_NAP) {
		if (device->ftbl->isidle(device))
			goto nap;
	} else if (device->requested_state == KGSL_STATE_SLEEP) {
		if (device->state == KGSL_STATE_NAP ||
		    device->ftbl->isidle(device))
			goto sleep;
	}

	device->requested_state = KGSL_STATE_NONE;
	return -EBUSY;

sleep:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	if (pwr->pwrlevels[0].gpu_freq > 0)
		clk_set_rate(pwr->grp_clks[0],
			     pwr->pwrlevels[pwr->num_pwrlevels - 1].gpu_freq);
	kgsl_pwrctrl_busy_time(device, false);
	pwr->busy.start.tv_sec = 0;
	device->pwrctrl.time = 0;
	goto clk_off;

nap:
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_OFF);

clk_off:
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);

	device->state = device->requested_state;
	device->requested_state = KGSL_STATE_NONE;
	wake_unlock(&device->idle_wakelock);
	KGSL_PWR_WARN(device, "state -> NAP/SLEEP(%d), device %d\n",
		      device->state, device->id);

	return 0;
}
EXPORT_SYMBOL(kgsl_pwrctrl_sleep);

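/*
 * Illustrative sketch (an aside, not part of this diff): how a caller
 * drives the sleep path under the "caller must hold the device mutex"
 * contract. kgsl_idle_check() above is the real in-tree caller;
 * example_try_sleep() is a hypothetical helper isolating the pattern.
 */
static int example_try_sleep(struct kgsl_device *device)
{
	int ret;

	mutex_lock(&device->mutex);
	device->requested_state = KGSL_STATE_SLEEP;
	ret = kgsl_pwrctrl_sleep(device);	/* -EBUSY if the GPU is not idle */
	mutex_unlock(&device->mutex);
	return ret;
}
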
/******************************************************************/
/* Caller must hold the device mutex. */
void kgsl_pwrctrl_wake(struct kgsl_device *device)
{
	if (device->state == KGSL_STATE_SUSPEND)
		return;

	if (device->state != KGSL_STATE_NAP) {
		kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
	}

	/* Turn on the core clocks */
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);

	/* Enable state before turning on irq */
	device->state = KGSL_STATE_ACTIVE;
	KGSL_PWR_WARN(device, "state -> ACTIVE, device %d\n", device->id);
	kgsl_pwrctrl_irq(device, KGSL_PWRFLAGS_ON);

	/* Re-enable HW access */
	mod_timer(&device->idle_timer,
		  jiffies + device->pwrctrl.interval_timeout);

	wake_lock(&device->idle_wakelock);
	KGSL_PWR_INFO(device, "wake return for device %d\n", device->id);
}
EXPORT_SYMBOL(kgsl_pwrctrl_wake);

void kgsl_pwrctrl_enable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_ON);
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_ON);
}
EXPORT_SYMBOL(kgsl_pwrctrl_enable);

void kgsl_pwrctrl_disable(struct kgsl_device *device)
{
	/* Order pwrrail/clk sequence based upon platform */
	kgsl_pwrctrl_axi(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_clk(device, KGSL_PWRFLAGS_OFF);
	kgsl_pwrctrl_pwrrail(device, KGSL_PWRFLAGS_OFF);
}
EXPORT_SYMBOL(kgsl_pwrctrl_disable);
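
/*
 * Illustrative pairing (an aside, not part of this diff): a platform
 * suspend/resume path would typically bracket a full power-down with
 * these two helpers,
 *
 *	kgsl_pwrctrl_disable(device);	...going down
 *	...
 *	kgsl_pwrctrl_enable(device);	...coming back up
 *
 * Note the mirrored ordering: rail, then clocks, then AXI on the way
 * up, and the exact reverse on the way down.
 */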

@@ -0,0 +1,87 @@
/* Copyright (c) 2010-2011, Code Aurora Forum. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 and
 * only version 2 as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */
#ifndef __KGSL_PWRCTRL_H
#define __KGSL_PWRCTRL_H

#include <mach/internal_power_rail.h>

/*****************************************************************************
** power flags
*****************************************************************************/
#define KGSL_PWRFLAGS_ON	1
#define KGSL_PWRFLAGS_OFF	0

#define KGSL_PWRLEVEL_TURBO	0
#define KGSL_PWRLEVEL_NOMINAL	1
#define KGSL_PWRLEVEL_LOW_OFFSET	2

#define KGSL_MAX_CLKS	5

struct platform_device;

struct kgsl_busy {
	struct timeval start;
	struct timeval stop;
	int on_time;
	int time;
	int on_time_old;
	int time_old;
	unsigned int no_nap_cnt;
};

struct kgsl_pwrctrl {
	int interrupt_num;
	int have_irq;
	unsigned int pwr_rail;
	struct clk *ebi1_clk;
	struct clk *grp_clks[KGSL_MAX_CLKS];
	unsigned long power_flags;
	struct kgsl_pwrlevel pwrlevels[KGSL_MAX_PWRLEVELS];
	unsigned int active_pwrlevel;
	int thermal_pwrlevel;
	unsigned int num_pwrlevels;
	unsigned int interval_timeout;
	struct regulator *gpu_reg;
	uint32_t pcl;
	unsigned int nap_allowed;
	const char *regulator_name;
	const char *irq_name;
	const char *src_clk_name;
	s64 time;
	struct kgsl_busy busy;
};

void kgsl_pwrctrl_clk(struct kgsl_device *device, int state);
void kgsl_pwrctrl_axi(struct kgsl_device *device, int state);
void kgsl_pwrctrl_pwrrail(struct kgsl_device *device, int state);
void kgsl_pwrctrl_irq(struct kgsl_device *device, int state);
int kgsl_pwrctrl_init(struct kgsl_device *device);
void kgsl_pwrctrl_close(struct kgsl_device *device);
void kgsl_timer(unsigned long data);
void kgsl_idle_check(struct work_struct *work);
void kgsl_pre_hwaccess(struct kgsl_device *device);
void kgsl_check_suspended(struct kgsl_device *device);
int kgsl_pwrctrl_sleep(struct kgsl_device *device);
void kgsl_pwrctrl_wake(struct kgsl_device *device);
void kgsl_pwrctrl_pwrlevel_change(struct kgsl_device *device,
				  unsigned int level);
int kgsl_pwrctrl_init_sysfs(struct kgsl_device *device);
void kgsl_pwrctrl_uninit_sysfs(struct kgsl_device *device);
void kgsl_pwrctrl_enable(struct kgsl_device *device);
void kgsl_pwrctrl_disable(struct kgsl_device *device);

static inline unsigned long kgsl_get_clkrate(struct clk *clk)
{
	return (clk != NULL) ? clk_get_rate(clk) : 0;
}

#endif /* __KGSL_PWRCTRL_H */
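
As an aside, the NULL-safe kgsl_get_clkrate() helper above lets callers query a clock's rate without first checking whether the clock pointer is populated. A minimal hypothetical use, touching only fields declared in struct kgsl_pwrctrl (example_log_rates() is illustrative and not part of the driver):

static void example_log_rates(struct kgsl_device *device)
{
	struct kgsl_pwrctrl *pwr = &device->pwrctrl;

	/* Each rate reads as 0 while the corresponding clock pointer is
	 * NULL, e.g. after kgsl_pwrctrl_close() has run. */
	KGSL_PWR_INFO(device, "core %lu Hz, bus %lu Hz\n",
		      kgsl_get_clkrate(pwr->grp_clks[0]),
		      kgsl_get_clkrate(pwr->ebi1_clk));
}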

Some files were not shown because too many files have changed in this diff.