#
# Architectures that offer a FUNCTION_TRACER implementation should
#  select HAVE_FUNCTION_TRACER:
#

config USER_STACKTRACE_SUPPORT
	bool

config NOP_TRACER
	bool

config HAVE_FTRACE_NMI_ENTER
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config HAVE_FUNCTION_TRACER
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config HAVE_FUNCTION_GRAPH_TRACER
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config HAVE_FUNCTION_GRAPH_FP_TEST
	bool
	help
	 An arch may pass in a unique value (frame pointer) to both the
	 entering and exiting of a function. On exit, the value is compared
	 and if it does not match, then it will panic the kernel.

config HAVE_FUNCTION_TRACE_MCOUNT_TEST
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config HAVE_DYNAMIC_FTRACE
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config HAVE_FTRACE_MCOUNT_RECORD
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config HAVE_HW_BRANCH_TRACER
	bool

config HAVE_SYSCALL_TRACEPOINTS
	bool
	help
	  See Documentation/trace/ftrace-implementation.txt

config TRACER_MAX_TRACE
	bool

config RING_BUFFER
	bool

config FTRACE_NMI_ENTER
	bool
	depends on HAVE_FTRACE_NMI_ENTER
	default y

config EVENT_TRACING
	select CONTEXT_SWITCH_TRACER
	bool

config CONTEXT_SWITCH_TRACER
	bool

config RING_BUFFER_ALLOW_SWAP
	bool
	help
	 Allow the use of ring_buffer_swap_cpu.
	 Adds a very slight overhead to tracing when enabled.

# All tracer options should select GENERIC_TRACER. For those options that are
# enabled by all tracers (context switch and event tracer) they select TRACING.
# This allows those options to appear when no other tracer is selected. But the
# options do not appear when something else selects them. We need the two options
# GENERIC_TRACER and TRACING to avoid circular dependencies and to accomplish
# the hiding of the automatic options.

config TRACING
	bool
	select DEBUG_FS
	select RING_BUFFER
	select STACKTRACE if STACKTRACE_SUPPORT
	select TRACEPOINTS
	select NOP_TRACER
	select BINARY_PRINTF
	select EVENT_TRACING

config GENERIC_TRACER
	bool
	select TRACING

#
# Minimum requirements an architecture has to meet for us to
# be able to offer generic tracing facilities:
#
config TRACING_SUPPORT
	bool
	# PPC32 has no irqflags tracing support, but it can use most of the
	# tracers anyway; they were tested to build and work. Note that new
	# exceptions to this list aren't welcome; better to implement the
	# irqflags tracing for your architecture.
	depends on TRACE_IRQFLAGS_SUPPORT || PPC32
	depends on STACKTRACE_SUPPORT
	default y

if TRACING_SUPPORT

menuconfig FTRACE
	bool "Tracers"
	default y if DEBUG_KERNEL
	help
	 Enable the kernel tracing infrastructure.
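
	 Most of the tracer options below are controlled through files
	 under debugfs. If debugfs is not already mounted, a typical way
	 to mount it is (the mount point is conventional, not mandatory):

	    mount -t debugfs nodev /sys/kernel/debug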

if FTRACE

config FUNCTION_TRACER
	bool "Kernel Function Tracer"
	depends on HAVE_FUNCTION_TRACER
	select FRAME_POINTER
	select KALLSYMS
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  Enable the kernel to trace every kernel function. This is done
	  by using a compiler feature to insert a small, 5-byte No-Operation
	  instruction at the beginning of every kernel function; this NOP
	  sequence is then dynamically patched into a tracer call when
	  tracing is enabled by the administrator. If it's runtime disabled
	  (the bootup default), then the overhead of the instructions is very
	  small and not measurable even in micro-benchmarks.
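
	  For example, a typical way to use the function tracer at run
	  time (paths assume debugfs is mounted at /sys/kernel/debug) is:

	    echo function > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace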

config FUNCTION_GRAPH_TRACER
	bool "Kernel Function Graph Tracer"
	depends on HAVE_FUNCTION_GRAPH_TRACER
	depends on FUNCTION_TRACER
	depends on !X86_32 || !CC_OPTIMIZE_FOR_SIZE
	default y
	help
	  Enable the kernel to trace a function at both its entry and
	  its return.
	  Its first purpose is to trace the duration of functions and
	  to draw a call graph for each thread, with some information
	  like the return value. This is done by saving the current
	  return address into a stack of calls kept on the current
	  task structure.
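
	  For example, a typical way to select this tracer at run time
	  (paths assume debugfs is mounted at /sys/kernel/debug) is:

	    echo function_graph > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace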

config IRQSOFF_TRACER
	bool "Interrupts-off Latency Tracer"
	default n
	depends on TRACE_IRQFLAGS_SUPPORT
	depends on GENERIC_TIME
	select TRACE_IRQFLAGS
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in irqs-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the preempt-off timing option can be
	  used together or separately.)
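
	  For example, a typical session with this tracer (paths assume
	  debugfs is mounted at /sys/kernel/debug) could look like:

	      echo irqsoff > /sys/kernel/debug/tracing/current_tracer
	      cat /sys/kernel/debug/tracing/tracing_max_latency
	      cat /sys/kernel/debug/tracing/trace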

config PREEMPT_TRACER
	bool "Preemption-off Latency Tracer"
	default n
	depends on GENERIC_TIME
	depends on PREEMPT
	select GENERIC_TRACER
	select TRACER_MAX_TRACE
	select RING_BUFFER_ALLOW_SWAP
	help
	  This option measures the time spent in preemption-off critical
	  sections, with microsecond accuracy.

	  The default measurement method is a maximum search, which is
	  disabled by default and can be runtime (re-)started
	  via:

	      echo 0 > /sys/kernel/debug/tracing/tracing_max_latency

	  (Note that kernel size and overhead increase with this option
	  enabled. This option and the irqs-off timing option can be
	  used together or separately.)

config SYSPROF_TRACER
	bool "Sysprof Tracer"
	depends on X86
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer provides the trace needed by the 'Sysprof' userspace
	  tool.

config SCHED_TRACER
	bool "Scheduling Latency Tracer"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	select TRACER_MAX_TRACE
	help
	  This tracer tracks the latency of the highest priority task
	  to be scheduled in, starting from the point it has woken up.
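
	  For example, a typical way to use it (the tracer is selected
	  as "wakeup"; paths assume debugfs is mounted at
	  /sys/kernel/debug) is:

	    echo wakeup > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/tracing_max_latency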

config ENABLE_DEFAULT_TRACERS
	bool "Trace process context switches and events"
	depends on !GENERIC_TRACER
	select TRACING
	help
	  This tracer hooks into various trace points in the kernel,
	  allowing the user to pick and choose which trace points they
	  want to trace. It also includes the sched_switch tracer plugin.
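
	  For example, a typical way to enable a single trace point
	  (paths assume debugfs is mounted at /sys/kernel/debug) is:

	    echo 1 > /sys/kernel/debug/tracing/events/sched/sched_switch/enable
	    cat /sys/kernel/debug/tracing/trace_pipe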

config FTRACE_SYSCALLS
	bool "Trace syscalls"
	depends on HAVE_SYSCALL_TRACEPOINTS
	select GENERIC_TRACER
	select KALLSYMS
	help
	  Basic tracer to catch the syscall entry and exit events.
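
	  For example, the syscall events can typically be enabled as a
	  group (paths assume debugfs is mounted at /sys/kernel/debug):

	    echo 1 > /sys/kernel/debug/tracing/events/syscalls/enable
	    cat /sys/kernel/debug/tracing/trace_pipe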

config BOOT_TRACER
	bool "Trace boot initcalls"
	select GENERIC_TRACER
	select CONTEXT_SWITCH_TRACER
	help
	  This tracer helps developers to optimize boot times: it records
	  the timings of the initcalls and traces key events and the identity
	  of tasks that can cause boot delays, such as context-switches.

	  Its aim is to be parsed by the scripts/bootgraph.pl tool to
	  produce pretty graphics about boot inefficiencies, giving a visual
	  representation of the delays during initcalls - but the raw
	  /debug/tracing/trace text output is readable too.

	  You must pass initcall_debug and ftrace=initcall on the kernel
	  command line to enable this on bootup.
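
	  A typical way to turn the recorded boot trace into a graph
	  is (the output file name here is arbitrary):

	    dmesg | perl scripts/bootgraph.pl > bootgraph.svg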

config TRACE_BRANCH_PROFILING
	bool
	select GENERIC_TRACER

choice
	prompt "Branch Profiling"
	default BRANCH_PROFILE_NONE
	help
	 Branch profiling is a software profiler. It will add hooks
	 into the C conditionals to test which path a branch takes.

	 The likely/unlikely profiler only looks at the conditions that
	 are annotated with a likely or unlikely macro.

	 The "all branch" profiler will profile every if statement in the
	 kernel. This profiler will also enable the likely/unlikely
	 profiler.

	 Either of the above profilers adds a bit of overhead to the system.
	 If unsure, choose "No branch profiling".

config BRANCH_PROFILE_NONE
	bool "No branch profiling"
	help
	 No branch profiling. Branch profiling adds a bit of overhead.
	 Only enable it if you want to analyse the branching behavior.
	 Otherwise keep it disabled.

config PROFILE_ANNOTATED_BRANCHES
	bool "Trace likely/unlikely profiler"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all the likely and unlikely macros
	  in the kernel. It will display the results in:

	  /sys/kernel/debug/tracing/profile_annotated_branch

	  Note: this will add a significant overhead; only turn this
	  on if you need to profile the system's use of these macros.

config PROFILE_ALL_BRANCHES
	bool "Profile all if conditionals"
	select TRACE_BRANCH_PROFILING
	help
	  This tracer profiles all branch conditions. Every if ()
	  in the kernel is recorded, whether the branch was taken or not.
	  The results will be displayed in:

	  /sys/kernel/debug/tracing/profile_branch

	  This option also enables the likely/unlikely profiler.

	  This configuration, when enabled, will impose a great overhead
	  on the system. This should only be enabled when the system
	  is to be analyzed.

endchoice

config TRACING_BRANCHES
	bool
	help
	  Selected by tracers that will trace the likely and unlikely
	  conditions. This prevents the tracers themselves from being
	  profiled. Profiling the tracing infrastructure can only happen
	  when the likelys and unlikelys are not being traced.

config BRANCH_TRACER
	bool "Trace likely/unlikely instances"
	depends on TRACE_BRANCH_PROFILING
	select TRACING_BRANCHES
	help
	  This traces the events of likely and unlikely condition
	  calls in the kernel.  The difference between this and the
	  "Trace likely/unlikely profiler" is that this is not a
	  histogram of the callers, but actually places the calling
	  events into a running trace buffer to see when and where the
	  events happened, as well as their results.

	  Say N if unsure.

config POWER_TRACER
	bool "Trace power consumption behavior"
	depends on X86
	select GENERIC_TRACER
	help
	  This tracer helps developers to analyze and optimize the kernel's
	  power management decisions, specifically the C-state and P-state
	  behavior.

config STACK_TRACER
	bool "Trace max stack"
	depends on HAVE_FUNCTION_TRACER
	select FUNCTION_TRACER
	select STACKTRACE
	select KALLSYMS
	help
	  This special tracer records the maximum stack footprint of the
	  kernel and displays it in /sys/kernel/debug/tracing/stack_trace.

	  This tracer works by hooking into every function call that the
	  kernel executes, and keeping a maximum stack depth value and
	  stack-trace saved.  If this is configured with DYNAMIC_FTRACE
	  then it will not have any overhead while the stack tracer
	  is disabled.

	  To enable the stack tracer on bootup, pass in 'stacktrace'
	  on the kernel command line.

	  The stack tracer can also be enabled or disabled via the
	  sysctl kernel.stack_tracer_enabled.
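
	  For example, a typical way to enable it at run time and read
	  the result (paths assume debugfs is mounted at
	  /sys/kernel/debug) is:

	    echo 1 > /proc/sys/kernel/stack_tracer_enabled
	    cat /sys/kernel/debug/tracing/stack_trace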

	  Say N if unsure.

config HW_BRANCH_TRACER
	depends on HAVE_HW_BRANCH_TRACER
	bool "Trace hw branches"
	select GENERIC_TRACER
	help
	  This tracer records all branches on the system in a circular
	  buffer giving access to the last N branches for each CPU.

config KMEMTRACE
	bool "Trace SLAB allocations"
	select GENERIC_TRACER
	help
	  kmemtrace provides tracing for slab allocator functions, such as
	  kmalloc, kfree, kmem_cache_alloc, kmem_cache_free, etc. Collected
	  data is then fed to the userspace application in order to analyse
	  allocation hotspots, internal fragmentation and so on, making it
	  possible to see how well an allocator performs, as well as debug
	  and profile kernel code.

	  This requires a userspace application to use. See
	  Documentation/trace/kmemtrace.txt for more information.

	  Saying Y will make the kernel somewhat larger and slower. However,
	  if you disable kmemtrace at run-time or boot-time, the performance
	  impact is minimal (depending on the arch the kernel is built for).

	  If unsure, say N.

config WORKQUEUE_TRACER
	bool "Trace workqueues"
	select GENERIC_TRACER
	help
	  The workqueue tracer provides some statistical information
	  about each cpu workqueue thread, such as the number of works
	  inserted and executed since its creation. It can help to
	  evaluate the amount of work each of them has to perform. For
	  example, it can help a developer decide whether to use a
	  per-cpu workqueue instead of a singlethreaded one.

config BLK_DEV_IO_TRACE
	bool "Support for tracing block io actions"
	depends on SYSFS
	depends on BLOCK
	select RELAY
	select DEBUG_FS
	select TRACEPOINTS
	select GENERIC_TRACER
	select STACKTRACE
	help
	  Say Y here if you want to be able to trace the block layer actions
	  on a given queue. Tracing allows you to see any traffic happening
	  on a block device queue. For more information (and the userspace
	  support tools needed), fetch the blktrace tools from:

	  git://git.kernel.dk/blktrace.git

	  Tracing is also possible using the ftrace interface, e.g.:

	    echo 1 > /sys/block/sda/sda1/trace/enable
	    echo blk > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe

	  If unsure, say N.

config DYNAMIC_FTRACE
	bool "enable/disable ftrace tracepoints dynamically"
	depends on FUNCTION_TRACER
	depends on HAVE_DYNAMIC_FTRACE
	default y
	help
	 This option will modify all the calls to ftrace dynamically
	 (will patch them out of the binary image and replace them
	 with a No-Op instruction) as they are called. A table is
	 created to dynamically enable them again.

	 This way a CONFIG_FUNCTION_TRACER kernel is slightly larger, but
	 otherwise has native performance as long as no tracing is active.

	 The changes to the code are done by a kernel thread that
	 wakes up once a second and checks to see if any ftrace calls
	 were made. If so, it runs stop_machine (stops all CPUs)
	 and modifies the code to jump over the call to ftrace.
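
	 With this option enabled, the set of traced functions can also
	 be limited at run time, for example (paths assume debugfs is
	 mounted at /sys/kernel/debug):

	    echo schedule > /sys/kernel/debug/tracing/set_ftrace_filter
	    echo function > /sys/kernel/debug/tracing/current_tracer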

config FUNCTION_PROFILER
	bool "Kernel function profiler"
	depends on FUNCTION_TRACER
	default n
	help
	 This option enables the kernel function profiler. A file is created
	 in debugfs called function_profile_enabled which defaults to zero.
	 When a 1 is echoed into this file, profiling begins, and when a
	 zero is entered, profiling stops. A file called "functions" in the
	 trace_stat directory shows the list of functions that have been
	 hit and their counters.
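
	 For example, a typical session (paths assume debugfs is mounted
	 at /sys/kernel/debug; the exact trace_stat file names may vary)
	 could look like:

	    echo 1 > /sys/kernel/debug/tracing/function_profile_enabled
	    cat /sys/kernel/debug/tracing/trace_stat/function*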

	 If in doubt, say N.

config FTRACE_MCOUNT_RECORD
	def_bool y
	depends on DYNAMIC_FTRACE
	depends on HAVE_FTRACE_MCOUNT_RECORD

config FTRACE_SELFTEST
	bool

config FTRACE_STARTUP_TEST
	bool "Perform a startup test on ftrace"
	depends on GENERIC_TRACER
	select FTRACE_SELFTEST
	help
	  This option performs a series of startup tests on ftrace. On bootup,
	  a series of tests are run to verify that the tracer is functioning
	  properly. It will do tests on all the configured tracers of ftrace.

config EVENT_TRACE_TEST_SYSCALLS
	bool "Run selftest on syscall events"
	depends on FTRACE_STARTUP_TEST
	help
	 This option will also enable testing every syscall event.
	 It simply enables each event, runs various loads with the event
	 enabled, and then disables it again. This adds a bit more time
	 to kernel bootup since it runs this test on every system call
	 defined.

	 TBD - enable a way to actually call the syscalls as we test their
	       events

config MMIOTRACE
	bool "Memory mapped IO tracing"
	depends on HAVE_MMIOTRACE_SUPPORT && PCI
	select GENERIC_TRACER
	help
	  Mmiotrace traces Memory Mapped I/O access and is meant for
	  debugging and reverse engineering. It is called from the ioremap
	  implementation and works via page faults. Tracing is disabled by
	  default and can be enabled at run-time.
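
	  For example, a typical capture session (paths assume debugfs is
	  mounted at /sys/kernel/debug; the dump file name is arbitrary)
	  could look like:

	    echo mmiotrace > /sys/kernel/debug/tracing/current_tracer
	    cat /sys/kernel/debug/tracing/trace_pipe > mydump.txt &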

	  See Documentation/trace/mmiotrace.txt.
	  If you are not helping to develop drivers, say N.

config MMIOTRACE_TEST
	tristate "Test module for mmiotrace"
	depends on MMIOTRACE && m
	help
	  This is a dumb module for testing mmiotrace. It is very dangerous
	  as it will write garbage to IO memory starting at a given address.
	  However, it should be safe to use on e.g. an unused portion of VRAM.

	  Say N, unless you absolutely know what you are doing.

config RING_BUFFER_BENCHMARK
	tristate "Ring buffer benchmark stress tester"
	depends on RING_BUFFER
	help
	  This option creates a test to stress the ring buffer and benchmark it.
	  It creates its own ring buffer such that it will not interfere with
	  any other users of the ring buffer (such as ftrace). It then creates
	  a producer and consumer that will run for 10 seconds and sleep for
	  10 seconds. Each interval it will print out the number of events
	  it recorded and give a rough estimate of how long each iteration took.

	  It does not disable interrupts or raise its priority, so it may be
	  affected by processes that are running.
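
	  If this is built as a module (assumed here to be named
	  ring_buffer_benchmark), a typical way to run it is:

	    modprobe ring_buffer_benchmark
	    dmesg | tail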

	  If unsure, say N.

endif # FTRACE

endif # TRACING_SUPPORT