#ifndef __ASM_POWERPC_MMU_CONTEXT_H
#define __ASM_POWERPC_MMU_CONTEXT_H
#ifdef __KERNEL__

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm-generic/mm_hooks.h>
#include <asm/cputhreads.h>

/*
 * Most of the context management is out of line
 */
extern int init_new_context(struct task_struct *tsk, struct mm_struct *mm);
extern void destroy_context(struct mm_struct *mm);

extern void switch_mmu_context(struct mm_struct *prev, struct mm_struct *next);
extern void switch_stab(struct task_struct *tsk, struct mm_struct *mm);
extern void switch_slb(struct task_struct *tsk, struct mm_struct *mm);
extern void set_context(unsigned long id, pgd_t *pgd);
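
/*
 * Which of these is used depends on the sub-architecture: the 64-bit hash
 * MMU path in switch_mm() below uses switch_slb()/switch_stab(), while the
 * other platforms go through the out-of-line switch_mmu_context() (which
 * typically ends up calling set_context()).
 */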

#ifdef CONFIG_PPC_BOOK3S_64
static inline void mmu_context_init(void) { }
#else
extern void mmu_context_init(void);
#endif

/*
 * switch_mm is the entry point called from the architecture independent
 * code in kernel/sched.c
 */
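/*
 * Roughly, that caller does
 *
 *	switch_mm(prev->active_mm, next->mm, next);
 *
 * with interrupts disabled.
 */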
static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
			     struct task_struct *tsk)
{
	/* Mark this context as having been used on the new CPU */
	cpumask_set_cpu(smp_processor_id(), mm_cpumask(next));

	/* 32-bit keeps track of the current PGDIR in the thread struct */
#ifdef CONFIG_PPC32
	tsk->thread.pgdir = next->pgd;
#endif /* CONFIG_PPC32 */

	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = next->pgd;
#endif
	/* Nothing else to do if we aren't actually switching */
	if (prev == next)
		return;

	/* We must stop all altivec streams before changing the HW
	 * context
	 */
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		asm volatile ("dssall");
#endif /* CONFIG_ALTIVEC */

	/* The actual HW switching method differs between the various
	 * sub-architectures.
	 */
#ifdef CONFIG_PPC_STD_MMU_64
	if (cpu_has_feature(CPU_FTR_SLB))
		switch_slb(tsk, next);
	else
		switch_stab(tsk, next);
#else
	/* Out of line for now */
	switch_mmu_context(prev, next);
#endif

}

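/* Nothing to do when an mm is deactivated: the hardware context is only
 * ever changed via activate_mm()/switch_mm() above.
 */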
#define deactivate_mm(tsk,mm)	do { } while (0)

/*
 * After we have set current->mm to a new value, this activates
 * the context for the new mm so we see the new mappings.
 */
static inline void activate_mm(struct mm_struct *prev, struct mm_struct *next)
{
	unsigned long flags;

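	/* switch_mm() is otherwise only entered from the scheduler with
	 * interrupts already off, so disable them here too.
	 */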
	local_irq_save(flags);
	switch_mm(prev, next, current);
	local_irq_restore(flags);
}

/* We don't currently use enter_lazy_tlb() for anything */
static inline void enter_lazy_tlb(struct mm_struct *mm,
				  struct task_struct *tsk)
{
	/* 64-bit Book3E keeps track of current PGD in the PACA */
#ifdef CONFIG_PPC_BOOK3E_64
	get_paca()->pgd = NULL;
#endif
}

#endif /* __KERNEL__ */
#endif /* __ASM_POWERPC_MMU_CONTEXT_H */