From: Ingo Molnar

add per-arch sched_cacheflush() which is a write-back cacheflush used by
the migration-cost calibration code at bootup time.

Signed-off-by: Ingo Molnar
Signed-off-by: Andrew Morton
---

 include/asm-i386/system.h   |    9 +++++++++
 include/asm-ia64/system.h   |    9 +++++++++
 include/asm-ppc64/system.h  |    9 +++++++++
 include/asm-x86_64/system.h |    9 +++++++++
 kernel/sched.c              |   17 -----------------
 5 files changed, 36 insertions(+), 17 deletions(-)

diff -puN include/asm-i386/system.h~sched-add-cacheflush-asm include/asm-i386/system.h
--- 25/include/asm-i386/system.h~sched-add-cacheflush-asm	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-i386/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -469,6 +469,15 @@ void enable_hlt(void);
 extern int es7000_plat;
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible:
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 extern unsigned long arch_align_stack(unsigned long sp);
 
 #endif
diff -puN include/asm-ia64/system.h~sched-add-cacheflush-asm include/asm-ia64/system.h
--- 25/include/asm-ia64/system.h~sched-add-cacheflush-asm	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-ia64/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -278,6 +278,15 @@ extern void ia64_load_extra (struct task
 
 void cpu_idle_wait(void);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	ia64_sal_cache_flush(1);
+}
+
 #define arch_align_stack(x) (x)
 
 #endif /* __KERNEL__ */
diff -puN include/asm-ppc64/system.h~sched-add-cacheflush-asm include/asm-ppc64/system.h
--- 25/include/asm-ppc64/system.h~sched-add-cacheflush-asm	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-ppc64/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -141,6 +141,15 @@ struct thread_struct;
 extern struct task_struct * _switch(struct thread_struct *prev,
 				    struct thread_struct *next);
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ * TODO: how do you cacheflush on ppc64?
+ */
+static inline void sched_cacheflush(void)
+{
+}
+
 static inline int __is_processor(unsigned long pv)
 {
 	unsigned long pvr;
diff -puN include/asm-x86_64/system.h~sched-add-cacheflush-asm include/asm-x86_64/system.h
--- 25/include/asm-x86_64/system.h~sched-add-cacheflush-asm	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/include/asm-x86_64/system.h	2005-06-25 01:17:14.000000000 -0700
@@ -180,6 +180,15 @@ static inline void write_cr4(unsigned lo
 #define wbinvd() \
 	__asm__ __volatile__ ("wbinvd": : :"memory");
 
+/*
+ * On SMP systems, when the scheduler does migration-cost autodetection,
+ * it needs a way to flush as much of the CPU's caches as possible.
+ */
+static inline void sched_cacheflush(void)
+{
+	wbinvd();
+}
+
 #endif /* __KERNEL__ */
 
 #define nop() __asm__ __volatile__ ("nop")
diff -puN kernel/sched.c~sched-add-cacheflush-asm kernel/sched.c
--- 25/kernel/sched.c~sched-add-cacheflush-asm	2005-06-25 01:17:14.000000000 -0700
+++ 25-akpm/kernel/sched.c	2005-06-25 01:17:14.000000000 -0700
@@ -5089,23 +5089,6 @@ __init static void touch_cache(void *__c
 	}
 }
 
-// FIXME: move sched_cacheflush into arch include files:
-
-#ifdef CONFIG_IA64
-# include <asm/sal.h>
-#endif
-
-__init static void sched_cacheflush(void)
-{
-#ifdef CONFIG_X86
-	asm ("wbinvd");
-#elif defined(CONFIG_IA64)
-	ia64_sal_cache_flush(1); // what argument does d/cache flush?
-#else
-# warning implement sched_cacheflush()! Calibration results may be unreliable.
-#endif
-}
-
 /*
  * Measure the cache-cost of one task migration. Returns in units of nsec.
  */
_