From: Manfred Spraul I have the attached patch: I've modified my eventlog patch and log put_task_struct/get_task_struct calls. Zwane ran into one crash so far with logging enabled, but he got an nmi oops in the middle of the dump. I've reduced the number of lines that are dumped, I hope that the next dump will contain usable data. I want to figure out first which part of the refcounting is bust - release_thread twice, a refcount error somewhere in /proc, or perhaps setsid()? arch/i386/Kconfig | 9 ++ arch/i386/kernel/ptrace.c | 5 + fs/proc/base.c | 6 + fs/proc/inode.c | 4 - include/asm-i386/processor.h | 6 + include/linux/eventlog.h | 82 +++++++++++++++++++++ include/linux/sched.h | 1 include/linux/spinlock.h | 59 ++++++++++++--- kernel/exit.c | 5 + kernel/fork.c | 1 kernel/pid.c | 2 kernel/sched.c | 7 + lib/Makefile | 1 lib/eventlog.c | 165 +++++++++++++++++++++++++++++++++++++++++++ 14 files changed, 337 insertions(+), 16 deletions(-) diff -puN arch/i386/Kconfig~event-log-put_task_struct arch/i386/Kconfig --- 25/arch/i386/Kconfig~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/arch/i386/Kconfig 2003-06-03 20:36:02.000000000 -0700 @@ -1559,6 +1559,15 @@ config SPINLINE itself (as ".text.lock.filename"). This can be helpful for finding the callers of locks. +config DEBUG_EVENTLOG + bool "Eventlog for deadlock analysis" + depends on DEBUG_KERNEL + help + Say Y here to log spinlock and rwlock calls of all cpus. This is + best used in conjunction with the NMI watchdog or a kernel debugger + so that the event log is actually accessible. This option causes a + noticeable overhead, disable for production systems. 
+ config DEBUG_PAGEALLOC bool "Page alloc debugging" depends on DEBUG_SLAB diff -puN arch/i386/kernel/ptrace.c~event-log-put_task_struct arch/i386/kernel/ptrace.c --- 25/arch/i386/kernel/ptrace.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/arch/i386/kernel/ptrace.c 2003-06-03 20:36:02.000000000 -0700 @@ -252,8 +252,10 @@ asmlinkage int sys_ptrace(long request, ret = -ESRCH; read_lock(&tasklist_lock); child = find_task_by_pid(pid); - if (child) + if (child) { + save_event(EVENT_TASK_MISC_INC,child); get_task_struct(child); + } read_unlock(&tasklist_lock); if (!child) goto out; @@ -512,6 +514,7 @@ asmlinkage int sys_ptrace(long request, break; } out_tsk: + save_event(EVENT_TASK_MISC_DEC,child); put_task_struct(child); out: unlock_kernel(); diff -puN fs/proc/base.c~event-log-put_task_struct fs/proc/base.c --- 25/fs/proc/base.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/fs/proc/base.c 2003-06-03 20:36:02.000000000 -0700 @@ -838,6 +838,7 @@ static struct inode *proc_pid_make_inode * grab the reference to task. 
*/ get_task_struct(task); + save_event(EVENT_TASK_MISC_INC,task); ei->task = task; ei->type = ino; inode->i_uid = 0; @@ -1354,14 +1355,17 @@ struct dentry *proc_pid_lookup(struct in read_lock(&tasklist_lock); task = find_task_by_pid(pid); - if (task) + if (task) { get_task_struct(task); + save_event(EVENT_TASK_MISC_INC,task); + } read_unlock(&tasklist_lock); if (!task) goto out; inode = proc_pid_make_inode(dir->i_sb, task, PROC_PID_INO); + save_event(EVENT_TASK_MISC_DEC,task); put_task_struct(task); if (!inode) diff -puN fs/proc/inode.c~event-log-put_task_struct fs/proc/inode.c --- 25/fs/proc/inode.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/fs/proc/inode.c 2003-06-03 20:36:02.000000000 -0700 @@ -63,8 +63,10 @@ static void proc_delete_inode(struct ino /* Let go of any associated process */ tsk = PROC_I(inode)->task; - if (tsk) + if (tsk) { + save_event(EVENT_TASK_MISC_DEC,tsk); put_task_struct(tsk); + } /* Let go of any associated proc directory entry */ de = PROC_I(inode)->pde; diff -puN include/asm-i386/processor.h~event-log-put_task_struct include/asm-i386/processor.h --- 25/include/asm-i386/processor.h~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/include/asm-i386/processor.h 2003-06-03 20:36:02.000000000 -0700 @@ -498,7 +498,11 @@ static inline void rep_nop(void) __asm__ __volatile__("rep;nop": : :"memory"); } -#define cpu_relax() rep_nop() +#define cpu_relax() \ + do { \ + save_event(EVENT_CPU_RELAX, NULL); \ + rep_nop(); \ + } while(0) /* generic versions from gas */ #define GENERIC_NOP1 ".byte 0x90\n" diff -puN /dev/null include/linux/eventlog.h --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25-akpm/include/linux/eventlog.h 2003-06-03 20:36:02.000000000 -0700 @@ -0,0 +1,82 @@ +#ifndef __LINUX_EVENTLOG_H +#define __LINUX_EVENTLOG_H +/* + * eventlog.h - log deadlock prone events, to simplify deadlock diagnostics + * + * Copyright (C) 2003 Manfred Spraul + * + * This program is free software; 
you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include + +/* events that are pushed/poped on the eventstack */ +#define EVENT_STACK_PUSH 0x10000 +#define EVENT_STACK_POP 0x20000 +#define EVENT_STACK_REPLACE 0x40000 + +#define EVENT_SPINLOCK_BEFORE (0x0011|EVENT_STACK_PUSH) +#define EVENT_SPINLOCK (0x0012|EVENT_STACK_REPLACE) +#define EVENT_SPIN_TRYLOCK_SUCCESS (0x0013|EVENT_STACK_PUSH) +#define EVENT_SPIN_TRYLOCK_FAILED (0x0014) +#define EVENT_SPINLOCK_RELEASE (0x0015|EVENT_STACK_POP) +#define EVENT_READ_LOCK_BEFORE (0x0021|EVENT_STACK_PUSH) +#define EVENT_READ_LOCK (0x0022|EVENT_STACK_REPLACE) +#define EVENT_READ_LOCK_RELEASE (0x0023|EVENT_STACK_POP) +#define EVENT_WRITE_LOCK_BEFORE (0x0031|EVENT_STACK_PUSH) +#define EVENT_WRITE_TRYLOCK_SUCCESS (0x0032|EVENT_STACK_PUSH) +#define EVENT_WRITE_TRYLOCK_FAILED (0x0033) +#define EVENT_WRITE_LOCK (0x0034|EVENT_STACK_REPLACE) +#define EVENT_WRITE_LOCK_RELEASE (0x0035|EVENT_STACK_POP) +#define EVENT_CPU_RELAX (0x0041) +#define EVENT_TASK_NEW (0x0051) +#define EVENT_TASK_PID_INC (0x0052) +#define EVENT_TASK_PID_DEC (0x0053) +#define EVENT_TASK_SCHEDTAIL_DEC (0x0054) +#define EVENT_TASK_RELEASE_DEC (0x0055) +#define EVENT_TASK_MISC_INC (0x0056) +#define EVENT_TASK_MISC_DEC (0x0057) + +#ifdef CONFIG_DEBUG_EVENTLOG + +#define save_event(type,object) \ + do { \ + store_event(type, __FILE__, 
__LINE__, __stringify(object), object); \ + } while(0) + +void store_event(int type, char *file, int line, char *object_name, void *object_addr); +void print_events(void); + +#else +#define save_event(type,object) \ + do { } while(0) +static inline void print_events(void) { } +#endif +/* + * Tunables: + * EL_MAX_DEPTH: maximum spinlock recursion level that can be stored. + * Increasing the number increases the memory consumption. + * RECENT_LEN: number of recent events that are logged. + * This log contains the last few events that were logged. + * Increasing this number is only possible with a serial + * console, otherwise the log will scroll of your screen. + * Hint: Boot with vga=ask and select a video mode with 60 lines, + * that should be sufficient for dual-cpu systems with RECENT_LEN + * 16. + */ +#define EL_MAX_DEPTH 128 +#define RECENT_LEN 150 + +#endif /* __LINUX_EVENTLOG_H */ diff -puN include/linux/sched.h~event-log-put_task_struct include/linux/sched.h --- 25/include/linux/sched.h~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/include/linux/sched.h 2003-06-03 20:36:02.000000000 -0700 @@ -819,6 +819,7 @@ static inline void cond_resched_lock(spi { if (need_resched()) { _raw_spin_unlock(lock); + save_event(EVENT_SPINLOCK_RELEASE,lock); preempt_enable_no_resched(); __cond_resched(); spin_lock(lock); diff -puN include/linux/spinlock.h~event-log-put_task_struct include/linux/spinlock.h --- 25/include/linux/spinlock.h~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/include/linux/spinlock.h 2003-06-03 20:36:02.000000000 -0700 @@ -12,6 +12,7 @@ #include #include #include +#include #include @@ -200,14 +201,18 @@ extern int _metered_write_trylock(rwloc * methods are defined as nops in the case they are not required. */ #define spin_trylock(lock) ({preempt_disable(); _raw_spin_trylock(lock) ? 
\ - 1 : ({preempt_enable(); 0;});}) + ({save_event(EVENT_SPIN_TRYLOCK_SUCCESS, lock); 1;}) \ + : ({save_event(EVENT_SPIN_TRYLOCK_FAILED, lock); \ + preempt_enable(); 0;});}) #define write_trylock(lock) ({preempt_disable();_raw_write_trylock(lock) ? \ - 1 : ({preempt_enable(); 0;});}) + ({save_event(EVENT_WRITE_TRYLOCK_SUCCESS, lock); 1;}) \ + : ({save_event(EVENT_WRITE_TRYLOCK_FAILED, lock); \ + preempt_enable(); 0;});}) /* Where's read_trylock? */ -#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) +#if defined(CONFIG_SMP) && defined(CONFIG_PREEMPT) && !defined(CONFIG_DEBUG_EVENTLOG) void __preempt_spin_lock(spinlock_t *lock); void __preempt_write_lock(rwlock_t *lock); @@ -229,37 +234,46 @@ do { \ #define spin_lock(lock) \ do { \ preempt_disable(); \ + save_event(EVENT_SPINLOCK_BEFORE, lock); \ _raw_spin_lock(lock); \ + save_event(EVENT_SPINLOCK, lock); \ } while(0) #define write_lock(lock) \ do { \ preempt_disable(); \ + save_event(EVENT_WRITE_LOCK_BEFORE, lock); \ _raw_write_lock(lock); \ + save_event(EVENT_WRITE_LOCK, lock); \ } while(0) #endif #define read_lock(lock) \ do { \ preempt_disable(); \ + save_event(EVENT_READ_LOCK_BEFORE, lock); \ _raw_read_lock(lock); \ + save_event(EVENT_READ_LOCK, lock); \ } while(0) #define spin_unlock(lock) \ do { \ _raw_spin_unlock(lock); \ + save_event(EVENT_SPINLOCK_RELEASE, lock); \ preempt_enable(); \ } while (0) #define write_unlock(lock) \ do { \ _raw_write_unlock(lock); \ + save_event(EVENT_WRITE_LOCK_RELEASE, lock); \ preempt_enable(); \ } while(0) #define read_unlock(lock) \ do { \ _raw_read_unlock(lock); \ + save_event(EVENT_READ_LOCK_RELEASE, lock); \ preempt_enable(); \ } while(0) @@ -267,81 +281,95 @@ do { \ do { \ local_irq_save(flags); \ preempt_disable(); \ + save_event(EVENT_SPINLOCK_BEFORE, lock); \ _raw_spin_lock(lock); \ + save_event(EVENT_SPINLOCK, lock); \ } while (0) #define spin_lock_irq(lock) \ do { \ local_irq_disable(); \ preempt_disable(); \ + save_event(EVENT_SPINLOCK_BEFORE, lock); \ 
_raw_spin_lock(lock); \ + save_event(EVENT_SPINLOCK, lock); \ } while (0) #define spin_lock_bh(lock) \ do { \ local_bh_disable(); \ preempt_disable(); \ + save_event(EVENT_SPINLOCK_BEFORE, lock); \ _raw_spin_lock(lock); \ + save_event(EVENT_SPINLOCK, lock); \ } while (0) #define read_lock_irqsave(lock, flags) \ do { \ local_irq_save(flags); \ preempt_disable(); \ + save_event(EVENT_READ_LOCK_BEFORE, lock); \ _raw_read_lock(lock); \ + save_event(EVENT_READ_LOCK, lock); \ } while (0) #define read_lock_irq(lock) \ do { \ local_irq_disable(); \ preempt_disable(); \ + save_event(EVENT_READ_LOCK_BEFORE, lock); \ _raw_read_lock(lock); \ + save_event(EVENT_READ_LOCK, lock); \ } while (0) #define read_lock_bh(lock) \ do { \ local_bh_disable(); \ preempt_disable(); \ + save_event(EVENT_READ_LOCK_BEFORE, lock); \ _raw_read_lock(lock); \ + save_event(EVENT_READ_LOCK, lock); \ } while (0) #define write_lock_irqsave(lock, flags) \ do { \ local_irq_save(flags); \ preempt_disable(); \ + save_event(EVENT_WRITE_LOCK_BEFORE, lock); \ _raw_write_lock(lock); \ + save_event(EVENT_WRITE_LOCK, lock); \ } while (0) #define write_lock_irq(lock) \ do { \ local_irq_disable(); \ preempt_disable(); \ + save_event(EVENT_WRITE_LOCK_BEFORE, lock); \ _raw_write_lock(lock); \ + save_event(EVENT_WRITE_LOCK, lock); \ } while (0) #define write_lock_bh(lock) \ do { \ local_bh_disable(); \ preempt_disable(); \ + save_event(EVENT_WRITE_LOCK_BEFORE, lock); \ _raw_write_lock(lock); \ + save_event(EVENT_WRITE_LOCK, lock); \ } while (0) #define spin_unlock_irqrestore(lock, flags) \ do { \ _raw_spin_unlock(lock); \ + save_event(EVENT_SPINLOCK_RELEASE, lock); \ local_irq_restore(flags); \ preempt_enable(); \ } while (0) -#define _raw_spin_unlock_irqrestore(lock, flags) \ -do { \ - _raw_spin_unlock(lock); \ - local_irq_restore(flags); \ -} while (0) - #define spin_unlock_irq(lock) \ do { \ _raw_spin_unlock(lock); \ + save_event(EVENT_SPINLOCK_RELEASE, lock); \ local_irq_enable(); \ preempt_enable(); \ } while 
(0) @@ -349,6 +377,7 @@ do { \ #define spin_unlock_bh(lock) \ do { \ _raw_spin_unlock(lock); \ + save_event(EVENT_SPINLOCK_RELEASE, lock); \ preempt_enable(); \ local_bh_enable(); \ } while (0) @@ -356,6 +385,7 @@ do { \ #define read_unlock_irqrestore(lock, flags) \ do { \ _raw_read_unlock(lock); \ + save_event(EVENT_READ_LOCK_RELEASE, lock); \ local_irq_restore(flags); \ preempt_enable(); \ } while (0) @@ -363,6 +393,7 @@ do { \ #define read_unlock_irq(lock) \ do { \ _raw_read_unlock(lock); \ + save_event(EVENT_READ_LOCK_RELEASE, lock); \ local_irq_enable(); \ preempt_enable(); \ } while (0) @@ -370,6 +401,7 @@ do { \ #define read_unlock_bh(lock) \ do { \ _raw_read_unlock(lock); \ + save_event(EVENT_READ_LOCK_RELEASE, lock); \ preempt_enable(); \ local_bh_enable(); \ } while (0) @@ -377,6 +409,7 @@ do { \ #define write_unlock_irqrestore(lock, flags) \ do { \ _raw_write_unlock(lock); \ + save_event(EVENT_WRITE_LOCK_RELEASE, lock); \ local_irq_restore(flags); \ preempt_enable(); \ } while (0) @@ -384,6 +417,7 @@ do { \ #define write_unlock_irq(lock) \ do { \ _raw_write_unlock(lock); \ + save_event(EVENT_WRITE_LOCK_RELEASE, lock); \ local_irq_enable(); \ preempt_enable(); \ } while (0) @@ -391,13 +425,16 @@ do { \ #define write_unlock_bh(lock) \ do { \ _raw_write_unlock(lock); \ + save_event(EVENT_WRITE_LOCK_RELEASE, lock); \ preempt_enable(); \ local_bh_enable(); \ } while (0) #define spin_trylock_bh(lock) ({ local_bh_disable(); preempt_disable(); \ - _raw_spin_trylock(lock) ? 1 : \ - ({preempt_enable(); local_bh_enable(); 0;});}) + _raw_spin_trylock(lock) ? 
\ + ({save_event(EVENT_SPIN_TRYLOCK_SUCCESS, lock); 1;}) \ + : ({save_event(EVENT_SPIN_TRYLOCK_FAILED,lock); \ + preempt_enable(); local_bh_enable(); 0;});}) #ifdef CONFIG_LOCKMETER #undef spin_lock diff -puN kernel/exit.c~event-log-put_task_struct kernel/exit.c --- 25/kernel/exit.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/kernel/exit.c 2003-06-03 20:36:02.000000000 -0700 @@ -85,6 +85,7 @@ void release_task(struct task_struct * p spin_unlock(&p->proc_lock); proc_pid_flush(proc_dentry); release_thread(p); + save_event(EVENT_TASK_RELEASE_DEC,p); put_task_struct(p); } @@ -667,6 +668,7 @@ static void exit_notify(struct task_stru * complete, and with interrupts blocked that will never happen. */ _raw_write_unlock(&tasklist_lock); + save_event(EVENT_WRITE_LOCK_RELEASE,&tasklist_lock); local_irq_enable(); } @@ -932,6 +934,7 @@ static int wait_task_stopped(task_t *p, * possibly take page faults for user memory. */ get_task_struct(p); + save_event(EVENT_TASK_MISC_INC,p); read_unlock(&tasklist_lock); write_lock_irq(&tasklist_lock); @@ -958,6 +961,7 @@ static int wait_task_stopped(task_t *p, * resumed, or it resumed and then died. 
*/ write_unlock_irq(&tasklist_lock); + save_event(EVENT_TASK_MISC_DEC,p); put_task_struct(p); read_lock(&tasklist_lock); return 0; @@ -974,6 +978,7 @@ static int wait_task_stopped(task_t *p, retval = put_user((exit_code << 8) | 0x7f, stat_addr); if (!retval) retval = p->pid; + save_event(EVENT_TASK_MISC_DEC,p); put_task_struct(p); BUG_ON(!retval); diff -puN kernel/fork.c~event-log-put_task_struct kernel/fork.c --- 25/kernel/fork.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/kernel/fork.c 2003-06-03 20:36:02.000000000 -0700 @@ -82,6 +82,7 @@ static inline struct task_struct *dup_ta memcpy(tsk, orig, sizeof(*tsk) + sizeof(struct thread_info)); tsk->thread_info = (struct thread_info *)(tsk + 1); atomic_set(&tsk->usage, 2); + save_event(EVENT_TASK_NEW,tsk); } return tsk; diff -puN kernel/pid.c~event-log-put_task_struct kernel/pid.c --- 25/kernel/pid.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/kernel/pid.c 2003-06-03 20:36:02.000000000 -0700 @@ -178,6 +178,7 @@ int attach_pid(task_t *task, enum pid_ty INIT_LIST_HEAD(&pid->task_list); pid->task = task; get_task_struct(task); + save_event(EVENT_TASK_PID_INC,task); list_add(&pid->hash_chain, &pid_hash[type][pid_hashfn(nr)]); } list_add_tail(&task->pids[type].pid_chain, &pid->task_list); @@ -198,6 +199,7 @@ static inline int __detach_pid(task_t *t nr = pid->nr; list_del(&pid->hash_chain); + save_event(EVENT_TASK_PID_DEC,pid->task); put_task_struct(pid->task); return nr; diff -puN kernel/sched.c~event-log-put_task_struct kernel/sched.c --- 25/kernel/sched.c~event-log-put_task_struct 2003-06-03 20:36:02.000000000 -0700 +++ 25-akpm/kernel/sched.c 2003-06-03 20:36:02.000000000 -0700 @@ -613,8 +613,10 @@ static inline void finish_task_switch(ta finish_arch_switch(rq, prev); if (mm) mmdrop(mm); - if (prev->state & (TASK_DEAD | TASK_ZOMBIE)) + if (prev->state & (TASK_DEAD | TASK_ZOMBIE)) { + save_event(EVENT_TASK_SCHEDTAIL_DEC,prev); put_task_struct(prev); + } } /** @@ 
-1902,6 +1904,7 @@ asmlinkage long sys_sched_setaffinity(pi * usage count and then drop tasklist_lock. */ get_task_struct(p); + save_event(EVENT_TASK_MISC_INC,p); read_unlock(&tasklist_lock); retval = -EPERM; @@ -1913,6 +1916,7 @@ asmlinkage long sys_sched_setaffinity(pi set_cpus_allowed(p, new_mask); out_unlock: + save_event(EVENT_TASK_MISC_DEC,p); put_task_struct(p); return retval; } @@ -1985,6 +1989,7 @@ asmlinkage long sys_sched_yield(void) * no need to preempt: */ _raw_spin_unlock(&rq->lock); + save_event(EVENT_SPINLOCK_RELEASE,&rq->lock); preempt_enable_no_resched(); schedule(); diff -puN /dev/null lib/eventlog.c --- /dev/null 2002-08-30 16:31:37.000000000 -0700 +++ 25-akpm/lib/eventlog.c 2003-06-03 20:36:02.000000000 -0700 @@ -0,0 +1,165 @@ +/* + * eventlog.c - log deadlock prone events, to simplify deadlock diagnostics + * + * Copyright (C) 2003 Manfred Spraul + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License as published by + * the Free Software Foundation; either version 2 of the License, or + * (at your option) any later version. + + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + * + */ +#include +#include +#include +#include +#include +#include +#include + +struct eventdata { + int depth; + int type; + char *file; + int line; + char *object_name; + void *object_addr; +}; + +static struct eventdata event_stack[NR_CPUS][EL_MAX_DEPTH]; +static struct eventdata event_recent[NR_CPUS][RECENT_LEN]; +static int event_stackpos[NR_CPUS]; +static unsigned int event_recentpos[NR_CPUS]; +static atomic_t log_off = ATOMIC_INIT(0); + +void store_event(int type, char *file, int line, char *object_name, void *object_addr) +{ + unsigned long flags; + int cpu; + int enabled; + struct eventdata* ped = NULL; + + local_irq_save(flags); + cpu = get_cpu(); + enabled = !atomic_read(&log_off); + + if (enabled) { + ped = &event_recent[cpu][event_recentpos[cpu]%RECENT_LEN]; + ped->depth = event_stackpos[cpu]; + ped->type = type; + ped->file = file; + ped->line = line; + ped->object_name = object_name; + ped->object_addr = object_addr; + } + + if (type & EVENT_STACK_PUSH) { + if (event_stackpos[cpu] < EL_MAX_DEPTH && enabled) { + event_stack[cpu][event_stackpos[cpu]] = *ped; + } + event_stackpos[cpu]++; + } else if (type & EVENT_STACK_POP) { + event_stackpos[cpu]--; + BUG_ON(event_stackpos[cpu] < 0); + } else if (type & EVENT_STACK_REPLACE) { + BUG_ON(event_stackpos[cpu] <= 0); + if (event_stackpos[cpu]-1 < EL_MAX_DEPTH && enabled) { + event_stack[cpu][event_stackpos[cpu]-1] = *ped; + } + } + if (enabled && type >= EVENT_TASK_NEW && type <= 0x100) { + event_recentpos[cpu]++; + } + put_cpu(); + local_irq_restore(flags); +} + +static void print_one_event(int no, struct eventdata *ped) +{ + char *name; + switch(ped->type) { + case EVENT_SPINLOCK_BEFORE: name = "spinlock_before"; break; + case EVENT_SPINLOCK: name = "spinlock"; break; + case 
EVENT_SPIN_TRYLOCK_SUCCESS: name = "spin_trylock_success"; break; + case EVENT_SPIN_TRYLOCK_FAILED: name = "spin_trylock_failed"; break; + case EVENT_SPINLOCK_RELEASE: name = "spinlock_release"; break; + case EVENT_READ_LOCK_BEFORE: name = "read_lock_before"; break; + case EVENT_READ_LOCK: name = "read_lock"; break; + case EVENT_READ_LOCK_RELEASE: name = "read_lock_release"; break; + case EVENT_WRITE_LOCK_BEFORE: name = "write_lock_before"; break; + case EVENT_WRITE_LOCK: name = "write_lock"; break; + case EVENT_WRITE_TRYLOCK_SUCCESS: name = "write_trylock_success"; break; + case EVENT_WRITE_TRYLOCK_FAILED: name = "write_trylock_failed"; break; + case EVENT_WRITE_LOCK_RELEASE: name = "write_lock_release"; break; + case EVENT_CPU_RELAX: name = "cpu_relax"; break; + case EVENT_TASK_NEW: name = "task_new"; break; + case EVENT_TASK_PID_INC: name = "task_pid_inc"; break; + case EVENT_TASK_PID_DEC: name = "task_pid_dec"; break; + case EVENT_TASK_SCHEDTAIL_DEC: name = "task_schedtail_dec"; break; + case EVENT_TASK_RELEASE_DEC: name = "task_release_dec"; break; + case EVENT_TASK_MISC_INC: name = "task_misc_inc"; break; + case EVENT_TASK_MISC_DEC: name = "task_misc_dec"; break; + default: + printk(KERN_INFO"%3d: unknown type %xh.\n",no, ped->type); + name="Duh"; + break; + } + printk(KERN_INFO"%3d(%d): %s from %s/%d on %s(%p)\n", + no, ped->depth, name, ped->file, ped->line, ped->object_name, + ped->object_addr); +} + +void print_events(void) +{ + unsigned long flags; + int cpu; + int i; + local_irq_save(flags); + cpu = get_cpu(); + atomic_inc(&log_off); + + for(i=0;i EL_MAX_DEPTH) + lastpos = EL_MAX_DEPTH; + for (j=0;j