Re: wait_on_irq, CPU1

Andi Kleen (ak@suse.de)
Wed, 29 Dec 1999 13:27:38 +0100


On Wed, Dec 29, 1999 at 10:23:43AM +0100, Manfred Spraul wrote:
> Andi Kleen wrote:
> >
> > On Tue, Dec 28, 1999 at 11:59:12PM +0100, Andrea Arcangeli wrote:
> > > > I thought about a tiny patch which calls
> > > > smp_call_function() in wait_on_irq(): the IPI would call a stripped-down
> > > > version of show_registers().
> > >
> > > FYI: Andi just wrote the code to do that for the wait_on_bh case for other
> > > needs; extending Andi's original patch to cover wait_on_irq as well should be
> > > trivial.
> >
> > It is part of the kbacktrace patch. It should give a full backtrace for
> > wait_on_bh and wait_on_irq. kbacktrace is generally intended as a replacement
> > for the traditional *((int*)0)=0;
> >
>
> There are 2 buglets in the code:
>
> * the kernel stack size is THREAD_SIZE (i.e. 8192); your patch assumes
> that the stack size is 4096.

Ok, it seems the bug was just inherited from show_registers()...
It should probably be corrected there too.
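
For reference, the boundary test the scan loop now relies on is plain
alignment arithmetic on the stack pointer: on i386 in 2.2 the task struct
and the kernel stack share one THREAD_SIZE (8192 byte) allocation, so the
top of the stack sits on an 8K boundary. A small userspace sketch of that
arithmetic with a made-up stack pointer value; none of this is part of
the patch:

    #include <stdio.h>

    #define THREAD_SIZE 8192UL  /* i386, 2.2: task_struct + kernel stack */

    /* Round sp up to the enclosing THREAD_SIZE boundary -- the point
       where the patch's "while (((long) stack & (8192-1)) != 0)"
       loop stops scanning. */
    static unsigned long stack_top(unsigned long sp)
    {
            return (sp + THREAD_SIZE - 1) & ~(THREAD_SIZE - 1);
    }

    int main(void)
    {
            unsigned long sp = 0xc7ffc120UL;        /* made-up value */
            printf("sp=%#lx top=%#lx bytes to scan=%lu\n",
                   sp, stack_top(sp), stack_top(sp) - sp);
            /* with the old 4095 mask the scan would already stop at
               0xc7ffd000 here, halfway down the real 8K stack area */
            return 0;
    }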

Here is an updated patch that fixes the problems.
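
A note for people reading the patch: the traces for the other CPUs come
from an smp_call_function() IPI that makes each remote CPU record a
pointer into its own kernel stack, which this CPU then scans. The sketch
below only restates, with comments, what getstack() and
backtrace_all_cpus() in the patch do; backtrace_other_cpus() is a made-up
name, not an extra interface, and it assumes 2.2's
smp_call_function(func, info, retry, wait) plus the includes kbacktrace.c
already pulls in:

    #define MAX_CPUS 32             /* same limit as in the patch */

    static void getstack(void *ptr)
    {
            unsigned long **stackmap = ptr;
            unsigned long dummy = 0;
            /* &dummy lies on this CPU's current kernel stack, so it is
               a usable starting point for scanning that stack later */
            stackmap[smp_processor_id()] = &dummy;
    }

    static void backtrace_other_cpus(void)      /* hypothetical name */
    {
            unsigned long *sps[MAX_CPUS] = {0};
            int i, cpu = smp_processor_id();

            /* wait=1: return only after all other CPUs ran getstack(),
               so every recorded pointer is filled in.  The stack
               contents are a snapshot from IPI time and may be partly
               overwritten by the time they get printed. */
            if (smp_call_function(getstack, sps, 1, 1) < 0)
                    return;
            for (i = 0; i < smp_num_cpus; i++) {
                    int w = cpu_logical_map(i);
                    if (w != cpu && sps[w])
                            backtrace_stack(sps[w]);
            }
    }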

-Andi

--- linux-2.2.13/arch/i386/kernel/irq.c.NOSHOW Wed Dec 29 02:43:21 1999
+++ linux-2.2.13/arch/i386/kernel/irq.c Wed Dec 29 13:08:44 1999
@@ -467,25 +470,25 @@
}
}

+#define for_all_cpus(i) for((i)=0;(i)<smp_num_cpus;(i)++)
+
+extern void backtrace_all_cpus(void);
+
static void show(char * str)
{
int i;
- unsigned long *stack;
int cpu = smp_processor_id();
- extern char *get_options(char *str, int *ints);

printk("\n%s, CPU %d:\n", str, cpu);
- printk("irq: %d [%d %d]\n",
- atomic_read(&global_irq_count), local_irq_count[0], local_irq_count[1]);
- printk("bh: %d [%d %d]\n",
- atomic_read(&global_bh_count), local_bh_count[0], local_bh_count[1]);
- stack = (unsigned long *) &stack;
- for (i = 40; i ; i--) {
- unsigned long x = *++stack;
- if (x > (unsigned long) &get_options && x < (unsigned long) &vsprintf) {
- printk("<[%08lx]> ", x);
- }
- }
+ printk("irq: %d [", atomic_read(&global_irq_count));
+ for_all_cpus(i)
+ printk("%d ", local_irq_count[cpu_logical_map(i)]);
+ printk("]\nbh: %d [", atomic_read(&global_bh_count));
+ for_all_cpus(i)
+ printk("%d ", local_bh_count[cpu_logical_map(i)]);
+ printk("]\n");
+
+ backtrace_all_cpus();
}

#define MAXCOUNT 100000000
--- linux-2.2.13/arch/i386/lib/Makefile.NOSHOW Wed Dec 29 02:43:22 1999
+++ linux-2.2.13/arch/i386/lib/Makefile Wed Dec 29 02:43:22 1999
@@ -9,4 +9,8 @@
L_OBJS = checksum.o old-checksum.o semaphore.o delay.o \
usercopy.o getuser.o putuser.o

+LX_OBJS = kbacktrace.o
+
+
+
include $(TOPDIR)/Rules.make
--- linux-2.2.13/arch/i386/lib/kbacktrace.c.NOSHOW Wed Dec 29 02:43:22 1999
+++ linux-2.2.13/arch/i386/lib/kbacktrace.c Wed Dec 29 13:08:41 1999
@@ -0,0 +1,179 @@
+/*
+ Generic backtrace for i386.
+ Based on the code in i386/kernel/traps.c, but with accurate module checking; it
+ does not require a frame pointer on the stack.
+
+ Hacked by Andi Kleen <ak@suse.de>.
+ Should call to kdb if available.
+ Subject to the GPL.
+ */
+#include <linux/types.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <asm/pgtable.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+// these two are not exported in a standard kernel
+extern unsigned long _etext, _stext;
+extern struct module *module_list;
+
+#define MODULE_RANGE (8*1024*1024)
+
+int backtrace_stack_depth = 24;
+
+unsigned long print_trace(unsigned long *stack)
+{
+ int i;
+ unsigned long module_start,addr;
+ const char *mname = NULL;
+ unsigned long firstret=0;
+
+ static inline char * module_end_addr(struct module *mp) {
+ return ((char *)mp) + mp->size;
+ }
+
+ static inline int valid_addr(unsigned long addr) {
+ return (((addr >= (unsigned long) &_stext) &&
+ (addr <= (unsigned long) &_etext)));
+ }
+
+ static inline const char * in_module(unsigned long addr) {
+ struct module *mp;
+ for (mp = module_list; mp; mp = mp->next) {
+ if (addr >= (unsigned long) (mp+1) &&
+ addr <= (unsigned long) module_end_addr(mp))
+ return mp->name;
+ }
+ return 0;
+ }
+
+ printk(KERN_DEBUG "Call Trace: ");
+
+ module_start = PAGE_OFFSET + (max_mapnr << PAGE_SHIFT);
+ module_start = ((module_start + VMALLOC_OFFSET) & ~(VMALLOC_OFFSET-1));
+
+ i = 1;
+ while (((long) stack & (8192-1)) != 0) {
+ const char *omname = mname;
+ addr = *stack++;
+ if (valid_addr(addr) ||
+ (addr >= module_start && (mname = in_module(addr)))) {
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ if (!firstret) firstret = addr;
+ printk("[<%08lx>] ", addr);
+ if (mname && mname != omname)
+ printk(" (%s) ", mname);
+ i++;
+ }
+ if (i > 30) {
+ printk("...");
+ break;
+ }
+ }
+ printk("\n");
+ return firstret;
+}
+
+void print_stack(unsigned long *stack)
+{
+ int i;
+ printk(KERN_DEBUG "Stack: ");
+ for(i=0; i < backtrace_stack_depth; i++) {
+ if (((long) stack & (8192-1)) == 0)
+ break;
+ if (i && ((i % 8) == 0))
+ printk("\n ");
+ printk("%08lx ", *stack++);
+ }
+ printk("\n");
+}
+
+void print_code(unsigned char *eip)
+{
+ int i;
+ printk(KERN_DEBUG "Code: ");
+ for(i = 0; i < 20; i++)
+ printk("%02x ", *eip++);
+ printk("\n");
+}
+
+void backtrace(void)
+{
+ unsigned char *esp;
+
+ asm("movl %%esp,%0\n" : "=r" (esp));
+
+ print_stack((unsigned long *) esp);
+ print_trace((unsigned long *) esp);
+ print_code((unsigned char *) __builtin_return_address(0));
+}
+
+void backtrace_stack(unsigned long *stack)
+{
+ unsigned long code;
+ print_stack(stack);
+ code = print_trace(stack);
+ print_code((unsigned char *) code);
+}
+
+#if defined(__SMP__) || defined(CONFIG_SMP)
+
+#define for_all_cpus(i) for((i)=0;(i)<smp_num_cpus;(i)++)
+#define MAX_CPUS 32
+
+static void getstack(void *ptr)
+{
+ unsigned long **stackmap = ptr;
+ unsigned long dummy = 0;
+ stackmap[smp_processor_id()] = &dummy;
+}
+
+void backtrace_all_cpus(void)
+{
+ unsigned long *sps[MAX_CPUS] = {0};
+ int i;
+ int cpu = smp_processor_id();
+
+ printk("local trace [cpu %d]:\n", cpu);
+ backtrace();
+
+ if (smp_call_function(getstack, sps, 1, 1) < 0)
+ printk("smp_call_function failed\n");
+ else {
+ for_all_cpus(i) {
+ int w = cpu_logical_map(i);
+ if (w != cpu && sps[w]) {
+ printk("trace for cpu %d:\n", w);
+ backtrace_stack(sps[w]);
+ }
+ }
+ }
+
+}
+
+#else
+void backtrace_all_cpus(void)
+{
+ backtrace();
+}
+#endif
+
+EXPORT_SYMBOL(backtrace);
+EXPORT_SYMBOL(backtrace_stack);
+EXPORT_SYMBOL(backtrace_all_cpus);
+
+#ifdef MODULE
+int init_module(void)
+{
+#ifdef TEST
+ printk(KERN_DEBUG "test\n");
+ backtrace();
+#endif
+ return 0;
+}
+void cleanup_module() { }
+#endif
+
+
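
P.S.: regarding the *((int*)0)=0 replacement mentioned at the top: since
backtrace() is exported, a module can dump a non-fatal call trace at a
"should never happen" spot instead of forcing an oops. A hypothetical
example; foo_check_state() and its message are made up, only the exported
backtrace() comes from the patch:

    #include <linux/kernel.h>
    #include <linux/module.h>

    extern void backtrace(void);    /* from arch/i386/lib/kbacktrace.c */

    static void foo_check_state(int state)
    {
            if (state < 0) {
                    printk(KERN_WARNING "foo: bad state %d\n", state);
                    backtrace();    /* print a call trace, keep going */
            }
    }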
